import tushare as ts
import csv
import time
import pandas as pd
pro = ts.pro_api('1dbda79ce58d052196b7ddec1663d53e4ea20571195a1a6055aab0c7')
stock_basic = pro.stock_basic(list_status='L', fields='ts_code, symbol, name, industry')
# Rename the columns so they are easier to import into Neo4j later
basic_rename = {'ts_code': 'TS代码', 'symbol': '股票代码', 'name': '股票名称', 'industry': '行业'}
stock_basic.rename(columns=basic_rename, inplace=True)
# Save as stock.csv
stock_basic.to_csv('stock.csv', encoding='gbk')
# Fetch top10_holders
holders = pd.DataFrame(columns=('ts_code', 'ann_date', 'end_date', 'holder_name', 'hold_amount', 'hold_ratio'))
# Fetch one year of top-10 shareholder data for every listed stock (a single reporting period can also be used)
for i in range(len(stock_basic)):
    code = stock_basic['TS代码'].values[i]
    top10_holders = pro.top10_holders(ts_code=code, start_date='20180101', end_date='20181231')
    holders = pd.concat([holders, top10_holders])
    time.sleep(0.3)  # throttle for the API rate limit
# Save as holders.csv
holders.to_csv('holders.csv', encoding='gbk')
# Fetch the concept list and check the number of concept categories
concept = pro.concept()
concept.to_csv('concept_num.csv', encoding='gbk')
# Fetch concept_detail for each concept id
concept_details = pd.DataFrame(columns=('id', 'concept_name', 'ts_code', 'name'))
for i in range(358):
    concept_id = 'TS' + str(i)
    concept_detail = pro.concept_detail(id=concept_id)
    concept_details = pd.concat([concept_details, concept_detail])
    time.sleep(0.3)
# Save as concept.csv
concept_details.to_csv('concept.csv', encoding='gbk') |
class DinnerPlates:
    def __init__(self, capacity: int):
        self.capacity = capacity
        self.stack = []

    def push(self, val: int) -> None:
        # Append to the rightmost stack while it has room, otherwise start a new stack
        if self.stack and len(self.stack[-1]) < self.capacity:
            self.stack[-1].append(val)
        else:
            self.stack.append([val])

    def pop(self) -> int:
        return self.popAtStack(-1)

    def popAtStack(self, index: int) -> int:
        res = self.stack[index].pop()
        if len(self.stack[index]) == 0:
            self.stack.pop(index)
        return res
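# Illustrative walk-through of this simplified implementation (a sketch; note that push
# always targets the rightmost stack, unlike the full LeetCode DinnerPlates semantics):
# dp = DinnerPlates(2)
# for v in (1, 2, 3, 4, 5):
#     dp.push(v)        # stacks: [[1, 2], [3, 4], [5]]
# dp.popAtStack(0)      # returns 2; stacks: [[1], [3, 4], [5]]
# dp.pop()              # returns 5; the emptied stack is removed: [[1], [3, 4]]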
'''
[[373, 86], [395, 306], [370], [41, 17], [387], [66], [27], [252, 6], [269, 231], [35, 346]]
[[373, 86], [395, 306], [41, 17], [387], [66], [27], [252, 6], [269, 231], [35, 346]]
[[373, 86], [395, 306], [41], [387], [66], [27], [252, 6], [269, 231], [35, 346]]
[[373, 86], [395, 306], [41], [387], [66], [27], [252, 6], [269], [35, 346]]
[[373, 86], [395, 306], [41], [387], [66], [27], [252, 6], [269], [35, 346]]
[[373, 86], [395, 306], [41], [387], [66], [27], [252, 6], [269], [35]]
[[373, 86], [395, 474], [41, 216], [387, 256], [66, 196], [27, 332], [252, 6], [269, 43], [35, 75], [22, 273], [101, 11], [403, 33], [365, 338], [331, 134], [1, 250], [19]]
[[373, 86], [395, 474], [41, 216], [387, 256], [66, 196], [27, 332], [252, 6], [269, 43], [35, 75], [22, 273], [101, 11], [403, 33], [365, 338], [331, 134], [1, 250]]
[[373, 86], [395, 474], [41, 216], [387, 256], [66, 196], [27, 332], [252, 6], [269, 43], [35, 75], [22, 273], [101, 11], [403, 33], [365, 338], [331, 134], [1]]
[[373, 86], [395, 474], [41, 216], [387, 256], [66, 196], [27, 332], [252, 6], [269, 43], [35, 75], [22, 273], [101, 11], [403, 33], [365, 338], [331, 134]]
[[373, 86], [395, 474], [41, 216], [387, 256], [66, 196], [27, 332], [252, 6], [269, 43], [35, 75], [22, 273], [101, 11], [403, 33], [365, 338], [331]]
[[373, 86], [395, 474], [41, 216], [387, 256], [66, 196], [27, 332], [252, 6], [269, 43], [35, 75], [22, 273], [101, 11], [403, 33], [365, 338]]
[[373, 86], [395, 474], [41, 216], [387, 256], [66, 196], [27, 332], [252, 6], [269, 43], [35, 75], [22, 273], [101, 11], [403, 33], [365]]
[[373, 86], [395, 474], [41, 216], [387, 256], [66, 196], [27, 332], [252, 6], [269, 43], [35, 75], [22, 273], [101, 11], [403, 33]]
[[373, 86], [395, 474], [41, 216], [387, 256], [66, 196], [27, 332], [252, 6], [269, 43], [35, 75], [22, 273], [101, 11], [403]]
[[373, 86], [395, 474], [41, 216], [387, 256], [66, 196], [27, 332], [252, 6], [269, 43], [35, 75], [22, 273], [101, 11]]
''' |
from __future__ import annotations
from typing import Literal
from prettyqt import core
from prettyqt.qt import QtCore
from prettyqt.utils import InvalidParamError, bidict
NAME_TYPE = bidict(
default=QtCore.QTimeZone.NameType.DefaultName,
long=QtCore.QTimeZone.NameType.LongName,
short=QtCore.QTimeZone.NameType.ShortName,
offset=QtCore.QTimeZone.NameType.OffsetName,
)
NameTypeStr = Literal["default", "long", "short", "offset"]
TIME_TYPE = bidict(
standard=QtCore.QTimeZone.TimeType.StandardTime,
daylight=QtCore.QTimeZone.TimeType.DaylightTime,
generic=QtCore.QTimeZone.TimeType.GenericTime,
)
TimeTypeStr = Literal["standard", "daylight", "generic"]
class TimeZone(QtCore.QTimeZone):
def __init__(self, *args):
if len(args) == 1 and isinstance(args[0], str):
super().__init__(QtCore.QByteArray(args[0].encode()))
else:
super().__init__(*args)
def __repr__(self):
return f"{type(self).__name__}({self.get_id()!r})"
def __str__(self):
return self.get_id()
def __reduce__(self):
return type(self), (self.get_id(),)
def get_id(self) -> str:
return bytes(self.id()).decode()
def get_display_name(
self,
datetime: QtCore.QDateTime | TimeTypeStr,
name_type: NameTypeStr = "default",
locale: core.Locale | None = None,
) -> str:
if isinstance(datetime, str):
if datetime not in TIME_TYPE:
raise InvalidParamError(datetime, TIME_TYPE)
datetime = TIME_TYPE[datetime]
if name_type not in NAME_TYPE:
raise InvalidParamError(name_type, NAME_TYPE)
if locale is None:
locale = core.Locale()
return self.displayName(datetime, NAME_TYPE[name_type], locale)
# def get_value(self) -> datetime.datetime:
# try:
# return self.toPython()
# except TypeError:
# return self.toPyTimeZone()
if __name__ == "__main__":
date = core.TimeZone(2000, 11, 11)
dt = TimeZone(date)
|
import json
from channels.generic.websocket import WebsocketConsumer
class ChatConsumer(WebsocketConsumer):
    def connect(self):
        self.accept()

    def disconnect(self, close_code):
        pass

    def receive(self, text_data):
        text_data_json = json.loads(text_data)
        message = text_data_json['message']
        self.send(text_data=json.dumps({
            'message': message
        })) |
from copy import deepcopy
class SimpleAgent:
    def getAction(self, gameState):
        gameState._getValidWords()
        return gameState.validWords, len(gameState.validWords)


class HeuristicAgent:
    def getAction(self, gameState):
        gameState._getValidWords()
        if len(gameState.validWords) == 1:
            return gameState.validWords, 1
        maxWords = []
        maxOptions = 0
        for word in gameState.validWords:
            tempGameState = deepcopy(gameState)
            nextGameState = tempGameState.step(word)
            if nextGameState.finished:
                continue
            optionsCount = len(nextGameState.validWords)
            # print(f"Potential Max Word: {word} ({optionsCount})")
            if optionsCount > maxOptions:
                maxOptions = optionsCount
                maxWords = [word]
            elif optionsCount == maxOptions:
                maxWords.append(word)
        return maxWords, maxOptions
|
from .property import Property
from .namedelement import NamedElement
from .renderer import Renderer
class Method(NamedElement, Renderer):
    def __init__(self, **kwargs):
        self.parameters = \
            [Property(**p) for p in kwargs.pop('parameters', [])]
        self.returns = \
            [Property(**r) for r in kwargs.pop('returns', [])]
        self.flags = kwargs.pop('flags', [])
        self.cpp_flags = self.or_cpp_flags(self.flags)
        self.errors = kwargs.pop('errors', [])
        super(Method, self).__init__(**kwargs)

    def markdown(self, loader):
        return self.render(loader, "method.md.mako", method=self)

    def cpp_prototype(self, loader, interface, ptype):
        return self.render(loader, "method.prototype.hpp.mako", method=self,
                           interface=interface, ptype=ptype, post=str.rstrip)

    def or_cpp_flags(self, flags):
        """Return the corresponding ORed cpp flags."""
        flags_dict = {"deprecated": "vtable::common_::deprecated",
                      "hidden": "vtable::common_::hidden",
                      "unprivileged": "vtable::common_::unprivileged",
                      "no_reply": "vtable::method_::no_reply"}
        cpp_flags = []
        for flag in flags:
            try:
                cpp_flags.append(flags_dict[flag])
            except KeyError:
                raise ValueError("Invalid flag \"{}\"".format(flag))
        return " | ".join(cpp_flags)
|
#importing our libraries that we will be using for emotion detection
from operator import index
from textwrap import fill
from tkinter import Label, Tk
import numpy as np
import cv2
import keras
from tkinter import *
import pandas as pd
import webbrowser
win = Tk() #main application window
win.geometry('600x500')
win.title("Emotional Recommender")
label = Label(win,text="Welcome To Emotional Recommender",font=50,relief = RAISED,bg = 'red').pack(fill=X,padx=15,pady=30)
user_label = Label(win,text="Here's How this application works: \n 1) Click on Capture button to Open up your camera. \n 2) The Model will detect your emotions, \n you can exit the camera window by clicking 'q' on your keyboard. \n 3) The Result will be displayed in the window",font=50,relief = RAISED,bg = 'red').pack(fill=X,padx=15,pady=30)
win.iconbitmap(r'Face-Emotions-Recognition\Emotion-recognition-with-GUI\images\Icons8-Ios7-Logos-Google-Drive-Copyrighted.ico')  # giving the window an icon
new = 1
cap = cv2.VideoCapture(0) #used for capturing the video using the webcam
model_path = r'Face-Emotions-Recognition\Emotion-recognition-with-GUI\model_optimal.h5'  # path of our model
model = keras.models.load_model(model_path) #loading our model that we will use to make predictions of emotions
emotion_dict = {0:'Angry',1:'Disgust',2:'Fear',3:'Happy',4:'Neutral',5:'Sad',6:'Surprise'} #dictionary containing different values
def videocapture():
    maxindex = None  # index of the last predicted emotion; stays None if no face was ever detected
    facecascade = cv2.CascadeClassifier(r'Face-Emotions-Recognition\Emotion-recognition-with-GUI\haarcascade_frontalface_default.xml')  # cascade classifier for face detection, loaded once
    while True:  # continuous loop to keep the window running
        isTrue, frame = cap.read()  # reading our frames from the capture
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # converting the frame to grayscale
        faces = facecascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)  # find face locations; scaleFactor controls the image pyramid, minNeighbors the detection quality
        for (x, y, w, h) in faces:  # draw rectangles on the detected faces and add text
            cv2.rectangle(frame, (x, y-50), (x+w, y+h+10), (255, 0, 0), 2)  # rectangle start point, end point, color and width
            roi_gray = gray[y:y + h, x:x + w]  # ROI (region of interest): rows y to y+h, columns x to x+w, like NumPy slicing
            cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray, (48, 48)), -1), 0)  # resize to the 48x48 shape the model was trained on
            prediction = model.predict(cropped_img)  # predict the emotion for the detected face
            maxindex = int(np.argmax(prediction))  # index of the most likely emotion
            cv2.putText(frame, emotion_dict[maxindex], (x+20, y-60), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)  # label the face with the emotion from our emotion dictionary
        cv2.imshow('Video', cv2.resize(frame, (700, 500), interpolation=cv2.INTER_CUBIC))  # video window in which we will be detecting emotions
        if cv2.waitKey(1) & 0xFF == ord('q'):  # press q to exit the window
            break
    var = emotion_dict[maxindex] if maxindex is not None else 'No face detected'
    label1 = Label(win, text="Emotion Detected " + " ==> " + var, font=50, relief=RAISED, bg='red').pack(fill=X, padx=15, pady=30)
    cap.release()  # release the hardware and software resources that were being used
    cv2.destroyAllWindows()  # destroy the window that we created for emotion detection
Button(win,text="Capture",command = videocapture,relief=RAISED,width=15,font=10,bg = 'black',fg = 'green').pack(pady=20)
Button(win,text="Exit Application",command = win.destroy,relief=RAISED,width=15,font=10).pack(pady=5)
win.mainloop() |
print("Hello World")
name = input("Hello, what is your name? ")
age = input("How old are you? ")
lunch = input("What do you want for lunch? ")
print("\n\n\n")
print(f"Hello {name}, your lunch order of {lunch} is here.")
print(f"You are tall for an {age} year old") |
import tensorflow as tf
from match.layers.activation import Dice, activation_layer
from match.layers.core import PredictionLayer, Similarity, DNN
from match.layers.sequence import SequencePoolingLayer, WeightedSequenceLayer
from match.layers.utils import (NoMask, Hash, concat_func, reduce_mean, reduce_sum, reduce_max,
div, softmax, combined_dnn_input)
custom_objects = {'tf': tf,
'Dice':Dice,
'activation_layer':activation_layer,
'PredictionLayer': PredictionLayer,
'Similarity': Similarity,
'DNN':DNN,
'SequencePoolingLayer': SequencePoolingLayer,
'WeightedSequenceLayer': WeightedSequenceLayer,
'NoMask': NoMask,
'Hash': Hash,
'concat_func': concat_func,
'reduce_mean': reduce_mean,
'reduce_sum': reduce_sum,
'reduce_max': reduce_max,
'div': div,
'softmax': softmax,
'combined_dnn_input':combined_dnn_input
} |
from django.db import models
from datetime import date
import datetime
from django.utils import timezone
from django.contrib.auth.models import User
ITEM_CATEGORIA = (
('C','Conferencia'),
('S','Seminario'),
('Co','Congreso'),
('Cu','Curso'),
)
ITEM_TIPO=(('V','Virtual'),('P','Presencial'))
class Evento(models.Model):
    e_nombre = models.CharField(max_length=200)
    e_categoria = models.CharField(choices=ITEM_CATEGORIA, max_length=1)
    e_lugar = models.CharField(max_length=200)
    e_direccion = models.CharField(max_length=200)
    e_fechaIni = models.DateField(default=date.today)
    e_fechaFin = models.DateTimeField(auto_now=False)
    e_tipo = models.CharField(choices=ITEM_TIPO, max_length=1)

    def __str__(self):
        return self.e_nombre
|
import os
import asyncio
import asyncio.subprocess as aiosp
import locale
import yaml
import json
import io
from threading import Thread
import websockets
HOSTNAME = 'localhost'
WSS_PORT = 8765
HTTP_PORT = 8888
ASM_DIR = 'champs'
STDIN_F_NAME = 'stdin.txt'
COREWAR_EXE = "cmake-build-debug/corewar_vm"
# COREWAR_EXE = "corewar"
def append_file(data):
with open(STDIN_F_NAME, 'at') as f:
f.write(data)
def yaml_to_json(msg):
try:
return json.dumps(yaml.safe_load(io.StringIO(msg)))
except Exception:
print(f"can't encode:`\n{msg}`")
raise
class VM:
def __init__(self, cors):
self.p = None
self.cors = cors
async def stop(self):
if self.p.returncode is None:
self.p.terminate()
print(f"proc killed")
ret = await self.p.wait()
print(f"proc finished; ret = {ret}")
async def __aiter__(self):
print("starting", ", ".join(self.cors))
self.p = await aiosp.create_subprocess_exec(
COREWAR_EXE, "-i",
*(f"{ASM_DIR}/{cor}" for cor in self.cors),
stdin=aiosp.PIPE,
stdout=aiosp.PIPE)
print(f"pid: {self.p.pid}")
encoding = locale.getpreferredencoding(False)
msg = ""
while True:
line = await self.p.stdout.readline()
if not line and self.p.returncode is not None:
break
if isinstance(line, bytes):
line = line.decode(encoding=encoding)
if not line.strip():
if not msg:
await asyncio.sleep(.1)
continue
yield yaml_to_json(msg)
msg = ""
else:
msg += line
yield json.dumps({"type": "end"})
async def on_message(vm, msg):
print(f"got message: {msg}")
if msg['type'] == "step":
l = f"{msg.get('steps', 1)}\n"
elif msg['type'] == "run_until_end":
l = "999999\n"
else:
print(f"ERROR: unknwon command {msg['type']}")
return
append_file(l)
vm.p.stdin.write(l.encode())
await vm.p.stdin.drain()
async def consumer_handler(ws: websockets.WebSocketServerProtocol, vm):
print("*start consuming*")
try:
async for message in ws:
try:
msg = json.loads(message)
except json.JSONDecodeError:
print("message:", message)
else:
await on_message(vm, msg)
except websockets.ConnectionClosed as e:
print(f"conn closed, code: {e.code}, reason: {e.reason}")
else:
print("*CONSUMING SOUNDS STOP*")
finally:
await vm.stop()
async def producer_handler(ws: websockets.WebSocketServerProtocol, vm):
i = 0
async for message in vm:
print(f"send `{message}`")
await ws.send(message)
i += 1
async def handler(ws: websockets.WebSocketServerProtocol, path):
cors = path.strip('/').split(';')
vm = VM(cors)
consumer_task = asyncio.ensure_future(consumer_handler(ws, vm))
producer_task = asyncio.ensure_future(producer_handler(ws, vm))
done, pending = await asyncio.wait(
[consumer_task, producer_task],
return_when=asyncio.FIRST_COMPLETED,
)
for task in pending:
task.cancel()
for task in done:
try:
await task
except websockets.ConnectionClosedOK:
print("client disconnected")
def wss_server():
ws_address = f"wss://{HOSTNAME}:{WSS_PORT}/WebSocket"
loop = asyncio.new_event_loop()
asyncio.get_child_watcher()._loop = loop
start_server = websockets.serve(handler, "localhost", WSS_PORT, loop=loop)
loop.run_until_complete(start_server)
loop.run_forever()
if __name__ == '__main__':
Thread(name='wss', target=wss_server, daemon=True).start()
from flask import Flask, Response
from werkzeug.exceptions import NotFound
app = Flask("corewar")
def serve_file(*path):
print('get', path)
f_name = os.path.join('vis', *path)
if not os.path.exists(f_name):
raise NotFound()
with open(f_name) as f:
return f.read()
@app.route('/champions')
def hello_world():
response = Response(json.dumps([f for f in os.listdir(ASM_DIR)
if
os.path.isfile(os.path.join(ASM_DIR, f))
and f.endswith('.cor')]))
header = response.headers
header['Access-Control-Allow-Origin'] = '*'
return response
@app.route('/<string:path>')
def static_file(path):
return serve_file(path)
@app.route('/js/<string:path>')
def static_js(path):
return serve_file('js', path)
@app.route('/')
def index():
return serve_file('index.html')
app.run(port=HTTP_PORT)
|
import codecs
import os
import re
import setuptools
here = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
# intentionally *not* adding an encoding option to open
return codecs.open(os.path.join(here, *parts), 'r').read()
def find_meta(*meta_file_parts, meta_key):
"""
Extract __*meta*__ from meta_file
"""
meta_file = read(*meta_file_parts)
meta_match = re.search(r"^__{}__ = ['\"]([^'\"]*)['\"]".format(meta_key),
meta_file, re.M)
if meta_match:
return meta_match.group(1)
raise RuntimeError("Unable to find __{}__ string.".format(meta_key))
def read_requirements(*parts):
"""
Given a requirements.txt (or similar style file), returns a list of
requirements.
Assumes anything after a single '#' on a line is a comment, and ignores
empty lines.
"""
requirements = []
for line in read(*parts).splitlines():
new_line = re.sub(r'(\s*)?#.*$',  # the space immediately before the
# hash mark, the hash mark, and
# anything that follows it
'', # replace with a blank string
line)
if new_line: # i.e. we have a non-zero-length string
requirements.append(new_line)
return requirements
##############################################################################
# PACKAGE METADATA #
##############################################################################
META_PATH = ['minchin', 'scripts', 'photosorter', '__init__.py']
NAME = find_meta(*META_PATH, meta_key='title').lower()
VERSION = find_meta(*META_PATH, meta_key='version')
SHORT_DESC = find_meta(*META_PATH, meta_key='description')
LONG_DESC = read('README.rst')
AUTHOR = find_meta(*META_PATH, meta_key='author')
AUTHOR_EMAIL = find_meta(*META_PATH, meta_key='email')
URL = find_meta(*META_PATH, meta_key='url')
LICENSE = find_meta(*META_PATH, meta_key='license')
PACKAGES = setuptools.find_packages()
# pull from requirements.IN, requirements.TXT is generated from this
INSTALL_REQUIRES = read_requirements('requirements.in')
DEV_REQUIRES = read_requirements('dev-requirements.in')
EXTRA_REQUIRES = {
'build': DEV_REQUIRES,
'docs': [
# 'sphinx >= 1.4', # theme requires at least 1.4
# 'cloud_sptheme >=1.8',
# 'releases',
# 'Babel >=1.3,!=2.0', # 2.0 breaks on Windows
],
'test': [
# 'green >=1.9.4', # v2 works
# 'coverage',
# 'isort',
# 'pydocstyle',
# 'pycodestyle',
# 'check-manifest'
],
}
# full list of Classifiers at
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
CLASSIFIERS = [
# having an unknown classifier should keep PyPI from accepting the
# package as an upload
# 'Private :: Do Not Upload',
# 'Development Status :: 1 - Planning',
# 'Development Status :: 2 - Pre-Alpha',
# 'Development Status :: 3 - Alpha',
'Development Status :: 4 - Beta',
# 'Development Status :: 5 - Production/Stable',
# 'Development Status :: 6 - Mature',
# 'Development Status :: 7 - Inactive',
# 'Programming Language :: Python :: 2',
# 'Programming Language :: Python :: 2.6',
# 'Programming Language :: Python :: 2.7',
# 'Programming Language :: Python :: 2 :: Only',
'Programming Language :: Python :: 3',
# 'Programming Language :: Python :: 3.2',
# 'Programming Language :: Python :: 3.3',
# 'Programming Language :: Python :: 3.4',
# 'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
# 'Programming Language :: Python :: 3 :: Only',
'Natural Language :: English',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
]
##############################################################################
if LICENSE in ['MIT License']:
CLASSIFIERS += ['License :: OSI Approved :: {}'.format(LICENSE)]
# add 'all' key to EXTRA_REQUIRES
all_requires = []
for k, v in EXTRA_REQUIRES.items():
all_requires.extend(v)
EXTRA_REQUIRES['all'] = all_requires
setuptools.setup(
name=NAME,
version=VERSION,
url=URL,
license=LICENSE,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
description=SHORT_DESC,
long_description=LONG_DESC,
packages=PACKAGES,
package_data={'': ['README.rst',
# 'changelog.rst',
'LICENSE.txt']},
include_package_data=True,
install_requires=INSTALL_REQUIRES,
extras_require=EXTRA_REQUIRES,
platforms='any',
classifiers=CLASSIFIERS,
namespace_packages=['minchin',
'minchin.scripts', ],
entry_points={
'console_scripts': [
'photosorter = minchin.scripts.photosorter:main',
],
},
)
|
from .template_generator import TemplateGenerator |
import pytest
import requests
from uuid import uuid4
from datetime import date, timedelta
from json import JSONDecodeError
from backend.tests.factories import OrderFactory, ProductFactory, OrderProductFactory
from backend.util.response.user_orders import UserOrdersSchema
from backend.util.response.error import ErrorSchema
from backend.util.slug import uuid_to_slug
@pytest.fixture(scope="function", autouse=True)
def factory_session(db_perm_session):
OrderFactory._meta.sqlalchemy_session = db_perm_session
ProductFactory._meta.sqlalchemy_session = db_perm_session
OrderProductFactory._meta.sqlalchemy_session = db_perm_session
def test_select_by_user_controller(domain_url, db_perm_session, token_session, prod_list):
user_slug = uuid_to_slug(uuid4())
prod_id_list = [p.meta["id"] for p in prod_list]
product_list = [ProductFactory.create(es_id=es_id) for es_id in prod_id_list]
db_perm_session.commit()
obj_list = OrderFactory.create_batch(2, user_slug=user_slug)
for product in product_list:
OrderProductFactory.create(order=obj_list[0], product=product, amount=2)
for product in product_list[0:3]:
OrderProductFactory.create(order=obj_list[1], product=product, amount=5)
db_perm_session.commit()
response = token_session.post(
domain_url + "/api/order/user/%s" % user_slug
)
data = response.json()
UserOrdersSchema().load(data)
assert response.status_code == 200
assert len(data["orders"]) == 2
assert data["total"] == 2
assert data["pages"] == 1
order_slug_list = [order["slug"] for order in data["orders"]]
for slug in order_slug_list:
assert slug in [obj.uuid_slug for obj in obj_list]
for order in data["orders"]:
if order["slug"] == obj_list[0].uuid_slug:
assert order["product_types"] == 5
assert order["items_amount"] == 10
else:
assert order["product_types"] == 3
assert order["items_amount"] == 15
response = token_session.post(
domain_url + "/api/order/user/%s" % user_slug,
json={
"page": "1",
"page_size": "1"
}
)
data = response.json()
UserOrdersSchema().load(data)
assert response.status_code == 200
assert len(data["orders"]) == 1
assert data["total"] == 2
assert data["pages"] == 2
response = token_session.post(
domain_url + "/api/order/user/%s" % user_slug,
json={
"datespan": {
"start": str(date.today() - timedelta(days=1)),
"end": str(date.today() + timedelta(days=1))
}
}
)
data = response.json()
UserOrdersSchema().load(data)
assert response.status_code == 200
assert len(data["orders"]) == 2
assert data["total"] == 2
assert data["pages"] == 1
response = token_session.post(
domain_url + "/api/order/user/WILLrogerPEREIRAslugBR"
)
with pytest.raises(JSONDecodeError):
response.json()
assert response.status_code == 204
def test_select_by_user_controller_not_registered(domain_url, db_perm_session, token_session):
user_slug = uuid_to_slug(uuid4())
bad_obj_list = OrderFactory.create_batch(4, user_slug=user_slug)
bad_product = ProductFactory.create()
for order in bad_obj_list:
OrderProductFactory.create(order=order, product=bad_product, amount=5)
db_perm_session.commit()
response = token_session.post(
domain_url + "/api/order/user/%s" % user_slug
)
data = response.json()
ErrorSchema().load(data)
assert response.status_code == 400
assert data["error"].find("not registered") != -1
def test_select_by_user_controller_unauthorized(domain_url):
response = requests.post(
domain_url + "/api/order/user/WILLrogerPEREIRAslugBR",
)
data = response.json()
ErrorSchema().load(data)
assert response.status_code == 401
|
import toml
import numpy
from utils import retry, accounts, firstProducer, numProducers, intToCurrency
config = toml.load('./config.toml')
def allocateFunds(b, e):
dist = numpy.random.pareto(1.161, e - b).tolist() # 1.161 = 80/20 rule
dist.sort()
dist.reverse()
factor = 2_000_000 / sum(dist)
total = 0
for i in range(b, e):
funds = round(factor * dist[i - b] * 10000)
if i >= firstProducer and i < firstProducer + numProducers:
funds = max(funds, round(config['funds']['min_producer_funds'] * 10000))
total += funds
accounts[i]['funds'] = funds
return total
def createStakedAccounts(b, e):
ramFunds = round(config['funds']['ram_funds'] * 10000)
configuredMinStake = round(config['funds']['min_stake'] * 10000)
maxUnstaked = round(config['funds']['max_unstaked'] * 10000)
for i in range(b, e):
a = accounts[i]
funds = a['funds']
print('#' * 80)
print('# %d/%d %s %s' % (i, e, a['name'], intToCurrency(funds)))
print('#' * 80)
if funds < ramFunds:
print('skipping %s: not enough funds to cover ram' % a['name'])
continue
minStake = min(funds - ramFunds, configuredMinStake)
unstaked = min(funds - ramFunds - minStake, maxUnstaked)
stake = funds - ramFunds - unstaked
stakeNet = round(stake / 2)
stakeCpu = stake - stakeNet
print('%s: total funds=%s, ram=%s, net=%s, cpu=%s, unstaked=%s' % (a['name'], intToCurrency(a['funds']), intToCurrency(ramFunds), intToCurrency(stakeNet), intToCurrency(stakeCpu), intToCurrency(unstaked)))
assert(funds == ramFunds + stakeNet + stakeCpu + unstaked)
retry(config['cleos']['path'] + 'system newaccount --transfer airdrops %s %s --stake-net "%s" --stake-cpu "%s" --buy-ram "%s" ' %
(a['name'], a['pub'], intToCurrency(stakeNet), intToCurrency(stakeCpu), intToCurrency(ramFunds)))
if unstaked:
retry(config['cleos']['path'] + 'transfer airdrops %s "%s"' % (a['name'], intToCurrency(unstaked)))
if __name__ == '__main__':
allocateFunds(0, len(accounts))
createStakedAccounts(0, len(accounts))
|
#!/usr/bin/env python
'''
These objects and functions are part of a larger poker assistant project.
Content of this script enable the user to simulate game of Texas Holdem Poker.
'''
__author__ = 'François-Guillaume Fernandez'
__license__ = 'MIT License'
__version__ = '0.1'
__maintainer__ = 'François-Guillaume Fernandez'
__status__ = 'Development'
import numpy as np
def get_card_value(idx):
return [2, 3, 4, 5, 6, 7, 8, 9, 10, 'J', 'Q', 'K', 'A'][idx]
class Card:
def __init__(self, suit, value):
self.suit = suit # Diamonds, Clubs, Hearts, Spades
self.value = value # 1 -> 13
def get_value(self):
return get_card_value(self.value)
def get_name(self):
return '%s %s' % (self.get_value(), self.suit)
def get_hand_value(cards):
# Order the cards
cards_values = [card.value for card in cards]
sorted_idx = np.argsort(cards_values)[::-1]
ordered_cards = np.array(cards)[sorted_idx]
cards_values = [card.value for card in ordered_cards]
cards_suits = [card.suit for card in ordered_cards]
nb_cards = len(cards)
# Get the occurrences
from collections import Counter
suit_count = Counter(cards_suits)
value_count = Counter(cards_values)
hand = {'royal flush': None, 'straight flush': None,
'fours': [], 'full house': None,
'flush': [], 'straight': [],
'threes': [], 'double pairs': [], 'pairs': [], 'high': cards_values[0]}
# Check for straight & flush
if nb_cards >= 5:
# Check for flush
for suit, count in suit_count.items():
if count >= 5:
# keep the indices of the flush cards; the suit can be recovered from any of them later
hand['flush'] = [idx for idx, c_suit in enumerate(cards_suits) if c_suit == suit]
break
# Check for straight
if len(value_count.keys()) >= 5:
flow = 1
prev_val = cards_values[0]
tmp_values = cards_values
# Ace case for straights
if prev_val == 12:
tmp_values.append(-1)
for idx, val in enumerate(tmp_values):
diff = prev_val - val
if diff > 0:
if diff == 1:
flow = flow + 1
else:
if idx > len(tmp_values) - 5:
break
flow = 1
if flow >= 5:
hand['straight'].append(val + 4)
break
prev_val = val
if len(hand['straight']) > 0 and len(hand['flush']) > 0:
# Straight flush
flush_suit = cards_suits[hand['flush'][0]]
for high in hand['straight']:
# Check if each card is in the flush
if all(card_idx in hand['flush'] for card_idx, card_value in enumerate(cards_values) if card_value in range(high - 4, high + 1)):
hand['straight flush'] = (high, flush_suit)
# Royal flush
if hand['straight flush'] is not None and hand['straight flush'][0] == 12:
hand['royal flush'] = hand['straight flush'][1]
# Check for fours, threes, pairs
for val, count in value_count.items():
if count == 4:
hand['fours'].append(val)
elif count == 3:
hand['threes'].append(val)
elif count == 2:
hand['pairs'].append(val)
# Double pairs
if len(hand['pairs']) >= 2:
hand['double pairs'] = hand['pairs'][:2]
# Full House
if len(hand['threes']) > 0 and len(hand['pairs']) > 0:
hand['full house'] = (hand['threes'][0], hand['pairs'][0])
return hand
def get_winners(hands):
winner_idxs = []
for hand_type in ['royal flush', 'straight flush', 'fours', 'full house', 'flush', 'straight', 'threes', 'double pairs', 'pairs', 'high']:
selection = []
# Short-list hands with current hand type
for hand_idx, hand in enumerate(hands):
if isinstance(hand.get(hand_type), list) and len(hand.get(hand_type)) > 0:
selection.append(hand_idx)
# print('Checking %s: selection %s' % (hand_type, selection))
if len(selection) > 0:
if len(selection) == 1:
winner_idxs = selection
else:
if hand_type == 'royal flush':
winner_idxs = selection
elif hand_type == 'straight flush':
flush_high = np.max([hands[idx][hand_type][0] for idx in selection])
winner_idxs = [idx for idx in selection if hands[idx][hand_type][0] == flush_high]
# Case where you need to compare 2 args potentially
elif hand_type in ['full house', 'double pairs']:
shortlist = selection[np.argmax([hands[idx][hand_type][0] for idx in selection])]
if isinstance(shortlist, int) or len(shortlist) == 1:
winner_idxs = shortlist
else:
winner_idxs = shortlist[np.argmax([hands[idx][hand_type][1] for idx in shortlist])]
# All other hand types are comparable with the high card in the hand type
else:
winner_idxs = selection[np.argmax([hands[idx][hand_type][0] for idx in selection])]
break
return winner_idxs
class Deck:
def __init__(self):
self.cards = []  # build the deck per instance; a class-level list would be shared by every Deck
for suit in ['Diamonds', 'Clubs', 'Hearts', 'Spades']:
for value in range(0, 13):
self.cards.append(Card(suit, value))
def drow_card(self):
import random
idx = random.randint(0, len(self.cards) - 1)
card = self.cards[idx]
del self.cards[idx]
return card
def drow_cards(self, nb_cards):
cards = []
for c in range(nb_cards):
cards.append(self.drow_card())
return cards
class Player:
def __init__(self):
self.cash = 1000
self.cards = []  # per-instance hand; a class-level list would be shared across players
def deal_cards(self, cards):
self.cards = cards
def get_cards(self):
return self.cards
class Game:
def __init__(self, players):
self.deck = Deck()
self.players = players
self.community_cards = []
def deal(self):
print('Dealing cards to players...')
for p in self.players:
p.cards = self.deck.drow_cards(2)
def get_com_cards(self):
return [card.get_name() for card in self.community_cards]
def hit(self):
if len(self.community_cards) == 0:
self.community_cards = self.deck.drow_cards(3)
print('Flop:', self.get_com_cards())
elif len(self.community_cards) == 3:
self.community_cards.append(self.deck.drow_card())
print('Turn:', self.get_com_cards())
elif len(self.community_cards) == 4:
self.community_cards.append(self.deck.drow_card())
print('River:', self.get_com_cards())
else:
raise ValueError('All community cards are already drawn')
def get_player_cards(self, idx):
return [card.get_name() for card in self.players[idx].cards]
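# Illustrative round, a sketch of how the pieces above fit together:
# players = [Player() for _ in range(3)]
# game = Game(players)
# game.deal()                          # two hole cards per player
# game.hit(); game.hit(); game.hit()   # flop, turn, river
# hands = [get_hand_value(p.get_cards() + game.community_cards) for p in players]
# winners = get_winners(hands)         # index (or indices) of the strongest hand(s)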
|
from collections import namedtuple
from reportlab.pdfbase import pdfmetrics, ttfonts
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Table
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib.pagesizes import A4
from reportlab.lib import colors
from reportlab.lib.units import mm
from settings import config
font_arial = ttfonts.TTFont('Arial', 'arial.ttf')
pdfmetrics.registerFont(font_arial)
HMARGIN = 20 * mm
RMARGIN = 5 * mm
TMARGIN = 10 * mm
BMARGIN = 20 * mm
def get_vertical():
num = 270
while True:
yield num
num -= 5
Item = namedtuple('Item', 'name, code, unit, quantity, price')
class ReportInvoice:
def __init__(self):
self._info = {}
self._order=namedtuple('Info', 'date, number, payer, address, delivery')
self._story = []
self._data = []
self._styles = getSampleStyleSheet()
@staticmethod
def invoice_file(number):
"""
get invoice file name by the number of invoice
:param number: order number
:return path: invoice file name
"""
base_folder = config.invoices_folder()
invoice_file = 'invoice{:06}.pdf'.format(number)
return config.os.path.join(base_folder, invoice_file)
def set_order(self, date, number, payer, address, delivery):
self._order.date = date
self._order.number = number
self._order.payer = payer
self._order.address = address
self._order.delivery = delivery
def add_item(self, name, code, unit, quantity, price):
item = Item(name= name,
code= code,
unit= unit,
quantity= quantity,
price= price)
self._data.append(item)
def _first_page(self, canvas, doc):
"""
make a top of page with company info
make a footer
"""
vertical = get_vertical()
canvas.saveState()
canvas.setFont('Arial', 16)
canvas.drawString(HMARGIN, next(vertical) * mm, self._info['name'])
canvas.setFont('Arial', 12)
format_text = 'ИНН/КПП: {}/{}'.format(self._info['taxpayerID'],
self._info['registrationID'])
canvas.drawString(HMARGIN, next(vertical) * mm, format_text)
format_text = 'Адресс: {}'.format(self._info['address'])
canvas.drawString(HMARGIN, next(vertical) * mm, format_text)
format_text = 'Телефон: {}'.format(self._info['phone'])
canvas.drawString(HMARGIN, next(vertical) * mm, format_text)
format_text = 'Эл.почта: {}'.format(self._info['email'])
canvas.drawString(HMARGIN, next(vertical) * mm, format_text)
format_text = 'Банк: {}'.format(self._info['bank_account']['name'])
canvas.drawString(HMARGIN, next(vertical) * mm, format_text)
format_text = 'БИК: {}'.format(self._info['bank_account']['id'])
canvas.drawString(HMARGIN, next(vertical) * mm, format_text)
format_text = 'р/с: {}'.format(self._info['bank_account']['account'])
canvas.drawString(HMARGIN, next(vertical) * mm, format_text)
format_text = 'к/с: {}'.format(self._info['bank_account']['corr_acc'])
canvas.drawString(HMARGIN, next(vertical) * mm, format_text)
canvas.line(HMARGIN, 228 * mm, 205 * mm, 228 * mm)
format_text = 'Счёт № {:06} от {:%d.%m.%Y}'.format(self._order.number, self._order.date)
canvas.drawCentredString(210 / 2 * mm, 222 * mm, format_text)
format_text = 'Получатель: {}'.format(self._order.payer)
canvas.drawCentredString(210 / 2 * mm, 217 * mm, format_text)
canvas.line(HMARGIN, 215 * mm, 205 * mm, 215 * mm)
self._footer(canvas, str(doc.page))
canvas.restoreState()
def _later_page(self, canvas, doc):
"""
make a footer
"""
canvas.saveState()
self._footer(canvas, str(doc.page))
canvas.restoreState()
def _footer(self, canvas, page):
canvas.line(HMARGIN, 10 * mm, 205 * mm, 10 * mm)
canvas.setFont('Arial', 8)
canvas.drawString(200 * mm, 6 * mm, page)
def _order_table(self):
"""
prepare a data and create a Table
:return Table: Flowable
"""
style = self._styles['Normal']
style.wordWrap = 'CJK'
style.fontName = 'Arial'
table_data = [['№', 'Наименование', 'Код', 'Кол-во', 'Ед.изм.', 'Цена', 'Сумма']]
position = 1
total_cost = 0.0
for item in self._data:
cost = item.price * item.quantity
table_data.append([str(position),
item.name,
'',
str(item.quantity),
item.unit,
'{:.2f}'.format(item.price).replace('.', ','),
'{:.2f}'.format(round(cost, 2)).replace('.', ',')])
position += 1
total_cost += cost
table_data.append(['', '', '', '', '', 'Итого:', '{:.2f}'.format(total_cost).replace('.', ',')])
table_data.append(['', '', '', '', '', 'Доставка:', '{:.2f}'.format(self._order.delivery).replace('.', ',')])
return Table([[Paragraph(cell, style) for cell in row] for row in table_data],
colWidths=[10 * mm, 50 * mm, 40 * mm, 20 * mm, 20 * mm, 20 * mm, 25 * mm],
style=[
('FONT', (0,0), (-1,-1), 'Arial'),
('GRID', (0,0), (-1,-3), 0.5, colors.black),
])
def company_info(self, info_dict):
self._info = info_dict
def make(self):
path = self.invoice_file(self._order.number)
doc = SimpleDocTemplate(path,
pagesize = A4,
rightMargin = RMARGIN,
leftMargin = HMARGIN,
topMargin = TMARGIN,
bottomMargin = BMARGIN,
showBoundary = False)
self._story.append(Spacer(1, 70 * mm))
self._story.append(self._order_table())
try:
doc.build(self._story,
onFirstPage=self._first_page,
onLaterPages=self._later_page)
except PermissionError as e:
print(e.strerror)
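# Illustrative usage sketch; the keys mirror _first_page/set_order above and the
# values here are placeholders, not real data:
# report = ReportInvoice()
# report.company_info({'name': '...', 'taxpayerID': '...', 'registrationID': '...',
#                      'address': '...', 'phone': '...', 'email': '...',
#                      'bank_account': {'name': '...', 'id': '...', 'account': '...', 'corr_acc': '...'}})
# report.set_order(date=datetime.date(2021, 1, 15), number=42, payer='...', address='...', delivery=300.0)
# report.add_item(name='...', code='...', unit='...', quantity=2, price=199.90)
# report.make()  # writes invoice000042.pdf into config.invoices_folder()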
|
import os
import sys
from distutils.core import setup
import matplotlib
import numpy
# import py2exe
# from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
# from matplotlib.figure import Figure
sys.setrecursionlimit(5000)
# Compile program using:
# python compileScript.py py2exe
# add any numpy directory containing a dll file to sys.path
def numpy_dll_paths_fix():
paths = set()
np_path = numpy.__path__[0]
for dirpath, _, filenames in os.walk(np_path):
for item in filenames:
if item.endswith(".dll"):
paths.add(dirpath)
sys.path.append(*list(paths))
numpy_dll_paths_fix()
current_dir = os.path.dirname(os.path.realpath(__file__))
dist_dir = os.path.join(current_dir, "ORIGAMI_MS_v1.0.1")
# py2exe options
additionalFiles = [
(
"scripts",
[
"scripts/CIU_RESET.cs",
"scripts/CIU_LIST.cs",
"scripts/CIU_LINEAR.cs",
"scripts/CIU_FITTED.cs",
"scripts/CIU_EXPONENT.cs",
],
),
(
"dlls",
[
"dlls/CIU_RESET.dll",
"dlls/CIU_LIST.dll",
"dlls/CIU_LINEAR.dll",
"dlls/CIU_FITTED.dll",
"dlls/CIU_EXPONENT.dll",
],
),
("", matplotlib.get_py2exe_datafiles()),
]
py2exe_options = dict(
compressed=True,
optimize=0,
bundle_files=3,
excludes=[
"Tkconstants",
"Tkinter",
"tcl",
"Qt",
"PyQt5.*",
"PyQt4",
"pywin",
"pywin.debugger",
"pywin.debugger.dbgcon",
"pywin.dialogs",
"pywin.dialogs.list",
"scipy.*",
"pandas.*",
"pdb",
"doctest",
],
includes=["matplotlib.backends.backend_qt5agg", "email.mime.*", "jinja2"],
packages=["wx.lib.pubsub", "pkg_resources"],
dll_excludes=["Qt5Gui", "Qt5Widgets", "Qt5Svg", "Qt5Gui"],
dist_dir=dist_dir,
)
# main setup
setup(
name="ORIGAMI-MS",
version="v1.0.1",
description="ORIGAMI - A Software Suite for Activated Ion Mobility Mass Spectrometry ",
author="Lukasz G. Migas",
url="https://www.click2go.umip.com/i/s_w/ORIGAMI.html",
windows=[{"script": "ORIGAMIMS.py", "icon_resources": [(1, "icon.ico")]}],
console=[{"script": "ORIGAMIMS.py"}],
data_files=matplotlib.get_py2exe_datafiles(),
# data_files = additionalFiles,
options=dict(py2exe=py2exe_options),
)
|
from allauth.account import app_settings
from allauth.account.adapter import get_adapter
from allauth.account.views import SignupView
from allauth.account.views import ConfirmEmailView
from django.contrib import messages
from django.shortcuts import redirect
from users.forms import CustomUserCreationForm


class MySignupView(SignupView):
    form_class = CustomUserCreationForm


class ConfirmEmailView(ConfirmEmailView):
    def post(self, *args, **kwargs):
        self.object = confirmation = self.get_object()
        confirmation.confirm(self.request)
        get_adapter(self.request).add_message(
            self.request,
            messages.SUCCESS,
            'account/messages/email_confirmed.txt',
            {'email': confirmation.email_address.email}
        )
        if app_settings.LOGIN_ON_EMAIL_CONFIRMATION:
            resp = self.login_on_confirm(confirmation)
            if resp is not None:
                return resp
        redirect_url = self.get_redirect_url()
        if not redirect_url:
            ctx = self.get_context_data()
            return self.render_to_response(ctx)
        return redirect(redirect_url) |
# coding=utf-8
# Copyright 2020 The Tensor2Robot Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for run_collect_eval."""
import os
from absl import flags
from absl.testing import absltest
from absl.testing import parameterized
import gin
from tensor2robot.research.pose_env import pose_env
from tensor2robot.utils import continuous_collect_eval
import tensorflow.compat.v1 as tf
FLAGS = flags.FLAGS
class PoseEnvModelsTest(parameterized.TestCase):
@parameterized.parameters(
(pose_env.PoseEnvRandomPolicy,),
)
def test_run_pose_env_collect(self, demo_policy_cls):
urdf_root = pose_env.get_pybullet_urdf_root()
config_dir = 'research/pose_env/configs'
gin_config = os.path.join(
FLAGS.test_srcdir, config_dir, 'run_random_collect.gin')
gin.parse_config_file(gin_config)
tmp_dir = absltest.get_default_test_tmpdir()
root_dir = os.path.join(tmp_dir, str(demo_policy_cls))
gin.bind_parameter('PoseToyEnv.urdf_root', urdf_root)
gin.bind_parameter(
'collect_eval_loop.root_dir', root_dir)
gin.bind_parameter('run_meta_env.num_tasks', 2)
gin.bind_parameter('run_meta_env.num_episodes_per_adaptation', 1)
gin.bind_parameter(
'collect_eval_loop.policy_class', demo_policy_cls)
continuous_collect_eval.collect_eval_loop()
output_files = tf.io.gfile.glob(os.path.join(
root_dir, 'policy_collect', '*.tfrecord'))
self.assertLen(output_files, 2)
if __name__ == '__main__':
absltest.main()
|
PACKAGE_HEADER = """\
/**
* Dlang vulkan types and function definitions package
*
* Copyright: Copyright 2015-2016 The Khronos Group Inc.; Copyright 2016 Alex Parrill, Peter Particle.
* License: $(https://opensource.org/licenses/MIT, MIT License).
* Authors: Copyright 2016 Alex Parrill, Peter Particle
*/
module {PACKAGE_PREFIX};
public import {PACKAGE_PREFIX}.types;
public import {PACKAGE_PREFIX}.functions;\
""" |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# The MIT License (MIT)
# Copyright (c) 2016 Blockstack
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# pylint: skip-file
from __future__ import print_function
import copy
def process_soa(io, data, name, print_name=False):
"""
Replace {SOA} in template with a set of serialized SOA records
"""
indent = ' ' * len('{} {} IN SOA '.format(name, data['ttl']))
print('{} {} IN SOA {} {} ('.format(name, data['ttl'], data['mname'], data['rname']), file=io)
for item in ['serial', 'refresh', 'retry', 'expire', 'minimum']:
print('{}{} ; {}'.format(indent, data[item], item), file=io)
print('{})'.format(indent), file=io)
def _quote_field(data, field):
"""
Quote a field in a list of DNS records.
Return the new data records.
"""
if data is None:
return None
data[field] = '"%s"' % data[field]
data[field] = data[field].replace(";", "\\;")
return data
def process_rr(io, data, record_type, record_keys, name, print_name):
""" Print out single line record entries """
if data is None:
return
if isinstance(record_keys, str):
record_keys = [record_keys]
elif not isinstance(record_keys, list):
raise ValueError('record_keys must be a string or list of strings')
name_display = name if print_name else ' ' * len(name)
print('{} {} IN {} '.format(name_display, data['ttl'], record_type), end='', file=io)
for i, key in enumerate(record_keys):
print(data[key], end='\n' if i == len(record_keys) - 1 else ' ', file=io)
def process_ns(io, data, name, print_name=False):
process_rr(io, data, 'NS', 'host', name, print_name)
def process_a(io, data, name, print_name=False):
return process_rr(io, data, 'A', 'ip', name, print_name)
def process_aaaa(io, data, name, print_name=False):
return process_rr(io, data, 'AAAA', 'ip', name, print_name)
def process_cname(io, data, name, print_name=False):
return process_rr(io, data, 'CNAME', 'alias', name, print_name)
def process_mx(io, data, name, print_name=False):
return process_rr(io, data, 'MX', ['preference', 'host'], name, print_name)
def process_ptr(io, data, name, print_name=False):
return process_rr(io, data, 'PTR', 'host', name, print_name)
def process_txt(io, data, name, print_name=False):
return process_rr(io, _quote_field(data, 'txt'), 'TXT', 'txt', name, print_name)
def process_srv(io, data, name, print_name=False):
return process_rr(io, data, 'SRV', ['priority', 'weight', 'port', 'target'], name, print_name)
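# Illustrative sketch; the record dicts mirror the keys each helper above expects:
# import io
# buf = io.StringIO()
# process_a(buf, {'ttl': 300, 'ip': '192.0.2.10'}, 'www', print_name=True)
# process_mx(buf, {'ttl': 300, 'preference': 10, 'host': 'mail.example.com.'}, '@', print_name=True)
# print(buf.getvalue())
# # www 300 IN A 192.0.2.10
# # @ 300 IN MX 10 mail.example.com.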
|
#!/usr/bin/env python
"""Run BL server."""
import argparse
from bl_lookup.server import app
from bl_lookup.bl import models, generate_bl_map
parser = argparse.ArgumentParser(description='Start BL lookup server.')
parser.add_argument('--host', default='0.0.0.0', type=str)
parser.add_argument('--port', default=8144, type=int)
parser.add_argument('--model', type=str)
args = parser.parse_args()
data = dict()
uri_maps = dict()
for version in models:
data[version], uri_maps[version] = generate_bl_map(version=version)
if args.model is not None:
data['custom'], uri_maps['custom'] = generate_bl_map(url=args.model)
app.userdata = {
'data': data,
'uri_maps': uri_maps,
}
app.run(
host=args.host,
port=args.port,
debug=False,
)
|
from quo.table import Table
data = [
["Name", "Gender", "Age"],
["Alice", "F", 24],
["Bob", "M", 19],
["Dave", "", 24]
]
Table(data)
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Keras initializers useful for TFP Keras layers."""
# Dependency imports
import numpy as np
import tensorflow.compat.v2 as tf
class BlockwiseInitializer(tf.keras.initializers.Initializer):
"""Initializer which concats other intializers."""
def __init__(self, initializers, sizes, validate_args=False):
"""Creates the `BlockwiseInitializer`.
Args:
initializers: `list` of Keras initializers, e.g., `"glorot_uniform"` or
`tf.keras.initializers.Constant(0.5413)`.
sizes: `list` of `int` scalars representing the number of elements
associated with each initializer in `initializers`.
validate_args: Python `bool` indicating we should do (possibly expensive)
graph-time assertions, if necessary.
"""
self._initializers = initializers
self._sizes = sizes
self._validate_args = validate_args
@property
def initializers(self):
return self._initializers
@property
def sizes(self):
return self._sizes
@property
def validate_args(self):
return self._validate_args
def __call__(self, shape, dtype=None):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. If not provided will return tensor
of `tf.float32`.
"""
dtype = tf.as_dtype(dtype or tf.keras.backend.floatx())
if isinstance(shape, tf.TensorShape):
shape_dtype = tf.int32
shape_ = np.int32(shape)
else:
if not tf.is_tensor(shape):
shape = tf.convert_to_tensor(
value=shape, dtype_hint=tf.int32, name='shape')
shape_dtype = shape.dtype.base_dtype
shape_ = tf.get_static_value(shape, partial=True)
sizes_ = tf.get_static_value(self.sizes)
if sizes_ is not None:
sizes_ = np.array(sizes_, shape_dtype.as_numpy_dtype)
assertions = []
message = 'Rightmost dimension of shape must equal `sum(sizes)`.'
n = shape[-1] if shape_ is None or shape_[-1] is None else shape_[-1]
if sizes_ is not None and not tf.is_tensor(n):
if sum(sizes_) != n:
raise ValueError(message)
elif self.validate_args:
assertions.append(tf.debugging.assert_equal(
shape[-1], tf.reduce_sum(self.sizes), message=message))
s = (shape[:-1]
if shape_ is None or any(s is None for s in shape_[:-1])
else shape_[:-1])
if sizes_ is not None and isinstance(s, (np.ndarray, np.generic)):
return tf.concat([
tf.keras.initializers.get(init)(np.concatenate([
s, np.array([e], shape_dtype.as_numpy_dtype)], axis=-1), dtype)
for init, e in zip(self.initializers, sizes_.tolist())
], axis=-1)
sizes = tf.split(self.sizes, len(self.initializers))
return tf.concat([
tf.keras.initializers.get(init)(tf.concat([s, e], axis=-1), dtype)
for init, e in zip(self.initializers, sizes)
], axis=-1)
def get_config(self):
"""Returns initializer configuration as a JSON-serializable dict."""
return {
'initializers': [
tf.initializers.serialize(
tf.keras.initializers.get(init))
for init in self.initializers
],
'sizes': self.sizes,
'validate_args': self.validate_args,
}
@classmethod
def from_config(cls, config):
"""Instantiates an initializer from a configuration dictionary."""
return cls(**{
'initializers': [tf.initializers.deserialize(init)
for init in config.get('initializers', [])],
'sizes': config.get('sizes', []),
'validate_args': config.get('validate_args', False),
})
tf.keras.utils.get_custom_objects()[
'BlockwiseInitializer'] = BlockwiseInitializer
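# Minimal usage sketch (example shapes only): the last axis is split into blocks of
# `sizes`, and each block is filled by its own initializer.
# init = BlockwiseInitializer(['glorot_uniform', tf.keras.initializers.Constant(0.5413)],
#                             sizes=[3, 2])
# kernel = init(shape=[4, 5])  # columns 0-2 glorot-initialized, columns 3-4 set to 0.5413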
|
import numpy as np
from numpy import exp, log, log10
# Constants
version = 20180411
# %% Parameters for out-of-equilibrium Bayes factor calculations
trials = 5
n12_range = [1e3, 1e4]
n12_points = 10
n12s = np.logspace(log10(n12_range[0]), log10(n12_range[1]), num=n12_points)
jobs_count_tars = 204 # max: 224
jobs_count_tars_common = 3000 # 1000
jobs_count_t_bayes = 132
jobs_count_onsager = 10
manager_script = "job_manager.py"
DETACHED_PROCESS = 0x00000008
dim = 2 # number of dimensions (1 or 2)
L = 1.0 # linear size of the system, in um
x_min = 0.0 # in um
x_max = L # in um
t_step = 0.04 # in seconds
# Diffusivity constants
D_0 = 0.01 # in um^2/s
q = 1 # number of D peaks
gamma_drag = 400.0 # viscous drag, in fN * s / um
# Total force constants
# >> saw-tooth case for main check <<
# D_case = 1
# D_ratio = 2.0 # Ratio between max and min of the diffusivity
# ksi_range = (-25.0, 25.0) # range of ratios of the total force to diffusivity gradient
# ksi_step = 0.5 # sampling step in the interval
# trials = 1000 # 1000
# N = int(1.0e4) # int(1.0e6) or int(1.0e5)
# >> round-well case for gradient jump check <<
D_case = 2
D_ratio = 10.0 # Ratio between max and min of the diffusivity
well_radius = L / 6.0
ksi_range = (0.5, 25.0) # range of ratios of the total force to diffusivity gradient
ksi_step = 100.0 # sampling step in the interval
N = int(1.0e5) # int(1.0e6) or int(1.0e5)
# Simulation parameters
progress_update_interval = 100.0
# Integer. How many intermediate smaller steps are made before the next point is saved
internal_steps_number = 100
k = 2.0 * q / L * (D_ratio - 1.0) # D'/D
D_grad_abs = k * D_0 # absolute value of the diffusivity gradient used for force calculations
# Batch parameters
sleep_time = 0.2
logs_folder = "./logs/"
output_folder = "./output/"
args_file = "arguments.dat"
args_lock = "arguments.lock"
position_file = 'position.dat'
position_lock = 'position.lock'
stop_file = 'stop'
lock_timeout = 600 # s
output_folder = './output/'
CSV_DELIMITER = ';'
# k = 100.0 # diffusivity gradient, D'/D, in um^-1
max_D_case = 7
max_f_case = 8
str_mode = 'periodic' # bc_type = ENUM_BC_PERIODIC;
|
#!/usr/bin/env python
"""
This application replicates the switch CLI command 'show interface fex'
It largely uses raw queries to the APIC API
"""
from acitoolkit import Credentials, Session
from tabulate import tabulate
def show_interface_fex(apic, node_ids):
"""
Show interface fex
:param apic: Session instance logged in to the APIC
:param node_ids: List of strings containing node ids
:return: None
"""
for node_id in node_ids:
query_url = ('/api/mo/topology/pod-1/node-%s.json?query-target=subtree'
'&target-subtree-class=satmDExtCh' % node_id)
resp = apic.get(query_url)
if not resp.ok:
print('Could not collect APIC data for switch %s.' % node_id)
print(resp.text)
return
fex_list = {}
for obj in resp.json()['imdata']:
obj_attr = obj['satmDExtCh']['attributes']
fex_list[obj_attr['id']] = (obj_attr['model'], obj_attr['ser'])
query_url = ('/api/mo/topology/pod-1/node-%s.json?query-target=subtree'
'&target-subtree-class=satmFabP' % node_id)
resp = apic.get(query_url)
if not resp.ok:
print('Could not collect APIC data for switch %s.' % node_id)
print(resp.text)
return
data = []
for obj in resp.json()['imdata']:
obj_attr = obj['satmFabP']['attributes']
fex = obj_attr['extChId']
fabric_port = obj_attr['id']
if fabric_port.startswith('po'):
continue
fabric_port_state = obj_attr['fsmSt']
fex_uplink = obj_attr['remoteLinkId']
try:
model, serial = fex_list[fex]
except KeyError:
model, serial = '--', '--'
data.append((int(fex), fabric_port, fabric_port_state, fex_uplink, model, serial))
data.sort(key=lambda tup: tup[0])
if len(data):
print('Switch:', node_id)
print(tabulate(data, headers=['Fex', 'Fabric Port', 'Fabric Port State',
'Fex uplink', 'Fex model', 'Fex serial']))
print('\n')
def get_node_ids(apic, args):
"""
Get the list of node ids from the command line arguments.
If none, get all of the node ids
:param apic: Session instance logged in to the APIC
:param args: Command line arguments
:return: List of strings containing node ids
"""
if args.switch is not None:
names = [args.switch]
else:
names = []
query_url = ('/api/node/class/fabricNode.json?'
'query-target-filter=eq(fabricNode.role,"leaf")')
resp = apic.get(query_url)
if not resp.ok:
print('Could not get switch list from APIC.')
return
nodes = resp.json()['imdata']
for node in nodes:
names.append(str(node['fabricNode']['attributes']['id']))
return names
def main():
"""
Main common routine for show interface description
:return: None
"""
# Set up the command line options
creds = Credentials(['apic', 'nosnapshotfiles'],
description=("This application replicates the switch "
"CLI command 'show interface fex'"))
creds.add_argument('-s', '--switch',
type=str,
default=None,
help='Specify a particular switch id, e.g. "101"')
args = creds.get()
# Login to APIC
apic = Session(args.url, args.login, args.password)
if not apic.login().ok:
print('%% Could not login to APIC')
return
# Show interface description
node_ids = get_node_ids(apic, args)
show_interface_fex(apic, node_ids)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
|
from hsclient.hydroshare import Aggregation, File, HydroShare, Resource
|
from .apps import GitHubApp
App = GitHubApp
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = [
'GetSizesResult',
'AwaitableGetSizesResult',
'get_sizes',
]
@pulumi.output_type
class GetSizesResult:
"""
A collection of values returned by getSizes.
"""
def __init__(__self__, filters=None, id=None, sizes=None, sorts=None):
if filters and not isinstance(filters, list):
raise TypeError("Expected argument 'filters' to be a list")
pulumi.set(__self__, "filters", filters)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if sizes and not isinstance(sizes, list):
raise TypeError("Expected argument 'sizes' to be a list")
pulumi.set(__self__, "sizes", sizes)
if sorts and not isinstance(sorts, list):
raise TypeError("Expected argument 'sorts' to be a list")
pulumi.set(__self__, "sorts", sorts)
@property
@pulumi.getter
def filters(self) -> Optional[Sequence['outputs.GetSizesFilterResult']]:
return pulumi.get(self, "filters")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def sizes(self) -> Sequence['outputs.GetSizesSizeResult']:
return pulumi.get(self, "sizes")
@property
@pulumi.getter
def sorts(self) -> Optional[Sequence['outputs.GetSizesSortResult']]:
return pulumi.get(self, "sorts")
class AwaitableGetSizesResult(GetSizesResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetSizesResult(
filters=self.filters,
id=self.id,
sizes=self.sizes,
sorts=self.sorts)
def get_sizes(filters: Optional[Sequence[pulumi.InputType['GetSizesFilterArgs']]] = None,
sorts: Optional[Sequence[pulumi.InputType['GetSizesSortArgs']]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSizesResult:
"""
Retrieves information about the Droplet sizes that DigitalOcean supports, with
the ability to filter and sort the results. If no filters are specified, all sizes
will be returned.
:param Sequence[pulumi.InputType['GetSizesFilterArgs']] filters: Filter the results.
The `filter` block is documented below.
:param Sequence[pulumi.InputType['GetSizesSortArgs']] sorts: Sort the results.
The `sort` block is documented below.
"""
__args__ = dict()
__args__['filters'] = filters
__args__['sorts'] = sorts
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('digitalocean:index/getSizes:getSizes', __args__, opts=opts, typ=GetSizesResult).value
return AwaitableGetSizesResult(
filters=__ret__.filters,
id=__ret__.id,
sizes=__ret__.sizes,
sorts=__ret__.sorts)
|
from django.apps import AppConfig
class MaingateConfig(AppConfig):
name = 'MainGate'
|
a3 = int(input())
a2 = int(input())
a1 = int(input())
atotal = (a3 * 3) + (a2 * 2) + a1
b3 = int(input())
b2 = int(input())
b1 = int(input())
btotal = (b3 * 3) + (b2 * 2) + b1
if atotal > btotal:
print("A")
elif atotal < btotal:
print("B")
else:
print("T")
|
"""
2015 Day 14
https://adventofcode.com/2015/day/14
"""
from dataclasses import dataclass
from typing import Sequence, Tuple
import aocd # type: ignore
@dataclass(frozen=True)
class Reindeer:
"""
Object encapsulating a reindeer's name and inherent attributes.
"""
name: str
speed: int
flytime: int
rest_needed: int
@classmethod
def from_description_line(cls, line: str) -> "Reindeer":
"""
Parse one reindeer from a line in the puzzle description.
"""
words = line.split()
return cls(
name=words[0],
speed=int(words[3]),
flytime=int(words[6]),
rest_needed=int(words[13]),
)
@classmethod
def all_from_description(cls, description: str) -> "Sequence[Reindeer]":
"""
Parse all reindeer from the puzzle description.
"""
return tuple(
cls.from_description_line(line) for line in description.split("\n")
)
@dataclass
class ReindeerStatus:
"""
Object containing the reindeer's current status in the race - resting or flying - along with
    how much time until that is due to change again and the total distance travelled and points
scored so far.
"""
reindeer: Reindeer
flying: bool
time_left: int
distance_travelled: int
score: int
@classmethod
def from_reindeer(cls, reindeer: Reindeer) -> "ReindeerStatus":
"""
Create the default status for the first split-second of the race for the given reindeer.
"""
return cls(
reindeer=reindeer,
flying=True,
time_left=reindeer.flytime,
distance_travelled=0,
score=0,
)
def progress(self):
"""
Progress to the next minute, also then changing status to flying or resting if necessary.
"""
self.time_left -= 1
if self.flying:
self.distance_travelled += self.reindeer.speed
if self.time_left == 0:
self.flying = not self.flying
self.time_left = (
self.reindeer.flytime if self.flying else self.reindeer.rest_needed
)
def add_score(self, distance_to_score: int):
"""
Record a point scored if the reindeer has travelled at least as far as the distance
provided.
"""
if self.distance_travelled >= distance_to_score:
self.score += 1
def race(reindeer: Sequence[Reindeer], seconds: int = 2503) -> Tuple[int, int]:
"""
    Race the provided group of reindeer and return the furthest distance travelled and the score of
the winner.
"""
racers = [ReindeerStatus.from_reindeer(individual) for individual in reindeer]
for _ in range(seconds):
for racer in racers:
racer.progress()
furthest_travelled = max(racer.distance_travelled for racer in racers)
for racer in racers:
racer.add_score(furthest_travelled)
return (
max(racer.distance_travelled for racer in racers),
max(racer.score for racer in racers),
)
def test_example():
"""
    Example from the puzzle description.
"""
example = "\n".join(
(
"Comet can fly 14 km/s for 10 seconds, but then must rest for 127 seconds.",
"Dancer can fly 16 km/s for 11 seconds, but then must rest for 162 seconds.",
)
)
example_racers = Reindeer.all_from_description(example)
assert race(example_racers, 1000) == (1120, 689)
def main():
"""
Calculate and output the solutions based on the real puzzle input.
"""
data = aocd.get_data(year=2015, day=14)
reindeer = Reindeer.all_from_description(data)
distance, score = race(reindeer)
print(f"Part 1: {distance}")
print(f"Part 2: {score}")
if __name__ == "__main__":
main()
|
from django.conf.urls import url, include
from django.contrib import admin
from django.contrib.staticfiles.urls import static
from . import settings
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^auth/', include('loginsys.urls')),
url(r'^', include('mainpage.urls')),
url(r'^personal/', include('persCabinet.urls'))
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
import numpy as np
from sklearn.cluster import AgglomerativeClustering
import logging
class Clusterer(object):
"""
Abstract class for different clustering
variants based on the online clustering.
"""
def __init__(self, *args, **kwargs):
"""
Initializes the clusterer.
"""
raise NotImplementedError("This class is abstract. Derive it.")
def fit(self, X):
"""
Clusters based on the given dictionary of discriminators.
Parameters:
-----------
X : Dictionary of discriminators to be clustered.
        n_clusters : Set in __init__; the number of clusters to find. It must be None if distance_threshold is not None.
        distance_threshold : Set in __init__; the threshold below which clusters will not be merged. If not None, n_clusters must be None.
Returns:
--------
clusters : Dictionary of discriminator ids and their offline cluster.
"""
raise NotImplementedError("This method is abstract. Override it.")
class MinDistanceClustering(Clusterer):
"""
Agglomerative clustering based on the
intersection levels of WiSARD's discriminators
without merging them. It resembles single linkage.
"""
def __init__(self, n_clusters=None, distance_threshold=None):
if (n_clusters is None and distance_threshold is None) or (
n_clusters is not None and distance_threshold is not None):
raise KeyError(
"Check parameters n_clusters and distance_threshold!")
self.n_clusters = n_clusters
self.distance_threshold = distance_threshold
def fit(self, X):
"""
Clusters based on the given dictionary of discriminators.
Parameters:
-----------
X : Dictionary of discriminators to be clustered.
        n_clusters : Set in __init__; the number of clusters to find. It must be None if distance_threshold is not None.
        distance_threshold : Set in __init__; the similarity threshold below which clusters will not be merged. If not None, n_clusters must be None.
Returns:
--------
clusters : Dictionary of discriminator ids and their offline cluster.
"""
# Creating lists needed throughout the clustering process
discr = list(X.values())
ids = [[x] for x in X.keys()]
# Compute similarities between every pair of objects in the data set
distances = np.zeros((len(discr), len(discr)))
for i in range(len(discr)):
for j in range(len(discr)):
if i > j:
distances[i][j] = discr[i].intersection_level(discr[j])
distances[j][i] = distances[i][j]
logging.info("Calculated distance matrix.")
while True:
# Stop conditions
            if self.n_clusters is not None and len(distances) <= self.n_clusters:
logging.info("Number of clusters reached.")
break
if self.distance_threshold is not None:
if np.max(distances.flatten()) < self.distance_threshold:
logging.info(
"Nothing to merge anymore, discriminators too dissimilar.")
break
# Get position of highest intersection
pos_highest = np.unravel_index(distances.argmax(), distances.shape)
first = pos_highest[0]
second = pos_highest[1]
if distances[first][second] == 0.0:
logging.info(
"Can't cluster anymore, discriminators too dissimilar.")
break
logging.info(
"Highest intersection level at {} with {}%".format(
pos_highest, distances[first][second]))
# Combine discriminators and delete unnecessary one
ids[first] = sorted(ids[first] + ids[second])
del ids[second]
del discr[second]
# Recalculate distances
for i in range(len(distances)):
if i != first:
distances[i][first] = min(
distances[first][i], distances[second][i])
distances[first][i] = distances[i][first]
# Reshape distance matrix by deleting merged rows and columns
distances = np.delete(distances, second, 0)
distances = np.delete(distances, second, 1)
logging.info(
"Recalculated distances and shrinked distance matrix to size: {}.".format(
distances.shape))
# Calculate clustering labels
clusters = dict()
for i, group in enumerate(ids):
for id_ in group:
clusters[id_] = int(i)
return clusters
class MergeClustering(MinDistanceClustering):
"""
Agglomerative clustering based on the
intersection levels of WiSARD's discriminators
with merging them.
"""
def __init__(self, n_clusters=None, distance_threshold=None):
if (n_clusters is None and distance_threshold is None) or (
n_clusters is not None and distance_threshold is not None):
raise KeyError(
"Check parameters n_clusters and distance_threshold!")
self.n_clusters = n_clusters
self.distance_threshold = distance_threshold
def fit(self, X):
"""
Clusters based on the given dictionary of discriminators.
Parameters:
-----------
X : Dictionary of discriminators to be clustered.
        n_clusters : Set in __init__; the number of clusters to find. It must be None if distance_threshold is not None.
        distance_threshold : Set in __init__; the similarity threshold below which clusters will not be merged. If not None, n_clusters must be None.
Returns:
--------
clusters : Dictionary of discriminator ids and their offline cluster.
"""
# Creating lists needed throughout the clustering process
discr = list(X.values())
ids = [[x] for x in X.keys()]
# Compute similarities between every pair of objects in the data set
distances = np.zeros((len(discr), len(discr)))
for i in range(len(discr)):
for j in range(len(discr)):
if i > j:
distances[i][j] = discr[i].intersection_level(discr[j])
distances[j][i] = distances[i][j]
logging.info("Calculated distance matrix.")
while True:
# Stop conditions
            if self.n_clusters is not None and len(distances) <= self.n_clusters:
logging.info("Number of clusters reached.")
break
if self.distance_threshold is not None:
if np.max(distances.flatten()) < self.distance_threshold:
logging.info(
"Nothing to merge anymore, discriminators too dissimilar.")
break
# Get position of highest intersection
pos_highest = np.unravel_index(distances.argmax(), distances.shape)
first = pos_highest[0]
second = pos_highest[1]
if distances[first][second] == 0.0:
logging.info(
"Can't cluster anymore, discriminators too dissimilar.")
break
logging.info(
"Highest intersection level at {} with {}%".format(
pos_highest, distances[first][second]))
# Merge discriminators and delete unnecessary one
discr[first].merge(discr[second])
ids[first] = sorted(ids[first] + ids[second])
del ids[second]
del discr[second]
logging.info(
"Merging discriminator {} into {} and deleting {}.".format(
second, first, second))
# Reshape distance matrix by deleting merged rows and columns
distances = np.delete(distances, second, 0)
distances = np.delete(distances, second, 1)
# Recalculate distances
for i in range(len(distances)):
if i != first:
distances[i][first] = discr[first].intersection_level(
discr[i])
distances[first][i] = distances[i][first]
logging.info(
"Recalculated distances and shrinked distance matrix to size: {}.".format(
distances.shape))
# Calculate clustering labels
clusters = dict()
for i, group in enumerate(ids):
for id_ in group:
clusters[id_] = int(i)
return clusters
class CentroidClustering(Clusterer):
"""
This clusterer works based on the agglomerative
clustering implementation from sklearn and the
approximate centroids of the discriminators.
"""
def __init__(self, n_clusters=None, distance_threshold=None):
if (n_clusters is None and distance_threshold is None) or (
n_clusters is not None and distance_threshold is not None):
raise KeyError(
"Check parameters n_clusters and distance_threshold!")
self.n_clusters = n_clusters
self.distance_threshold = distance_threshold
def fit(self, X, centroids, linkage="ward"):
"""
        Clusters the approximate centroids of the given discriminators using
        sklearn's AgglomerativeClustering, configured with the n_clusters /
        distance_threshold values supplied to __init__.
        Parameters:
        -----------
        X : Dictionary of discriminators to be clustered.
        centroids : Array-like of approximate centroids, one per discriminator, in the same order as X.
        linkage : Linkage criterion passed to AgglomerativeClustering (defaults to "ward").
Returns:
--------
clusters : Dictionary of discriminator ids and their offline cluster.
"""
# Cluster using Agglomerative Clustering
clusterer = AgglomerativeClustering(
n_clusters=self.n_clusters,
distance_threshold=self.distance_threshold,
linkage=linkage)
clusterer.fit(centroids)
# Group discriminator ids into cluster groups
clusters = dict()
for x, c in zip(X.keys(), clusterer.labels_):
clusters[x] = c
return clusters
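if __name__ == "__main__":
    # Illustrative demo. _SetDiscriminator is a hypothetical stand-in for a
    # WiSARD discriminator; the clusterers above only rely on an
    # intersection_level() method (plus merge() for MergeClustering), so a
    # simple Jaccard similarity over sets is enough for a sketch.
    class _SetDiscriminator(object):
        def __init__(self, items):
            self.items = set(items)
        def intersection_level(self, other):
            union = self.items | other.items
            return len(self.items & other.items) / len(union) if union else 0.0
        def merge(self, other):
            self.items |= other.items
    discriminators = {
        "a": _SetDiscriminator({1, 2, 3}),
        "b": _SetDiscriminator({1, 2, 4}),
        "c": _SetDiscriminator({7, 8, 9}),
    }
    # "a" and "b" overlap heavily, so they should land in the same cluster.
    print(MinDistanceClustering(n_clusters=2).fit(discriminators))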
|
# Generated by Django 3.1.7 on 2021-03-02 15:10
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Country',
fields=[
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('name', models.CharField(max_length=255)),
('alpha_two_code', models.CharField(blank=True, max_length=2, null=True)),
],
options={
'verbose_name_plural': 'Countries',
},
),
migrations.CreateModel(
name='Shop',
fields=[
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('name', models.CharField(max_length=255)),
('image', models.URLField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='ShopBranch',
fields=[
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('title', models.CharField(max_length=255)),
('street_address', models.CharField(max_length=255)),
('city', models.CharField(max_length=255)),
('region', models.CharField(blank=True, max_length=255, null=True)),
('state', models.CharField(blank=True, max_length=255, null=True)),
('postal_code', models.CharField(max_length=7)),
('latitude', models.FloatField()),
('longitude', models.FloatField()),
('eta_rang', models.CharField(blank=True, max_length=7, null=True)),
('description', models.TextField()),
('opening_time', models.TimeField()),
('closing_time', models.TimeField()),
('is_active', models.BooleanField(default=False)),
],
options={
'verbose_name_plural': 'Shop Branches',
},
),
migrations.CreateModel(
name='ShopType',
fields=[
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('name', models.CharField(max_length=16)),
],
),
migrations.CreateModel(
name='UserBranch',
fields=[
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('permission', models.IntegerField(choices=[(0, 'Viewer'), (1, 'Moderator'), (2, 'Editor'), (3, 'Admin')], default=0)),
('branch', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='shop.shopbranch')),
],
options={
'verbose_name_plural': 'User Branches',
},
),
]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Samvaran Kashyap Rallabandi - <srallaba@redhat.com>
#
# Topology validator for Ansible based infra provisioning tool linch-pin
from ansible.module_utils.basic import *
import datetime
import sys
import json
import os
import shlex
import tempfile
import yaml
import jsonschema
from jsonschema import validate
DOCUMENTATION = '''
---
version_added: "0.1"
module: topo_check
short_description: Topology validator module in ansible
description:
- This module allows a user to validate the yaml/json
provisioning topologies against json schema.
options:
data_file:
description:
path to topology file can be in json/yaml format
required: true
data_format:
description:
format of the topology file
default: yaml
schema_file:
description:
Schema to be validated against
required: true
author: Samvaran Kashyap Rallabandi -
'''
class JSONSchema:
def __init__(self, data_file_path, schema_file_path):
self.data_file = data_file_path
self.schema_file = schema_file_path
def validate(self):
data = self.get_data(self.data_file)
schema = open(self.schema_file).read()
if type(data) is dict:
return data
try:
result = jsonschema.validate(json.loads(data), json.loads(schema))
return {"status": True, "data": json.loads(data)}
except jsonschema.ValidationError as e:
return {"error": e.message, "status": False}
except jsonschema.SchemaError as e:
return {"error": e, "status": False}
except Exception as e:
return {"error": e, "status": False}
def get_data(self, file_path):
ext = file_path.split(".")[-1]
if (ext == "yml" or ext == "yaml"):
fd = open(file_path)
return json.dumps(yaml.safe_load(fd))
if (ext == "json"):
            return open(file_path).read()
else:
return {"error": "Invalid File Format"}
def check_file_paths(module, *args):
for file_path in args:
if not os.path.exists(file_path):
msg = "File not found %s not found" % (file_path)
module.fail_json(msg=msg)
if not os.access(file_path, os.R_OK):
msg = "File not accesible %s not found" % (file_path)
module.fail_json(msg=msg)
if os.path.isdir(file_path):
msg = "Recursive directory not supported %s " % (file_path)
module.fail_json(msg=msg)
def validate_grp_names(data):
res_grps = data['resource_groups']
if 'resource_group_vars' in data.keys():
res_grp_vars = data['resource_group_vars']
else:
res_grp_vars = []
res_grp_names = [x['resource_group_name'] for x in res_grps]
if len(res_grp_vars) > 0:
res_grp_vars = [x['resource_group_name'] for x in res_grp_vars]
dup_grp_names = set(res_grp_names)
dup_grp_vars = set(res_grp_vars)
if len(dup_grp_vars) != len(res_grp_vars) or \
len(dup_grp_names) != len(res_grp_names):
msg = "error: duplicate names found in resource_group_name \
attributes please check the results for duplicate names"
return {"msg": msg, "result": str(dup_grp_names)+str(dup_grp_vars)}
else:
return True
def validate_values(module, data_file_path):
data = open(data_file_path).read()
data = yaml.safe_load(data)
status = validate_grp_names(data)
    if status is not True:
module.fail_json(msg="%s" % (json.dumps(status)))
else:
return status
def main():
module = AnsibleModule(
argument_spec={
'data': {'required': True, 'aliases': ['topology_file']},
            'schema': {'required': True, 'aliases': ['schema_file']},
'data_format': {'required': False,'choices':['json','yaml','yml']},
},
required_one_of=[],
supports_check_mode=True
)
data_file_path = os.path.expanduser(module.params['data'])
schema_file_path = os.path.expanduser(module.params['schema'])
check_file_paths(module, data_file_path, schema_file_path)
validate_values(module, data_file_path)
schema_obj = JSONSchema(data_file_path, schema_file_path)
output = schema_obj.validate()
resp = {"path": data_file_path, "content": output}
if output["status"]:
changed = True
module.exit_json(isvalid=changed, output=output)
else:
module.fail_json(msg=resp)
main()
|
"""
A simple set of math methods that we can build our
logging infrastructure on top of.
"""
import logging
import lib.logger
def add_some_numbers(a, b):
""" Adds the passed parameters and returns the result.
"""
logger_name = 'add_some_numbers'
logger = logging.getLogger(__name__).getChild(logger_name)
result = a + b
logger.info("Result of add_some_numbers: {}".format(result))
return result
if __name__ == "__main__":
""" Rudimentary tests, nothing should be returned.
In the event of a test failure, we get an AssertionError.
"""
try:
result = add_some_numbers(4,5)
expected_result = 9
assert result == expected_result
add_logger.info("PASS: Add succeeded, {} == {}".format(result, expected_result))
except AssertionError:
add_logger.info("FAIL: Add failed, {} != {}".format(result, expected_result))
try:
result = add_some_numbers(5,5)
expected_result = 9
assert not (result == expected_result)
add_logger.info("PASS: Add failed, {} != {}".format(result, expected_result))
except AssertionError:
add_logger.info("FAIL: Add succeeded, {} == {}".format(result, expected_result))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Given a tryjob and perf profile, generates an AFDO profile."""
from __future__ import print_function
import argparse
import distutils.spawn
import os
import os.path
import shutil
import subprocess
import sys
import tempfile
_CREATE_LLVM_PROF = 'create_llvm_prof'
_GS_PREFIX = 'gs://'
def _fetch_gs_artifact(remote_name, local_name):
assert remote_name.startswith(_GS_PREFIX)
subprocess.check_call(['gsutil', 'cp', remote_name, local_name])
def _fetch_and_maybe_unpack(remote_name, local_name):
unpackers = [
('.tar.bz2', ['tar', 'xaf']),
('.bz2', ['bunzip2']),
('.tar.xz', ['tar', 'xaf']),
('.xz', ['xz', '-d']),
]
unpack_ext = None
unpack_cmd = None
for ext, unpack in unpackers:
if remote_name.endswith(ext):
unpack_ext, unpack_cmd = ext, unpack
break
download_to = local_name + unpack_ext if unpack_ext else local_name
_fetch_gs_artifact(remote_name, download_to)
if unpack_cmd is not None:
print('Unpacking', download_to)
subprocess.check_output(unpack_cmd + [download_to])
assert os.path.exists(local_name)
def _generate_afdo(perf_profile_loc, tryjob_loc, output_name):
if perf_profile_loc.startswith(_GS_PREFIX):
local_loc = 'perf.data'
_fetch_and_maybe_unpack(perf_profile_loc, local_loc)
perf_profile_loc = local_loc
chrome_in_debug_loc = 'debug/opt/google/chrome/chrome.debug'
debug_out = 'debug.tgz'
_fetch_gs_artifact(os.path.join(tryjob_loc, 'debug.tgz'), debug_out)
print('Extracting chrome.debug.')
# This has tons of artifacts, and we only want Chrome; don't waste time
# extracting the rest in _fetch_and_maybe_unpack.
subprocess.check_call(['tar', 'xaf', 'debug.tgz', chrome_in_debug_loc])
# Note that the AFDO tool *requires* a binary named `chrome` to be present if
# we're generating a profile for chrome. It's OK for this to be split debug
# information.
os.rename(chrome_in_debug_loc, 'chrome')
print('Generating AFDO profile.')
subprocess.check_call([
_CREATE_LLVM_PROF, '--out=' + output_name, '--binary=chrome',
'--profile=' + perf_profile_loc
])
def _abspath_or_gs_link(path):
if path.startswith(_GS_PREFIX):
return path
return os.path.abspath(path)
def _tryjob_arg(tryjob_arg):
# Forward gs args through
if tryjob_arg.startswith(_GS_PREFIX):
return tryjob_arg
# Clicking on the 'Artifacts' link gives us a pantheon link that's basically
# a preamble and gs path.
pantheon = 'https://pantheon.corp.google.com/storage/browser/'
if tryjob_arg.startswith(pantheon):
return _GS_PREFIX + tryjob_arg[len(pantheon):]
# Otherwise, only do things with a tryjob ID (e.g. R75-11965.0.0-b3648595)
if not tryjob_arg.startswith('R'):
raise ValueError('Unparseable tryjob arg; give a tryjob ID, pantheon '
'link, or gs:// link. Please see source for more.')
chell_path = 'chromeos-image-archive/chell-chrome-pfq-tryjob/'
# ...And assume it's from chell, since that's the only thing we generate
# profiles with today.
return _GS_PREFIX + chell_path + tryjob_arg
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--perf_profile',
required=True,
help='Path to our perf profile. Accepts either a gs:// path or local '
'filepath.')
parser.add_argument(
'--tryjob',
required=True,
type=_tryjob_arg,
help="Path to our tryjob's artifacts. Accepts a gs:// path, pantheon "
'link, or tryjob ID, e.g. R75-11965.0.0-b3648595. In the last case, '
'the assumption is that you ran a chell-chrome-pfq-tryjob.')
parser.add_argument(
'-o',
'--output',
default='afdo.prof',
help='Where to put the AFDO profile. Default is afdo.prof.')
parser.add_argument(
'-k',
'--keep_artifacts_on_failure',
action='store_true',
help="Don't remove the tempdir on failure")
args = parser.parse_args()
if not distutils.spawn.find_executable(_CREATE_LLVM_PROF):
sys.exit(_CREATE_LLVM_PROF + ' not found; are you in the chroot?')
profile = _abspath_or_gs_link(args.perf_profile)
afdo_output = os.path.abspath(args.output)
initial_dir = os.getcwd()
temp_dir = tempfile.mkdtemp(prefix='generate_afdo')
success = True
try:
os.chdir(temp_dir)
_generate_afdo(profile, args.tryjob, afdo_output)
# The AFDO tooling is happy to generate essentially empty profiles for us.
# Chrome's profiles are often 8+ MB; if we only see a small fraction of
# that, something's off. 512KB was arbitrarily selected.
if os.path.getsize(afdo_output) < 512 * 1024:
raise ValueError('The AFDO profile is suspiciously small for Chrome. '
'Something might have gone wrong.')
except:
success = False
raise
finally:
os.chdir(initial_dir)
if success or not args.keep_artifacts_on_failure:
shutil.rmtree(temp_dir, ignore_errors=True)
else:
print('Artifacts are available at', temp_dir)
if __name__ == '__main__':
sys.exit(main())
|
from forum.models import User, SubscriptionSettings, QuestionSubscription
from django.core.management.base import NoArgsCommand
class Command(NoArgsCommand):
def handle_noargs(self, **options):
users = User.objects.all()
for u in users:
s = SubscriptionSettings(user=u)
s.save()
user_questions = u.questions.all()
for q in user_questions:
sub = QuestionSubscription(user=u, question=q)
sub.save() |
import tensorflow as tf
def get_max_quant_value(num_bits: int) -> float:
return 2 ** (num_bits - 1) - 1
def quantize(input_tensor: tf.Tensor, scale: float, num_bits: int):
"""
https://arxiv.org/pdf/1910.06188.pdf
"""
threshold = tf.cast(get_max_quant_value(num_bits), tf.float32)
return tf.clip_by_value(tf.math.round(input_tensor * scale), -threshold, threshold)
def dequantize(input_tensor: tf.Tensor, scale: float):
return input_tensor / scale
@tf.function
def quantize_and_dequantize(x: tf.Tensor, scale: float, num_bits=8):
quantized = quantize(x, scale=scale, num_bits=num_bits)
return dequantize(quantized, scale=scale)
@tf.function
def get_weight_scale_factor(weight: tf.Tensor, num_bits: int) -> float:
"""
https://arxiv.org/pdf/1910.06188.pdf
"""
threshold = tf.math.reduce_max(tf.math.abs(weight))
return tf.cast(get_max_quant_value(num_bits), tf.float32) / threshold
@tf.function
def fake_quantize(x: tf.Tensor, num_bits=8):
scale = get_weight_scale_factor(x, num_bits)
return quantize_and_dequantize(x, scale, num_bits)
@tf.custom_gradient # noqa
def fake_quantize(x, num_bits=8):
def straight_through_estimator(dy):
return dy
scale = get_weight_scale_factor(x, num_bits)
return quantize_and_dequantize(x, scale, num_bits), straight_through_estimator
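if __name__ == "__main__":
    # Illustrative usage sketch. The @tf.custom_gradient definition of
    # fake_quantize above overrides the earlier @tf.function version, so this
    # call exercises the straight-through-estimator variant.
    example_weights = tf.random.normal((4, 4))
    recovered = fake_quantize(example_weights, 8)
    # Round-tripping through 8-bit fake quantization should only perturb the
    # weights slightly.
    print("max abs round-trip error:",
          float(tf.reduce_max(tf.abs(example_weights - recovered))))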
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 20 10:33:04 2020
@author: dianasimpson
"""
# Imported packages and files
import tkinter as tk
from tkinter import ttk, Menu
# Global variables for formatting
FT_DOG = 'dark olive green'
FT_GRY = '#E7E7E7'
LB_N16 = ('Times', 16)
LB_B16 = ('Times Bold', 16)
LB_N18 = ('Times', 18)
LB_N20 = ('Times', 20)
LB_B20 = ('Times Bold', 20)
# Create GUI class
class VRES_GUI():
# Initialising class
def __init__(self):
# Create instance
self.win = tk.Tk()
# Add a title
self.win.title("Navigability Analysis of Australian University"
" Websites")
# Running method for menu
self.generateMenu()
# Initialising Tab1
self.createTab1()
# Window placement
self.winPlacement()
# Method to generate the menu items for the app
def generateMenu(self):
# Creating a Menu Bar
self.menuBar = Menu(self.win)
self.win.config(menu=self.menuBar)
# Add menu items
fileMenu = Menu(self.menuBar, tearoff=0)
fileMenu.add_command(label="New")
fileMenu.add_separator()
fileMenu.add_command(label="Exit", command=self.win.destroy)
self.menuBar.add_cascade(label="File", menu=fileMenu)
# Add another Menu to the Menu Bar and an item
helpMenu = Menu(self.menuBar, tearoff=0)
helpMenu.add_command(label="About")
self.menuBar.add_cascade(label="Help", menu=helpMenu)
# Generating method for window placement
def winPlacement(self):
# Updating the base window to get information
self.win.update()
# Generate the desired sized window
screenWidth = self.win.winfo_screenwidth()
screenHeight = self.win.winfo_screenheight()
print("Screen width: ", screenWidth, "\t Screen height: ",
screenHeight)
winW = self.win.winfo_width()
winH = self.win.winfo_height()
print("win width: ", winW, "\t win height: ", winH)
# win.geometry(("%dx%d") % (winW, winH))
# Position window in the centre of the screen
posRight = int(screenWidth/2 - winW/2)
posDown = int(screenHeight/2 - winH/2)
self.win.geometry("+{}+{}".format(posRight, posDown))
# ComboBox CallBack alternation requires label update
def focusCall(self, event):
        print('focusCall initiated')
selectedDegree = self.degree.get()
print(selectedDegree)
self.focusChoice.configure(foreground=FT_DOG, text=selectedDegree)
# Generates items for first tab
def createTab1(self):
# Create Tab Control
self.tabControl = ttk.Notebook(self.win)
        # Adding tabs to the window for organisation & aesthetic appeal
self.tab1 = ttk.Frame(self.tabControl)
self.tabControl.add(self.tab1, text='Parameters')
self.tab2 = ttk.Frame(self.tabControl)
self.tabControl.add(self.tab2, text='Analysis')
self.tab3 = ttk.Frame(self.tabControl)
self.tabControl.add(self.tab3, text='Results')
# self.tabControl.pack(expand=1, fill='both')
self.tabControl.grid(row=0, column=0, sticky='NSEW')
self.tabControl.grid_columnconfigure(0, weight=1)
self.tabControl.grid_rowconfigure(0, weight=1)
# Creating frame for right side of tab1
self.focusBox = ttk.Labelframe(self.tab1, text='Parameter Summary')
self.focusBox.grid(column=1, row=0, padx=10, pady=10, sticky='NS')
self.focusBox.grid_columnconfigure(1, weight=1)
self.focusBox.grid_rowconfigure(0, weight=1)
# Creating a label to show which focus group has been selected
self.focusChoice = ttk.Label(self.focusBox, text='Make a Choice',
font=LB_B20)
self.focusChoice.grid(columnspan=2, row=1, padx=5, pady=5)
self.focusChoice.grid_rowconfigure(1, weight=1)
# Creating drop-down menu to specify focus group
focusLabel = ttk.Label(self.focusBox, text='Focus Group:', font=LB_N18)
focusLabel.grid(column=0, row=0, padx=5, pady=5)
focusLabel.grid_columnconfigure(0, weight=1)
focusLabel.grid_rowconfigure(0, weight=1)
self.degree = tk.StringVar()
self.groupChosen = ttk.Combobox(self.focusBox, width=30,
textvariable=self.degree,
state='readonly', validate='all',
justify='center')
self.groupChosen['values'] = ('Undergraduate, International',
                                      'Undergraduate, Domestic',
'Masters, International',
'Masters, Domestic',
'Doctorate, International',
'Doctorate, Domestic')
self.groupChosen.grid(column=1, row=0, padx=5, pady=5)
self.groupChosen.grid_columnconfigure(1, weight=1)
self.groupChosen.grid_rowconfigure(0, weight=1)
self.groupChosen.current(0)
print(self.degree.get())
        self.groupChosen.bind("<<ComboboxSelected>>", self.focusCall)
mainGui = VRES_GUI()
mainGui.win.mainloop()
|
#Driver code
class Node:
def __init__(self, val):
self.val = val
self.right = None
self.left = None
def create():
n = int(input())
if n == -1:
return None
root = Node(n)
root.left = create()
root.right = create()
return root
def print_tree(root):
if root != None:
print(root.val, end=" ")
print_tree(root.left)
print_tree(root.right)
#Main solution code
def count_unival(root):
if root == None:
return True, 0
if root.left == None and root.right == None:
return True, 1
left_val, l_val = count_unival(root.left)
right_val, r_val = count_unival(root.right)
if right_val and left_val:
if (root.left == None and root.val == root.right.val) or (root.right == None and root.val == root.left.val) or (root.val == root.left.val and root.val == root.right.val):
return True, 1 + l_val + r_val
return False, l_val + r_val
#Driver code
if __name__ == '__main__':
tree = create()
print("\n")
print_tree(tree)
print("\n")
context, count = count_unival(tree)
print(count)
|
from __future__ import division
from past.utils import old_div
import unittest
from nineml.abstraction import (
Dynamics, Regime, Alias, Parameter, AnalogReceivePort, AnalogReducePort,
OnCondition, AnalogSendPort, Constant, StateAssignment)
from nineml.abstraction.dynamics.visitors.modifiers import (
DynamicsSubstituteAliases)
from nineml import units as un
# Testing Skeleton for class: DynamicsClonerPrefixNamespace
class DynamicsSubstituteAliasesTest(unittest.TestCase):
def setUp(self):
self.a = Dynamics(
name='A',
aliases=['A1:=P1 / P2', 'A2 := ARP2 + P3', 'A3 := A4 * P4 * P5',
'A4:=P6 ** 2 + ADP1', 'A5:=SV1 * SV2 * P8',
'A6:=SV1 * P1 / P8', 'A7:=A1 / P8'],
regimes=[
Regime('dSV1/dt = -A1 / A2',
'dSV2/dt = -ADP1 / P7',
'dSV3/dt = -A1 * A3 / (A2 * C1)',
transitions=[OnCondition('SV1 > 10',
target_regime_name='R2')],
aliases=[Alias('A1', 'P1 / P2 * 2'),
Alias('A5', 'SV1 * SV2 * P8 * 2')],
name='R1'),
Regime('dSV1/dt = -A1 / A2',
'dSV3/dt = -A1 / A2 * A4',
transitions=[OnCondition(
'C2 > A6',
state_assignments=[
StateAssignment('SV1', 'SV1 - A7')],
target_regime_name='R1')],
name='R2')],
analog_ports=[AnalogReceivePort('ARP1', dimension=un.resistance),
AnalogReceivePort('ARP2', dimension=un.charge),
AnalogReducePort('ADP1',
dimension=un.dimensionless),
AnalogSendPort('A5', dimension=un.current)],
parameters=[Parameter('P1', dimension=un.voltage),
Parameter('P2', dimension=un.resistance),
Parameter('P3', dimension=un.charge),
Parameter('P4', dimension=old_div(un.length, un.current ** 2)),
Parameter('P5', dimension=old_div(un.current ** 2, un.length)),
Parameter('P6', dimension=un.dimensionless),
Parameter('P7', dimension=un.time),
Parameter('P8', dimension=un.current)],
constants=[Constant('C1', value=10.0, units=un.unitless),
Constant('C2', value=1.0, units=un.ohm)])
self.ref_substituted_a = Dynamics(
name='substituted_A',
aliases=['A5:=SV1 * SV2 * P8'],
regimes=[
Regime('dSV1/dt = -2 * (P1 / P2) / (ARP2 + P3)',
'dSV2/dt = -ADP1 / P7',
('dSV3/dt = -2 * (P1 / P2) * ((P6 ** 2 + ADP1) * P4 * '
'P5) / ((ARP2 + P3) * C1)'),
transitions=[OnCondition('SV1 > 10',
target_regime_name='R2')],
aliases=[Alias('A5', 'SV1 * SV2 * P8 * 2')],
name='R1'),
Regime('dSV1/dt = -(P1 / P2) / (ARP2 + P3)',
'dSV3/dt = -(P1 / P2) / (ARP2 + P3) * (P6 ** 2 + ADP1)',
transitions=[OnCondition(
'C2 > (SV1 * P1 / P8)',
state_assignments=[
StateAssignment('SV1', 'SV1 - (P1 / P2) / P8')],
target_regime_name='R1')],
name='R2')],
analog_ports=[AnalogReceivePort('ARP1', dimension=un.resistance),
AnalogReceivePort('ARP2', dimension=un.charge),
AnalogReducePort('ADP1',
dimension=un.dimensionless),
AnalogSendPort('A5', dimension=un.current)],
parameters=[Parameter('P1', dimension=un.voltage),
Parameter('P2', dimension=un.resistance),
Parameter('P3', dimension=un.charge),
Parameter('P4', dimension=old_div(un.length, un.current ** 2)),
Parameter('P5', dimension=old_div(un.current ** 2, un.length)),
Parameter('P6', dimension=un.dimensionless),
Parameter('P7', dimension=un.time),
Parameter('P8', dimension=un.current)],
constants=[Constant('C1', value=10.0, units=un.unitless),
Constant('C2', value=1.0, units=un.ohm)]
)
def test_substitute_aliases(self):
substituted_a = self.a.clone()
substituted_a.name = 'substituted_A'
DynamicsSubstituteAliases(substituted_a)
self.assertEqual(substituted_a, self.ref_substituted_a,
substituted_a.find_mismatch(self.ref_substituted_a))
|
from modpy.proxy._polynomial import PolynomialModel
from modpy.proxy._kriging import SimpleKrigingModel, OrdinaryKrigingModel, UniversalKrigingModel
|
"""
FiftyOne v0.7.1 admin revision.
| Copyright 2017-2021, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
def up(db):
db.admin.command({"setFeatureCompatibilityVersion": "4.4"})
def down(db):
db.admin.command({"setFeatureCompatibilityVersion": "4.2"})
|
import oscar.apps.customer.apps as apps
class CustomerConfig(apps.CustomerConfig):
name = 'apps.customer'
|
"""
Selection Sort
https://en.wikipedia.org/wiki/Selection_sort
Worst-case performance: O(N^2)
If you call selection_sort(arr,True), you can see the process of the sort
Default is simulation = False
"""
def selection_sort(arr, simulation=False):
iteration = 0
if simulation:
print("iteration",iteration,":",*arr)
for i in range(len(arr)):
minimum = i
for j in range(i + 1, len(arr)):
# "Select" the correct value
if arr[j] < arr[minimum]:
minimum = j
arr[minimum], arr[i] = arr[i], arr[minimum]
if simulation:
iteration = iteration + 1
print("iteration",iteration,":",*arr)
return arr
def main():
array = [1,5,8,5,150,44,4,3,6] #static inputs
result = selection_sort(array)
print(result)
if __name__=="__main__":
main()
|
import itertools
from networking_p4.extensions import p4
def list_quota_opts():
return [
('quotas',
itertools.chain(
p4.p4_quota_opts)
),
]
|
default_app_config = "grandchallenge.workstations.apps.WorkstationsConfig"
|
# coding: utf-8
import enum
from sqlalchemy import Boolean
from sqlalchemy import Column
from sqlalchemy import Enum
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import Numeric
from sqlalchemy import String
from sqlalchemy.orm import relationship
import typing
from rolling.server.document.character import CharacterDocument
from rolling.server.extension import ServerSideDocument as Document
from rolling.util import quantity_to_str
if typing.TYPE_CHECKING:
from rolling.kernel import Kernel
class OfferItemPosition(enum.Enum):
REQUEST = "REQUEST"
OFFER = "OFFER"
class OfferOperand(enum.Enum):
AND = "AND"
OR = "OR"
class OfferStatus(enum.Enum):
DRAFT = "DRAFT"
OPEN = "OPEN"
CLOSED = "CLOSED"
ACCEPTED = "ACCEPTED"
REFUSED = "REFUSED"
class OfferItemDocument(Document):
__tablename__ = "offer_item"
id = Column(Integer, autoincrement=True, primary_key=True)
offer_id = Column(Integer, ForeignKey("offer.id"))
position = Column(
Enum(*[p.value for p in OfferItemPosition], name="offer_item__position"),
nullable=False,
)
resource_id = Column(String(255), nullable=True)
stuff_id = Column(String(255), nullable=True)
quantity = Column(Numeric(12, 6, asdecimal=False), nullable=False, default=0.0)
offer: "OfferDocument" = relationship("OfferDocument", back_populates="items")
def get_name(self, kernel: "Kernel", quantity: bool = False) -> str:
quantity_str = ""
if self.resource_id:
resource_properties = kernel.game.config.resources[self.resource_id]
if quantity:
quantity_str = quantity_to_str(
self.quantity, resource_properties.unit, kernel
)
quantity_str = f" ({quantity_str})"
return f"{resource_properties.name}{quantity_str}"
stuff_properties = kernel.game.stuff_manager.get_stuff_properties_by_id(
self.stuff_id
)
if quantity:
quantity_str = f" ({round(self.quantity)})"
return f"{stuff_properties.name}{quantity_str}"
class OfferDocument(Document):
__tablename__ = "offer"
id = Column(Integer, autoincrement=True, primary_key=True)
character_id = Column(String(255), ForeignKey("character.id"))
title = Column(String(255), nullable=False)
read = Column(Boolean, default=False)
request_operand = Column(
Enum(*[o.value for o in OfferOperand], name="request_operand"),
nullable=False,
default=OfferOperand.OR.value,
)
offer_operand = Column(
Enum(*[o.value for o in OfferOperand], name="offer_operand"),
nullable=False,
default=OfferOperand.OR.value,
)
permanent = Column(Boolean, nullable=False, default=False)
with_character_id = Column(String(255), ForeignKey("character.id"))
status = Column(
Enum(*[s.value for s in OfferStatus], name="status"),
nullable=False,
default=OfferStatus.DRAFT.value,
)
from_character = relationship(
"CharacterDocument",
foreign_keys=[character_id],
primaryjoin=CharacterDocument.id == character_id,
)
to_character = relationship(
"CharacterDocument",
foreign_keys=[with_character_id],
primaryjoin=CharacterDocument.id == with_character_id,
)
items: typing.List["OfferItemDocument"] = relationship(
"OfferItemDocument",
back_populates="offer",
primaryjoin=OfferItemDocument.offer_id == id,
)
@property
def request_items(self) -> typing.List[OfferItemDocument]:
return [i for i in self.items if i.position == OfferItemPosition.REQUEST.value]
@property
def offer_items(self) -> typing.List[OfferItemDocument]:
return [i for i in self.items if i.position == OfferItemPosition.OFFER.value]
|
default_app_config = 'gallery.apps.GalleryAppConfig' |
import datetime
import json
import numpy as np
import pandas as pd
import requests
import psycopg2.extras
def query_yahoo_finance(stock_code, start, execution_date):
# convert execution_date to timestamp
execution_date = execution_date.format("%Y-%m-%d")
element = datetime.datetime.strptime(execution_date,"%Y-%m-%d")
end = int(datetime.datetime.timestamp(element))
site = "https://query1.finance.yahoo.com/v8/finance/chart/{stock_code:s}?period1={start:d}&period2={end:d}&interval=1d&events=history".format(
stock_code=stock_code, start= start, end=end)
response = requests.get(site)
data = json.loads(response.text)
df = pd.DataFrame(data['chart']['result'][0]['indicators']['quote'][0],
index=pd.to_datetime(np.array(data['chart']['result'][0]['timestamp'])*1000*1000*1000))
df['date'] = df.index.strftime("%Y-%m-%d")
df['stock_code'] = stock_code
return df.dropna()
def check_stock_day_exist(stock_code, execution_date, config):
# create database connection
conn = config.db_conn()
cursor = conn.cursor()
query_sql = """
SELECT EXISTS(
SELECT 1
FROM history as h
WHERE h.stock_code = %(stock_code)s and h.date = %(date)s
);
"""
execution_date = execution_date.format("%Y-%m-%d")
cursor.execute(query_sql, {'stock_code': stock_code, 'date': execution_date})
result = cursor.fetchone()
print('[check_stock_day_exist.month_record]', execution_date, result)
# close database connection
cursor.close()
conn.close()
return result[0]
def insert_new_data(stock_code, df, config):
# create database connection
conn = config.db_conn()
cursor = conn.cursor()
table = 'history'
tuples = [tuple(x) for x in df.to_numpy()]
cols = ','.join(list(df.columns))
insert_sql = """
INSERT INTO {table:s}({cols:s})
VALUES %s
ON CONFLICT (stock_code, date)
DO NOTHING;
""".format(table=table, cols=cols)
try:
psycopg2.extras.execute_values(cursor, insert_sql, tuples)
conn.commit()
except (Exception, psycopg2.DatabaseError) as error:
print("Error: %s" % error)
conn.rollback()
cursor.close()
raise
# close database connection
cursor.close()
conn.close()
# def unpause_airflow_dag(dag_ids):
# print(dag_ids)
# import time
# time.sleep(5)
# # unpause airflow dag
# airflow_webserver_url = "http://airflow_webserver:8080"
# for dag_id in dag_ids:
# unpause_url = "{airflow_webserver_url}/api/experimental/dags/{dag_id}/paused/false".format(airflow_webserver_url=airflow_webserver_url, dag_id=dag_id)
# a = requests.get(unpause_url)
# print(a)
# print("[Unpaused All DAGs]")
|
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.datasets import load_digits
from sklearn.preprocessing import scale
digits = load_digits()
X = digits.data # --> 8x8 pixels
y = digits.target
#plt.matshow(digits.images[0])
#plt.show()
## scale ##
X_scale = scale(X, axis=0) # scaling
pca = PCA(n_components=2)
reduced_x = pca.fit_transform(X_scale) |
# ! /usr/bin/env python
# -*- coding: utf-8 -*-
from random import randint
class Solution:
def findKthLargest(self, nums, k):
if len(nums) < k:
return []
        index = randint(0, len(nums) - 1)
        pivot = nums[index]
        less = [i for i in nums if i < pivot]
        great = [i for i in nums if i > pivot]
        # Elements equal to the pivot belong to neither list; count them so
        # duplicates of the pivot are not silently dropped.
        equal_count = len(nums) - len(less) - len(great)
        if len(great) >= k:
            return self.findKthLargest(great, k)
        elif len(great) + equal_count >= k:
            return pivot
        else:
            return self.findKthLargest(less, k - len(great) - equal_count)
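if __name__ == '__main__':
    # Quick illustrative check: the 4th largest element of this input, which
    # contains duplicate values, is 4.
    print(Solution().findKthLargest([3, 2, 3, 1, 2, 4, 5, 5, 6], 4))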
|
#coding:utf-8
#
# id: bugs.core_5808
# title: Support backup of encrypted databases
# decription:
# THIS TEST USES IBSurgeon Demo Encryption package
# ################################################
# ( https://ib-aid.com/download-demo-firebird-encryption-plugin/ ; https://ib-aid.com/download/crypt/CryptTest.zip )
# License file plugins\\dbcrypt.conf with unlimited expiration was provided by IBSurgeon to Firebird Foundation (FF).
# This file was preliminary stored in FF Test machine.
# Test assumes that this file and all neccessary libraries already were stored into FB_HOME and %FB_HOME%\\plugins.
#
# After test database will be created, we try to encrypt it using 'alter database encrypt with <plugin_name> ...' command
# (where <plugin_name> = dbcrypt - name of .dll in FB_HOME\\plugins\\ folder that implements encryption).
# Then we allow engine to complete this job - take delay about 1..2 seconds BEFORE detach from database.
# After this we make backup of encrypted database + restore.
#
# Then we make snapshot of firebird.log, run 'gfix -v -full' of restored database and once again take snapshot of firebird.log.
# Comparison of these two logs is result of validation. It should contain line about start and line with finish info.
# The latter must look like this: "Validation finished: 0 errors, 0 warnings, 0 fixed"
#
# Checked on:
# 40sS, build 4.0.0.1487: OK, 6.552s.
# 40sC, build 4.0.0.1421: OK, 11.812s.
# 40Cs, build 4.0.0.1485: OK, 8.097s.
#
# 15.04.2021. Adapted for run both on Windows and Linux. Checked on:
# Windows: 4.0.0.2416
# Linux: 4.0.0.2416
#
# tracker_id: CORE-5808
# min_versions: ['4.0']
# versions: 4.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 4.0
# resources: None
substitutions_1 = []
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1
#---
#
# import os
# import time
# import difflib
# import subprocess
# import re
# import fdb
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
# db_conn.close()
#
# #--------------------------------------------
#
# def svc_get_fb_log( f_fb_log ):
#
# global subprocess
#
# subprocess.call( [ context['fbsvcmgr_path'],
# "localhost:service_mgr",
# "action_get_fb_log"
# ],
# stdout=f_fb_log, stderr=subprocess.STDOUT
# )
# return
#
# #--------------------------------------------
#
# def flush_and_close( file_handle ):
# # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os
#
# file_handle.flush()
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno())
# file_handle.close()
#
# #--------------------------------------------
#
# def cleanup( f_names_list ):
# global os
# for i in range(len( f_names_list )):
# if type(f_names_list[i]) == file:
# del_name = f_names_list[i].name
# elif type(f_names_list[i]) == str:
# del_name = f_names_list[i]
# else:
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
# print('type(f_names_list[i])=',type(f_names_list[i]))
# del_name = None
#
# if del_name and os.path.isfile( del_name ):
# os.remove( del_name )
#
# #--------------------------------------------
#
# tmpfdb='$(DATABASE_LOCATION)'+'tmp_core_5808.fdb'
# tmpfbk='$(DATABASE_LOCATION)'+'tmp_core_5808.fbk'
#
# f_list=( tmpfdb, tmpfbk )
# cleanup( f_list )
#
#
# # 14.04.2021.
# # Name of encryption plugin depends on OS:
# # * for Windows we (currently) use plugin by IBSurgeon, its name is 'dbcrypt';
# # * for Linux we use:
# # ** 'DbCrypt_example' for FB 3.x
# # ** 'fbSampleDbCrypt' for FB 4.x+
# #
# PLUGIN_NAME = 'dbcrypt' if os.name == 'nt' else '"fbSampleDbCrypt"'
#
# con = fdb.create_database( dsn = 'localhost:'+tmpfdb )
# cur = con.cursor()
#
# ##############################################
# # WARNING! Do NOT use 'connection_obj.execute_immediate()' for ALTER DATABASE ENCRYPT... command!
# # There is bug in FB driver which leads this command to fail with 'token unknown' message
# # The reason is that execute_immediate() silently set client dialect = 0 and any encryption statement
# # can not be used for such value of client dialect.
# # One need to to use only cursor_obj.execute() for encryption!
# # See letter from Pavel Cisar, 20.01.20 10:36
# ##############################################
# cur.execute('alter database encrypt with %(PLUGIN_NAME)s key Red' % locals())
#
# con.commit()
#
# time.sleep(2)
# # ^
# # +-------- !! ALLOW BACKGROUND ENCRYPTION PROCESS TO COMPLETE ITS JOB !!
#
# con.close()
#
# f_backup_log = open( os.path.join(context['temp_directory'],'tmp_backup_5808.log'), 'w')
# f_backup_err = open( os.path.join(context['temp_directory'],'tmp_backup_5808.err'), 'w')
#
# subprocess.call( [ context['gbak_path'], "-v", "-b", 'localhost:' + tmpfdb, tmpfbk],
# stdout = f_backup_log,
# stderr = f_backup_err
# )
# flush_and_close( f_backup_log )
# flush_and_close( f_backup_err )
#
#
# f_restore_log = open( os.path.join(context['temp_directory'],'tmp_restore_5808.log'), 'w')
# f_restore_err = open( os.path.join(context['temp_directory'],'tmp_restore_5808.err'), 'w')
#
# subprocess.call( [ context['gbak_path'], "-v", "-rep", tmpfbk, 'localhost:'+tmpfdb],
# stdout = f_restore_log,
# stderr = f_restore_err
# )
# flush_and_close( f_restore_log )
# flush_and_close( f_restore_err )
#
# f_fblog_before=open( os.path.join(context['temp_directory'],'tmp_5808_fblog_before.txt'), 'w')
# svc_get_fb_log( f_fblog_before )
# flush_and_close( f_fblog_before )
#
#
# f_validate_log = open( os.path.join(context['temp_directory'],'tmp_validate_5808.log'), 'w')
# f_validate_err = open( os.path.join(context['temp_directory'],'tmp_validate_5808.err'), 'w')
#
# subprocess.call( [ context['gfix_path'], "-v", "-full", tmpfdb ],
# stdout = f_validate_log,
# stderr = f_validate_err
# )
# flush_and_close( f_validate_log )
# flush_and_close( f_validate_err )
#
# f_fblog_after=open( os.path.join(context['temp_directory'],'tmp_5808_fblog_after.txt'), 'w')
# svc_get_fb_log( f_fblog_after )
# flush_and_close( f_fblog_after )
#
#
# # Compare firebird.log versions BEFORE and AFTER this test:
# ######################
#
# oldfb=open(f_fblog_before.name, 'r')
# newfb=open(f_fblog_after.name, 'r')
#
# difftext = ''.join(difflib.unified_diff(
# oldfb.readlines(),
# newfb.readlines()
# ))
# oldfb.close()
# newfb.close()
#
#
# with open( f_backup_err.name,'r') as f:
# for line in f:
# print("UNEXPECTED PROBLEM ON BACKUP, STDERR: "+line)
#
# with open( f_restore_err.name,'r') as f:
# for line in f:
# print("UNEXPECTED PROBLEM ON RESTORE, STDERR: "+line)
#
#
# f_diff_txt=open( os.path.join(context['temp_directory'],'tmp_5808_diff.txt'), 'w')
# f_diff_txt.write(difftext)
# flush_and_close( f_diff_txt )
#
# allowed_patterns = (
# re.compile( '\\+\\s+Validation\\s+started', re.IGNORECASE)
# ,re.compile( '\\+\\s+Validation\\s+finished:\\s+0\\s+errors,\\s+0\\s+warnings,\\s+0\\s+fixed', re.IGNORECASE)
# )
#
#
# with open( f_diff_txt.name,'r') as f:
# for line in f:
# match2some = filter( None, [ p.search(line) for p in allowed_patterns ] )
# if match2some:
# print( (' '.join( line.split()).upper() ) )
#
# # CLEANUP:
# ##########
# # do NOT remove this pause otherwise some of logs will not be enable for deletion and test will finish with
# # Exception raised while executing Python test script. exception: WindowsError: 32
# time.sleep(1)
# cleanup( (f_backup_log, f_backup_err, f_restore_log, f_restore_err, f_validate_log, f_validate_err, f_fblog_before, f_fblog_after, f_diff_txt, tmpfdb, tmpfbk) )
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
+ VALIDATION STARTED
+ VALIDATION FINISHED: 0 ERRORS, 0 WARNINGS, 0 FIXED
"""
@pytest.mark.version('>=4.0')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
|
MAIN_PAGE_ADMIN_TEMPLATE = """\
<head>
<link type="text/css" rel="stylesheet" href="/stylesheets/main.css" />
</head>
<div align="center">
<h1>General</h1>
<div align="center">
<table>
<tr>
<td>
<form action="/mainsearch" method="post">
<div><input type="submit" value="Search"></div>
</form>
</td>
<td>
<form action="/mainclear" method="post">
<div><input type="submit" value="Clear"></div>
</form>
</td>
</tr>
</table>
</div>
<h1>Books</h1>
<div align="center">
<table>
<tr>
<td>
<form action="/listbooks" method="get">
<div><input type="submit" value="List"></div>
</form>
</td>
<td>
<form action="/loadbookpage" method="get">
<div><input type="submit" value="Load"></div>
</form>
</td>
<td>
<form action="/clearbooks" method="post">
<div><input type="submit" value="Clear"></div>
</form>
</td>
<td>
<form action="/statbooks" method="get">
<div><input type="submit" value="Stats"></div>
</form>
</td>
</tr>
</table>
</div>
<h1>Chars</h1>
<div align="center">
<table>
<tr>
<td>
<form action="/listchichars" method="get">
<div><input type="submit" value="List"></div>
</form>
</td>
<td>
<form action="/addchichar" method="get">
<div><input type="submit" value="Add"></div>
</form>
</td>
<td>
<form action="/statchichars" method="get">
<div><input type="submit" value="Stats"></div>
</form>
</td>
<td>
<form action="/exportchichars" method="get">
<div><input type="submit" value="Export"></div>
</form>
</td>
</tr>
</table>
</div>
<h1>Words</h1>
<div align="center">
<table>
<tr>
<td>
<form action="/listwords" method="get">
<div><input type="submit" value="List"></div>
</form>
</td>
<td>
<form action="/addword" method="get">
<div><input type="submit" value="Add"></div>
</form>
</td>
<td>
<form action="/statwords" method="get">
<div><input type="submit" value="Stats"></div>
</form>
</td>
</tr>
</table>
</div>
<h1>Sentences</h1>
<div align="center">
<table>
<tr>
<td>
<form action="/listsentences" method="get">
<div><input type="submit" value="List"></div>
</form>
</td>
<td>
<form action="/addsentence" method="get">
<div><input type="submit" value="Add"></div>
</form>
</td>
<td>
<form action="/statsentences" method="get">
<div><input type="submit" value="Stats"></div>
</form>
</td>
</tr>
</table>
</div>
<h1>Training</h1>
<div align="center">
<table>
<tr>
<td>
<form action="/char2pinyintest" method="get">
<div><input type="submit" value="Char2Pinyin"></div>
</form>
</td>
<td>
<form action="/def2chartest" method="get">
<div><input type="submit" value="Def2Char"></div>
</form>
</td>
</tr>
</table>
</div>
<h1>My Stats</h1>
<div align="center">
<table>
<tr>
<td>
<form action="/listviewstats" method="get">
<div><input type="submit" value="View"></div>
</form>
</td>
<td>
<form action="/charteststats" method="get">
<div><input type="submit" value="CharTestStats"></div>
</form>
</td>
</tr>
</table>
</div>
</div>
<hr>
<a href="%s">%s</a>
"""
MAIN_PAGE_USER_TEMPLATE = """\
<head>
<link type="text/css" rel="stylesheet" href="/stylesheets/main.css" />
</head>
<div align="center">
<h1>Chars</h1>
<form action="/listchichars" method="get">
<div><input type="submit" value="List"></div>
</form>
<h1>Sentences</h1>
<form action="/listsentences" method="get">
<div><input type="submit" value="List"></div>
</form>
<h1>Training</h1>
<div>TODO</div>
<h1>My Stats</h1>
<div align="center">
<table>
<tr>
<td>
<form action="/listviewstats" method="get">
<div><input type="submit" value="View"></div>
</form>
</td>
<td>
<form action="/clearviewstats" method="post">
<div><input type="submit" value="Clear"></div>
</form>
</td>
</tr>
</table>
</div>
</div>
<hr>
<a href="%s">%s</a>
"""
LOAD_GENERAL = """\
<head>
<link type="text/css" rel="stylesheet" href="/stylesheets/main.css" />
</head>
<form action="/domainload" method="post">
<div><textarea name="dataentry" rows="10" cols="40"></textarea></div>
<div><input type="submit" value="Load"></div>
</form>
"""
SEARCH_GENERAL = """\
<head>
<link type="text/css" rel="stylesheet" href="/stylesheets/main.css" />
</head>
<h1>Search</h1>
<hr>
<form action="/mainsearch" method="post">
<div><textarea name="searchquery" rows="1" cols="10">%s</textarea></div>
<div><input type="submit" value="Search"></div>
</form>
<hr>
<h1>Chars</h1>
%s
<h1>Words</h1>
%s
<h1>Sentences</h1>
%s
<hr>
<form action="/" method="get">
<div><input type="submit" value="Home"></div>
</form>
"""
|
import logging
import os
import sys
from argparse import ArgumentParser
from signal import SIGUSR1, SIGUSR2, signal
from subprocess import PIPE, run
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import xarray as xr
from loren_frank_data_processing import save_xarray
from loren_frank_data_processing.position import (EDGE_ORDER, EDGE_SPACING,
make_track_graph)
from replay_trajectory_classification import (ClusterlessClassifier,
SortedSpikesClassifier)
from scipy.ndimage import label
from src.analysis import (get_place_field_max, get_replay_info,
reshape_to_segments)
from src.load_data import load_data
from src.parameters import (ANIMALS, FIGURE_DIR, PROBABILITY_THRESHOLD,
PROCESSED_DATA_DIR, SAMPLING_FREQUENCY,
TRANSITION_TO_CATEGORY,
continuous_transition_types, discrete_diag,
knot_spacing, model, model_kwargs, movement_var,
place_bin_size, replay_speed, spike_model_penalty)
from src.visualization import plot_ripple_decode_1D
from tqdm.auto import tqdm
FORMAT = '%(asctime)s %(message)s'
logging.basicConfig(level='INFO', format=FORMAT, datefmt='%d-%b-%y %H:%M:%S')
plt.switch_backend('agg')
def sorted_spikes_analysis_1D(epoch_key,
plot_ripple_figures=False,
exclude_interneuron_spikes=False,
use_multiunit_HSE=False,
brain_areas=None,
overwrite=False):
animal, day, epoch = epoch_key
data_type, dim = 'sorted_spikes', '1D'
logging.info('Loading data...')
data = load_data(epoch_key,
exclude_interneuron_spikes=exclude_interneuron_spikes)
is_training = data['position_info'].speed > 4
position = data['position_info'].loc[:, 'linear_position']
track_graph, center_well_id = make_track_graph(epoch_key, ANIMALS)
# Set up naming
group = f'/{data_type}/{dim}/'
epoch_identifier = f'{animal}_{day:02d}_{epoch:02d}_{data_type}_{dim}'
if exclude_interneuron_spikes:
logging.info('Excluding interneuron spikes...')
epoch_identifier += '_no_interneuron'
group += 'no_interneuron/'
if brain_areas is not None:
area_str = '-'.join(brain_areas)
epoch_identifier += f'_{area_str}'
group += f'{area_str}/'
if use_multiunit_HSE:
epoch_identifier += '_multiunit_HSE'
group += 'classifier/multiunit_HSE/'
replay_times = data['multiunit_HSE_times']
else:
group += 'classifier/ripples/'
replay_times = data['ripple_times']
model_name = os.path.join(
PROCESSED_DATA_DIR, epoch_identifier + '_model.pkl')
try:
if overwrite:
raise FileNotFoundError
results = xr.open_dataset(
os.path.join(
PROCESSED_DATA_DIR, f'{animal}_{day:02}_{epoch:02}.nc'),
group=group)
logging.info('Found existing results. Loading...')
ripple_spikes = reshape_to_segments(
data['spikes'], replay_times.loc[:, ['start_time', 'end_time']])
classifier = SortedSpikesClassifier.load_model(model_name)
logging.info(classifier)
except (FileNotFoundError, OSError):
logging.info('Fitting classifier...')
classifier = SortedSpikesClassifier(
place_bin_size=place_bin_size, movement_var=movement_var,
replay_speed=replay_speed,
discrete_transition_diag=discrete_diag,
spike_model_penalty=spike_model_penalty, knot_spacing=knot_spacing,
continuous_transition_types=continuous_transition_types).fit(
position, data['spikes'], is_training=is_training,
track_graph=track_graph, center_well_id=center_well_id,
edge_order=EDGE_ORDER, edge_spacing=EDGE_SPACING)
classifier.save_model(model_name)
logging.info(classifier)
# Plot Place Fields
g = (classifier.place_fields_ * data['sampling_frequency']).plot(
x='position', col='neuron', col_wrap=4)
arm_grouper = (data['position_info']
.groupby('arm_name')
.linear_position)
max_df = arm_grouper.max()
min_df = arm_grouper.min()
plt.xlim((0, data['position_info'].linear_position.max()))
max_rate = (classifier.place_fields_.values.max() *
data['sampling_frequency'])
for ax in g.axes.flat:
for arm_name, min_position in min_df.iteritems():
ax.axvline(min_position, color='lightgrey', zorder=0,
linestyle='--')
ax.text(min_position + 0.2, max_rate, arm_name,
color='lightgrey', horizontalalignment='left',
verticalalignment='top', fontsize=8)
for arm_name, max_position in max_df.iteritems():
ax.axvline(max_position, color='lightgrey', zorder=0,
linestyle='--')
plt.suptitle(epoch_key, y=1.04, fontsize=16)
fig_name = (
f'{animal}_{day:02d}_{epoch:02d}_{data_type}_place_fields_1D.png')
fig_name = os.path.join(FIGURE_DIR, 'neuron_place_fields', fig_name)
plt.savefig(fig_name, bbox_inches='tight')
plt.close(g.fig)
# Decode
is_test = ~is_training
test_groups = pd.DataFrame(
{'test_groups': label(is_test.values)[0]}, index=is_test.index)
immobility_results = []
for _, df in tqdm(test_groups.loc[is_test].groupby('test_groups'),
desc='immobility'):
start_time, end_time = df.iloc[0].name, df.iloc[-1].name
test_spikes = data['spikes'].loc[start_time:end_time]
immobility_results.append(
classifier.predict(test_spikes, time=test_spikes.index))
immobility_results = xr.concat(immobility_results, dim='time')
results = [(immobility_results
.sel(time=slice(df.start_time, df.end_time))
.assign_coords(time=lambda ds: ds.time - ds.time[0]))
for _, df in replay_times.iterrows()]
results = (xr.concat(results, dim=replay_times.index)
.assign_coords(state=lambda ds: ds.state.to_index()
.map(TRANSITION_TO_CATEGORY)))
logging.info('Saving results...')
ripple_spikes = reshape_to_segments(
data['spikes'], replay_times.loc[:, ['start_time', 'end_time']])
save_xarray(PROCESSED_DATA_DIR, epoch_key,
results.drop(['likelihood', 'causal_posterior']),
group=f'/{data_type}/{dim}/classifier/ripples/')
logging.info('Saving replay_info...')
replay_info = get_replay_info(
results, data['spikes'], replay_times, data['position_info'],
track_graph, SAMPLING_FREQUENCY, PROBABILITY_THRESHOLD, epoch_key,
classifier, data["ripple_consensus_trace_zscore"])
prob = int(PROBABILITY_THRESHOLD * 100)
epoch_identifier = f'{animal}_{day:02d}_{epoch:02d}_{data_type}_{dim}'
replay_info_filename = os.path.join(
PROCESSED_DATA_DIR, f'{epoch_identifier}_replay_info_{prob:02d}.csv')
replay_info.to_csv(replay_info_filename)
if plot_ripple_figures:
logging.info('Plotting ripple figures...')
place_field_max = get_place_field_max(classifier)
linear_position_order = place_field_max.argsort(axis=0).squeeze()
ripple_position = reshape_to_segments(
position, replay_times.loc[:, ['start_time', 'end_time']])
for ripple_number in tqdm(replay_times.index, desc='ripple figures'):
try:
posterior = (
results
.acausal_posterior
.sel(ripple_number=ripple_number)
.dropna('time', how='all')
.assign_coords(
time=lambda ds: 1000 * ds.time /
np.timedelta64(1, 's')))
plot_ripple_decode_1D(
posterior, ripple_position.loc[ripple_number],
ripple_spikes.loc[ripple_number], linear_position_order,
data['position_info'], classifier)
plt.suptitle(
f'ripple number = {animal}_{day:02d}_{epoch:02d}_'
f'{ripple_number:04d}')
fig_name = (
f'{animal}_{day:02d}_{epoch:02d}_{ripple_number:04d}_'
f'{data_type}_{dim}_acasual_classification.png')
fig_name = os.path.join(
FIGURE_DIR, 'ripple_classifications', fig_name)
plt.savefig(fig_name, bbox_inches='tight')
plt.close(plt.gcf())
except (ValueError, IndexError):
logging.warn(f'No figure for ripple number {ripple_number}...')
continue
logging.info('Done...')
def clusterless_analysis_1D(epoch_key,
plot_ripple_figures=False,
exclude_interneuron_spikes=False,
use_multiunit_HSE=False,
brain_areas=None,
overwrite=False):
animal, day, epoch = epoch_key
data_type, dim = 'clusterless', '1D'
logging.info('Loading data...')
data = load_data(epoch_key,
brain_areas=brain_areas,
exclude_interneuron_spikes=exclude_interneuron_spikes)
is_training = data['position_info'].speed > 4
position = data['position_info'].loc[:, 'linear_position']
track_graph, center_well_id = make_track_graph(epoch_key, ANIMALS)
# Set up naming
group = f'/{data_type}/{dim}/'
epoch_identifier = f'{animal}_{day:02d}_{epoch:02d}_{data_type}_{dim}'
if exclude_interneuron_spikes:
logging.info('Excluding interneuron spikes...')
epoch_identifier += '_no_interneuron'
group += 'no_interneuron/'
if brain_areas is not None:
area_str = '-'.join(brain_areas)
epoch_identifier += f'_{area_str}'
group += f'{area_str}/'
if use_multiunit_HSE:
epoch_identifier += '_multiunit_HSE'
group += 'classifier/multiunit_HSE/'
replay_times = data['multiunit_HSE_times']
else:
group += 'classifier/ripples/'
replay_times = data['ripple_times']
model_name = os.path.join(
PROCESSED_DATA_DIR, epoch_identifier + '_model.pkl')
try:
if overwrite:
raise FileNotFoundError
results = xr.open_dataset(
os.path.join(
PROCESSED_DATA_DIR, f'{animal}_{day:02}_{epoch:02}.nc'),
group=group)
logging.info('Found existing results. Loading...')
spikes = (((data['multiunit'].sum('features') > 0) * 1.0)
.to_dataframe(name='spikes').unstack())
spikes.columns = data['tetrode_info'].tetrode_id
ripple_spikes = reshape_to_segments(
spikes, replay_times.loc[:, ['start_time', 'end_time']])
classifier = ClusterlessClassifier.load_model(model_name)
logging.info(classifier)
except (FileNotFoundError, OSError):
logging.info('Fitting classifier...')
classifier = ClusterlessClassifier(
place_bin_size=place_bin_size, movement_var=movement_var,
replay_speed=replay_speed,
discrete_transition_diag=discrete_diag,
continuous_transition_types=continuous_transition_types,
model=model, model_kwargs=model_kwargs).fit(
position, data['multiunit'], is_training=is_training,
track_graph=track_graph, center_well_id=center_well_id,
edge_order=EDGE_ORDER, edge_spacing=EDGE_SPACING)
classifier.save_model(model_name)
logging.info(classifier)
# Decode
is_test = ~is_training
test_groups = pd.DataFrame(
{'test_groups': label(is_test.values)[0]}, index=is_test.index)
immobility_results = []
for _, df in tqdm(test_groups.loc[is_test].groupby('test_groups'),
desc='immobility'):
start_time, end_time = df.iloc[0].name, df.iloc[-1].name
test_multiunit = data['multiunit'].sel(
time=slice(start_time, end_time))
immobility_results.append(
classifier.predict(test_multiunit, time=test_multiunit.time))
immobility_results = xr.concat(immobility_results, dim='time')
results = [(immobility_results
.sel(time=slice(df.start_time, df.end_time))
.assign_coords(time=lambda ds: ds.time - ds.time[0]))
for _, df in replay_times.iterrows()]
results = (xr.concat(results, dim=replay_times.index)
.assign_coords(state=lambda ds: ds.state.to_index()
.map(TRANSITION_TO_CATEGORY)))
spikes = ((((~np.isnan(data['multiunit'])).sum('features') > 0) * 1.0)
.to_dataframe(name='spikes').unstack())
spikes.columns = data['tetrode_info'].tetrode_id
ripple_spikes = reshape_to_segments(
spikes, replay_times.loc[:, ['start_time', 'end_time']])
logging.info('Saving results...')
save_xarray(PROCESSED_DATA_DIR, epoch_key,
results.drop(['likelihood', 'causal_posterior']),
group=group)
logging.info('Saving replay_info...')
replay_info = get_replay_info(
results, spikes, replay_times, data['position_info'],
track_graph, SAMPLING_FREQUENCY, PROBABILITY_THRESHOLD, epoch_key,
classifier, data["ripple_consensus_trace_zscore"])
prob = int(PROBABILITY_THRESHOLD * 100)
replay_info_filename = os.path.join(
PROCESSED_DATA_DIR, f'{epoch_identifier}_replay_info_{prob:02d}.csv')
replay_info.to_csv(replay_info_filename)
if plot_ripple_figures:
logging.info('Plotting ripple figures...')
place_field_max = get_place_field_max(classifier)
linear_position_order = place_field_max.argsort(axis=0).squeeze()
ripple_position = reshape_to_segments(
position, replay_times.loc[:, ['start_time', 'end_time']])
for ripple_number in tqdm(replay_times.index, desc='ripple figures'):
try:
posterior = (
results
.acausal_posterior
.sel(ripple_number=ripple_number)
.dropna('time', how='all')
.assign_coords(
time=lambda ds: 1000 * ds.time /
np.timedelta64(1, 's')))
plot_ripple_decode_1D(
posterior, ripple_position.loc[ripple_number],
ripple_spikes.loc[ripple_number], linear_position_order,
data['position_info'], classifier, spike_label='Tetrodes')
plt.suptitle(
f'ripple number = {animal}_{day:02d}_{epoch:02d}_'
f'{ripple_number:04d}')
fig_name = (
f'{animal}_{day:02d}_{epoch:02d}_{ripple_number:04d}_'
f'{data_type}_{dim}_acasual_classification.png')
fig_name = os.path.join(
FIGURE_DIR, 'ripple_classifications', fig_name)
plt.savefig(fig_name, bbox_inches='tight')
plt.close(plt.gcf())
except (ValueError, IndexError):
logging.warn(f'No figure for ripple number {ripple_number}...')
continue
logging.info('Done...\n')
run_analysis = {
('sorted_spikes', '1D'): sorted_spikes_analysis_1D,
('clusterless', '1D'): clusterless_analysis_1D,
}
def get_command_line_arguments():
parser = ArgumentParser()
parser.add_argument('Animal', type=str, help='Short name of animal')
parser.add_argument('Day', type=int, help='Day of recording session')
parser.add_argument('Epoch', type=int,
help='Epoch number of recording session')
parser.add_argument('--data_type', type=str, default='sorted_spikes')
parser.add_argument('--dim', type=str, default='1D')
parser.add_argument('--n_workers', type=int, default=16)
parser.add_argument('--threads_per_worker', type=int, default=1)
parser.add_argument('--plot_ripple_figures', action='store_true')
parser.add_argument('--exclude_interneuron_spikes', action='store_true')
parser.add_argument('--use_multiunit_HSE', action='store_true')
parser.add_argument('--CA1', action='store_true')
parser.add_argument('--overwrite', action='store_true')
parser.add_argument(
'-d', '--debug',
help='More verbose output for debugging',
action='store_const',
dest='log_level',
const=logging.DEBUG,
default=logging.INFO,
)
return parser.parse_args()
def main():
args = get_command_line_arguments()
FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(format=FORMAT, level=args.log_level)
def _signal_handler(signal_code, frame):
logging.error('***Process killed with signal {signal}***'.format(
signal=signal_code))
exit()
for code in [SIGUSR1, SIGUSR2]:
signal(code, _signal_handler)
epoch_key = (args.Animal, args.Day, args.Epoch)
logging.info(
'Processing epoch: Animal {0}, Day {1}, Epoch #{2}...'.format(
*epoch_key))
logging.info(f'Data type: {args.data_type}, Dim: {args.dim}')
git_hash = run(['git', 'rev-parse', 'HEAD'],
stdout=PIPE, universal_newlines=True).stdout
logging.info('Git Hash: {git_hash}'.format(git_hash=git_hash.rstrip()))
if args.CA1:
brain_areas = ['CA1']
else:
brain_areas = None
# Analysis Code
run_analysis[(args.data_type, args.dim)](
epoch_key,
plot_ripple_figures=args.plot_ripple_figures,
exclude_interneuron_spikes=args.exclude_interneuron_spikes,
use_multiunit_HSE=args.use_multiunit_HSE,
brain_areas=brain_areas,
overwrite=args.overwrite)
if __name__ == '__main__':
sys.exit(main())
|
import calendar
def iterweekdays(cal): return cal.iterweekdays()
for cal in [calendar.Calendar(), calendar.Calendar(firstweekday=0), calendar.Calendar(firstweekday=6)]:
    print('-----', cal, '-----')
    print(iterweekdays(cal))
    for weekday in iterweekdays(cal): print(weekday, end=',')
    print()
#print(calendar.Calendar())
#print(calendar.Calendar(firstweekday=0))# Monday
#print(calendar.Calendar(firstweekday=6))# Sunday
|
# Django settings for dd_auth project.
import os
from django.utils.translation import ugettext_lazy as _
APP_PATH = os.path.dirname(os.path.abspath(__file__))
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '%s/dd_auth.db' % APP_PATH, # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/Vienna'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
LANGUAGES = (
('de', _('German')),
('en', _('English')),
)
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = '%s/../static' % APP_PATH
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/dd_auth_static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '6g^74v__c=-!$e6qiqw1c53is2*y*4ayl#is)btc7w!i45tRXs'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'dd_auth.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'dd_auth.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
'/%s/templates' % APP_PATH,
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.request',
'django.contrib.auth.context_processors.auth',
'allauth.account.context_processors.account',
'allauth.socialaccount.context_processors.socialaccount',
'django.core.context_processors.i18n'
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'jsonrpc',
'south',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.facebook',
'allauth.socialaccount.providers.google',
#'allauth.socialaccount.providers.github',
#'allauth.socialaccount.providers.linkedin',
#'allauth.socialaccount.providers.openid',
#'allauth.socialaccount.providers.persona',
#'allauth.socialaccount.providers.soundcloud',
#'allauth.socialaccount.providers.twitter',
'django.contrib.admin',
'dd_auth.dd_user_sync',
'dd_auth.dd_djangomessage_null',
'dd_invitation',
'django_extensions',
)
# django.contrib.messages storage
MESSAGE_STORAGE = 'dd_auth.dd_djangomessage_null.storage.NullStorage'
# Password hashers
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.BCryptPasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
EMAIL_HOST='some.mail.host'
EMAIL_PORT=25
EMAIL_HOST_USER='username'
EMAIL_HOST_PASSWORD='secret'
EMAIL_USE_TLS=True
DEFAULT_FROM_EMAIL=SERVER_EMAIL='robot@datadealer.com'
LOGIN_REDIRECT_URL='/#load'
#LOGIN_REDIRECT_URLNAME='https://datadealer.com/#load'
ACCOUNT_LOGOUT_ON_GET=True
ACCOUNT_LOGOUT_REDIRECT_URL='/'
### REDIS SESSIONS
SESSION_ENGINE = 'redis_sessions.session'
SESSION_REDIS_HOST = 'localhost'
SESSION_REDIS_PORT = 6379
SESSION_REDIS_DB = 0
#SESSION_REDIS_PASSWORD = 'password'
SESSION_REDIS_PREFIX = 'dd_session'
INVITATION_REQUIRED = True
INVITATION_FAILED = '%s%s' % (ACCOUNT_LOGOUT_REDIRECT_URL, '#access_denied')
# Needed for allauth email authentication
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin, regardless of `allauth`
"django.contrib.auth.backends.ModelBackend",
# `allauth` specific authentication methods, such as login by e-mail
"allauth.account.auth_backends.AuthenticationBackend",
)
### ALLAUTH SETTINGS
ACCOUNT_AUTHENTICATION_METHOD = "username_email"
#ACCOUNT_EMAIL_CONFIRMATION_ANONYMOUS_REDIRECT_URL='/some/url' # after email confirmation, anonymous
#ACCOUNT_EMAIL_CONFIRMATION_AUTHENTICATED_REDIRECT_URL='/some/url' # after email confirmation, logged in
ACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 5
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'optional'
ACCOUNT_EMAIL_SUBJECT_PREFIX = "[Data Dealer] "
#ACCOUNT_SIGNUP_FORM_CLASS
ACCOUNT_SIGNUP_PASSWORD_VERIFICATION = True # password twice
ACCOUNT_UNIQUE_EMAIL = True # ensure email uniqueness
#ACCOUNT_USER_DISPLAY (=a callable returning user.username)
ACCOUNT_USERNAME_MIN_LENGTH = 4
ACCOUNT_USERNAME_REQUIRED = True
ACCOUNT_PASSWORD_INPUT_RENDER_VALUE = False
ACCOUNT_PASSWORD_MIN_LENGTH = 8
ACCOUNT_ADAPTER = "dd_auth.adapters.DDAccountAdapter"
SOCIALACCOUNT_QUERY_EMAIL = True
SOCIALACCOUNT_AUTO_SIGNUP = False
SOCIALACCOUNT_AVATAR_SUPPORT = False
SOCIALACCOUNT_PROVIDERS = {
'facebook': {
'SCOPE': ['email', 'publish_stream'],
'METHOD': 'oauth2' ,
'LOCALE_FUNC': lambda request:'en_US'
},
'google': {
'SCOPE': [
'https://www.googleapis.com/auth/userinfo.profile',
'https://www.googleapis.com/auth/userinfo.email'
],
},
'linkedin': {
'SCOPE': ['r_emailaddress'],
},
'persona': {
'REQUEST_PARAMETERS': {'siteName': 'Data Dealer' },
},
}
RPC4DJANGO_RESTRICT_XMLRPC=True
DD_MONGO_DB = {
'host': 'localhost',
'port': 27017,
'max_pool_size': 32,
'db': 'somedb',
'users_collection': 'users'
}
# Serve static files through django?
DD_SERVE_STATIC = False
ALLOWED_HOSTS = ['.datadealer.net', 'datadealer']
if DEBUG == True:
EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend'
EMAIL_FILE_PATH = '/tmp/dd_auth/mail'
try:
from dd_auth.settings_local import *
except ImportError:
pass
|
#!/usr/bin/env python
# Lint as: python3
"""This directory contains local site-specific implementations."""
# Local site-specific implementations that have to be imported to be registered
# should be imported in plugins.py.
|
from .ray_util.gif_logger import GIFLogger
def get_trainable(class_name):
if class_name == 'VPredTrainable':
from .trainable_interface import VPredTrainable
return VPredTrainable
if class_name == 'BalancedCamFilter':
from .data_filter import BalancedCamFilter
return BalancedCamFilter
if class_name == 'RobotSetFilter':
from .data_filter import RobotSetFilter
return RobotSetFilter
if class_name == 'RobotObjectFilter':
from .data_filter import RobotObjectFilter
return RobotObjectFilter
if class_name == 'BatchmixFinetuning':
from .finetuning_trainable_interface import BatchmixingVPredTrainable
return BatchmixingVPredTrainable
raise NotImplementedError
|
import abc
from datetime import date, datetime
import json
from wagtail.core.models import Page
from wagtail.documents.models import Document
from wagtail.images.models import Image
from django.forms import ValidationError
from django.forms.models import model_to_dict
from django.contrib import messages
from core import helpers
from core.cache import SERVICE_NAMES_TO_ROOT_PATHS
from export_readiness.snippets import Tag
class AbstractFieldSerializer(abc.ABC):
@property
@abc.abstractmethod
def FIELD_NAME_PREFIX(self):
pass
@classmethod
def serialize_name(cls, name):
return cls.FIELD_NAME_PREFIX + name
@classmethod
def deserialize_name(cls, name):
return name.replace(cls.FIELD_NAME_PREFIX, '')
@classmethod
def serialize(cls, name, value):
return cls.serialize_name(name), cls.serialize_value(value)
@classmethod
def deserialize(cls, name, value):
return cls.deserialize_name(name), cls.deserialize_value(value)
class ImageFieldSerializer(AbstractFieldSerializer):
FIELD_NAME_PREFIX = '(image)'
@classmethod
def serialize_value(cls, value):
return value.file.name
@classmethod
def deserialize_value(cls, value):
return helpers.get_or_create_image(value).pk
class DocumentFieldSerializer(AbstractFieldSerializer):
FIELD_NAME_PREFIX = '(document)'
@classmethod
def serialize_value(cls, value):
return value.file.name
@classmethod
def deserialize_value(cls, value):
return helpers.get_or_create_document(value).pk
class ListFieldSerializer(AbstractFieldSerializer):
FIELD_NAME_PREFIX = '(list)'
@classmethod
def serialize_value(cls, value):
return ','.join(value)
@classmethod
def deserialize_value(cls, value):
return value.split(',')
class DateFieldSerializer(AbstractFieldSerializer):
FIELD_NAME_PREFIX = '(date)'
DATE_FORMAT = '%Y-%m-%d'
@classmethod
def serialize_value(cls, value):
return value.strftime(cls.DATE_FORMAT)
@classmethod
def deserialize_value(cls, value):
return datetime.strptime(value, cls.DATE_FORMAT)
class RelatedPageSerializer(AbstractFieldSerializer):
FIELD_NAME_PREFIX = '(page)'
@classmethod
def serialize_value(cls, value):
return json.dumps({
'slug': value.slug,
'service_name_value': value.specific.service_name_value
})
@classmethod
def deserialize_value(cls, value):
value = json.loads(value)
app_slug = SERVICE_NAMES_TO_ROOT_PATHS[value['service_name_value']]
app_pages = Page.objects.get(slug=app_slug).get_descendants()
try:
return app_pages.get(slug=value['slug']).specific
except Page.DoesNotExist:
raise ValidationError(
f"Related page with the slug {value['slug']} could not be "
"found in this environment. Please create it then "
"add it as one of this page's related pages."
)
class TagsSerializer(ListFieldSerializer):
FIELD_NAME_PREFIX = '(tag)'
@classmethod
def serialize_value(cls, value):
tag_names = [tag.name for tag in value.all()]
return super().serialize_value(tag_names)
@classmethod
def deserialize_value(cls, value):
tag_names = super().deserialize_value(value)
return Tag.objects.filter(name__in=tag_names)
class NoOpFieldSerializer(AbstractFieldSerializer):
FIELD_NAME_PREFIX = ''
@classmethod
def serialize_value(cls, value):
return value
@classmethod
def deserialize_value(cls, value):
return value
class UpstreamModelSerializer:
EMPTY_VALUES = ['', 'None', None]
field_serializers = {
Image: ImageFieldSerializer,
Document: DocumentFieldSerializer,
list: ListFieldSerializer,
date: DateFieldSerializer,
Page: RelatedPageSerializer,
Tag: TagsSerializer,
}
default_field_serializer = NoOpFieldSerializer
@classmethod
def get_field_serializer_by_field_value(cls, value):
if 'RelatedManager' in value.__class__.__name__:
value = value.all().first()
for field_class, serializer in cls.field_serializers.items():
if isinstance(value, field_class):
return serializer
else:
return cls.default_field_serializer
@classmethod
def get_field_serializer_by_field_name(cls, name):
for serializer in cls.field_serializers.values():
if serializer.FIELD_NAME_PREFIX in name:
return serializer
else:
return cls.default_field_serializer
@classmethod
def remove_empty(cls, data):
return {
name: value for name, value in data.items()
if value not in cls.EMPTY_VALUES
}
@classmethod
def serialize(cls, model_instance):
data = model_to_dict(model_instance, exclude=['pk', 'id', 'page_ptr'])
serialized = {}
for name in cls.remove_empty(data):
value = getattr(model_instance, name)
serializer = cls.get_field_serializer_by_field_value(value)
name, value = serializer.serialize(name=name, value=value)
serialized[name] = value
return serialized
@classmethod
def deserialize(cls, serialized_data, request):
deserialized = {}
for name, value in cls.remove_empty(serialized_data).items():
serializer = cls.get_field_serializer_by_field_name(name)
try:
name, value = serializer.deserialize(name=name, value=value)
except ValidationError as e:
messages.error(request, e.message)
else:
deserialized[name] = value
return deserialized
|
import logging
from types import FrameType
from typing import cast
from loguru import logger
class InterceptHandler(logging.Handler):
    """Forward standard-library logging records to loguru, preserving level and caller depth."""
loglevel_mapping = {
50: 'CRITICAL',
40: 'ERROR',
30: 'WARNING',
20: 'INFO',
10: 'DEBUG',
0: 'NOTSET',
}
def emit(self, record: logging.LogRecord) -> None:
try:
level = logger.level(record.levelname).name
        except (AttributeError, ValueError):
            # loguru raises ValueError for level names it does not know,
            # so fall back to the numeric mapping defined above.
            level = self.loglevel_mapping[record.levelno]
frame, depth = logging.currentframe(), 2
while frame.f_code.co_filename == logging.__file__:
frame = cast(FrameType, frame.f_back)
depth += 1
logger.opt(depth=depth, exception=record.exc_info).log(
level,
record.getMessage(),
)
|
import yaml
import argparse
import pandas as pd
from sklearn.model_selection import train_test_split
from get_data import load_config, load_data
def split(config):
split_config = config['split_data']
feature_config = config['features']
data = pd.read_csv(feature_config['path'])
x_train, x_test = train_test_split(data,
test_size=0.2, random_state=42)
x_train.to_csv(split_config['xtrain'], index = False)
x_test.to_csv(split_config['xtest'], index = False)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--config', help = 'config file path', default= 'params.yaml')
args = parser.parse_args()
config = load_config(args.config)
split(config) |
# -*- coding: utf8 -*-
import sys
from frame import log
import requests
from urllib.parse import urljoin
from bs4 import BeautifulSoup
SEARCH_URL = "https://search.jd.com/Search?"
def good_detail_parser(keyword):
params = {
"keyword": keyword,
"enc": "utf-8"
}
search_page = requests.get(SEARCH_URL, params=params).content
bs = BeautifulSoup(search_page, "lxml")
# get all good detail page href
selector = "div.p-name > a"
div = bs.select(selector)
urls = [item.attrs["href"] for item in div]
urls = [urljoin(SEARCH_URL, url) for url in urls]
return urls[0]
def jd_specification_spider(keyword):
log.info("jd specification spider: %s" % keyword)
detail_url = good_detail_parser(keyword)
detail_page = requests.get(detail_url).content
bs = BeautifulSoup(detail_page, "lxml")
selector = "div.Ptable-item"
div = bs.select(selector)
data = dict()
for table in div:
title = table.find("h3").string
dt = table.find_all("dt", {"class": None})
dd = table.find_all("dd", {"class": None})
res = dict()
for i in range(len(dt)):
desc = {dt[i].string: dd[i].string}
res.update(desc)
data.update({title: res})
return data
if __name__ == '__main__':
    if len(sys.argv) < 2:
        # A keyword argument is required; exit with a usage hint instead of failing with an IndexError.
        sys.exit("usage: %s <keyword>" % sys.argv[0])
    jd_specification_spider(sys.argv[1])
|
.table
int soma1_param
int soma1_a3
int soma2_param3
int soma_param1
int soma_param2
int soma_result
int life_a
int life_b
int main_var = 2
int main_var2
.code
soma1:
soma1:
soma1_param:
pop soma1_param
mov $2, 1
1:
mov $3, 2
2:
mov $4, 3
3:
mul $5, $3, $4
$3:
add $6, $2, $5
$2:
mov $7, 4
4:
add $8, $6, $7
$6:
mov soma1_a3, $8
$8:
return soma1_a3
soma1_a3:
soma2:
soma2:
soma2_param3:
pop soma2_param3
mov $13, 1
1:
mov $14, 2
2:
mov $15, soma2_param3
soma2_param3:
mul $16, $14, $15
$14:
add $17, $13, $16
$13:
mov $18, 4
4:
mov $19, 2
2:
mul $20, $18, $19
$18:
add $21, $17, $20
$17:
mov $22, 4
4:
mov $23, 4
4:
div $24, $22, $23
$22:
add $25, $21, $24
$21:
mov soma2_param3, $25
$25:
return soma2_param3
soma2_param3:
soma:
soma:
soma_param1:
pop soma_param1
soma_param2:
pop soma_param2
mov $31, soma_param1
soma_param1:
mov $32, soma_param2
soma_param2:
add $33, $31, $32
$31:
mov soma_result, $33
$33:
return soma_result
soma_result:
life:
life:
life_a:
pop life_a
life_b:
pop life_b
return 42
42:
main:
main:
mov $41, 2
2:
mov main_var, $41
$41:
mov $43, 3
3:
mov $44, 2
2:
add $45, $43, $44
$43:
mov main_var, $45
$45:
param 1
1:
param 2
2:
call life, 2
life:
$49:
pop $49
mov main_var, $49
$49:
param main_var
main_var:
call soma2, 1
soma2:
$53:
pop $53
mov main_var2, $53
$53:
return main_var2
main_var2:
|
#!/usr/bin/env python3
"""
Netflix Genre Scraper.
This tool scrapes the Netflix website to gather a list of available genres
and links to their respective pages. Please use sparingly to avoid annoying
the Netflix webservers.
"""
import argparse
import logging
import os.path
import shelve
import sys
from datetime import datetime, timezone
from getpass import getpass
from html.parser import HTMLParser
from urllib.parse import urljoin, urlparse
from requests import Session
LOG_FMT = {
'style': '{',
'format': '{levelname:1.1}: {message}',
}
log = logging.getLogger(__name__)
class FormParser(HTMLParser):
"""Basic serialization of HTML forms."""
def reset(self):
self.form_data = {}
self._current_form = None
super().reset()
def handle_starttag(self, tag, attrs):
attrs = dict(attrs)
if tag == 'form':
self._current_form = attrs.get('id', len(self.form_data))
self.form_data[self._current_form] = {'attrs': attrs, 'fields': {}}
log.debug('Form %s open', self._current_form)
if self._current_form is not None and 'name' in attrs:
log.debug('Form %s: %r', tag, attrs)
self.form_data[self._current_form]['fields'][attrs['name']] = attrs.get('value')
def handle_endtag(self, tag):
if tag == 'form':
log.debug('Form %s close', self._current_form)
self._current_form = None
class ProfileListParser(HTMLParser):
"""Parse "Who's Watching" profile list."""
def reset(self):
self.profiles = []
self._current_link = None
self._current_name = ''
super().reset()
def handle_starttag(self, tag, attrs):
attrs = dict(attrs)
if tag == 'a' and 'profile-link' in attrs.get('class', '').split():
self._current_link = attrs['href']
log.debug('Profile link %s open', self._current_link)
def handle_endtag(self, tag):
if tag == 'a' and self._current_link:
log.debug('Profile link %s close', self._current_link)
self.profiles.append((self._current_name.strip(), self._current_link))
self._current_link = None
self._current_name = ''
def handle_data(self, data):
if self._current_link:
self._current_name += data
class CaptureParser(HTMLParser):
"""
Capture data inside a set of tags, chosen according to given criteria.
Subclass and define self.criteria
"""
@staticmethod
def criteria(tag, attrs):
raise NotImplementedError()
def reset(self):
self.strings = []
self._current = ''
self._inside = 0
super().reset()
def handle_starttag(self, tag, attrs):
if self._inside > 0:
self._inside += 1
else:
attrs = dict(attrs)
if self.criteria(tag, attrs):
self._inside += 1
log.debug('%s %s open', self.__class__.__name__[:-6], tag)
def handle_endtag(self, tag):
if self._inside > 0:
self._inside -= 1
if self._inside == 0:
log.debug('%s %s close', self.__class__.__name__[:-6], tag)
self.strings.append(self._current)
self._current = ''
def handle_data(self, data):
if self._inside > 0:
log.debug('Capture %r', data)
self._current += data
class ErrorMessageParser(CaptureParser):
"""Find error messages on page."""
@staticmethod
def criteria(tag, attrs):
return 'ui-message-error' in attrs.get('class', '').split()
class TitleParser(CaptureParser):
"""Find genre title on genre page."""
@staticmethod
def criteria(tag, attrs):
return 'genreTitle' in attrs.get('class', '').split()
class Scraper(object):
"""
The scraping engine.
Initialize with credentials, then run Scraper.genre_scan to scrape genre
pages.
"""
base_url = 'https://www.netflix.com/'
login_path = '/login'
genre_cache_fn = os.path.join(os.path.dirname(__file__), '.genrecache')
def __init__(self, auth, profile=None):
self.auth = auth
self.profile = profile
self.session = Session()
def is_login(self, url):
parsed_url = urlparse(url)
return parsed_url.path.lower().endswith(self.login_path.lower())
def login_if_required(self, response):
if not self.is_login(response.url):
return response
form_parser = FormParser()
form_parser.feed(response.text)
form_parser.close()
forms = form_parser.form_data
for form in forms.values():
if form['fields'].get('action') == 'loginAction':
log.debug('Login form: %r', form)
url = urljoin(response.url, form['attrs']['action'])
data = dict(form['fields'])
data.update({'email': self.auth[0], 'password': self.auth[1]})
response = self.session.request(form['attrs']['method'], url, data=data)
response.raise_for_status()
if self.is_login(response.url):
error_parser = ErrorMessageParser()
error_parser.feed(response.text)
error_parser.close()
raise RuntimeError(error_parser.strings[1]) # 0 is javascript warning
else:
return response
def choose_profile_if_required(self, response):
profile_list_parser = ProfileListParser()
profile_list_parser.feed(response.text)
profile_list_parser.close()
profiles = profile_list_parser.profiles
names = []
for name, path in profiles:
names.append(name)
if self.profile is None or name.lower() == self.profile.lower():
url = urljoin(response.url, path)
log.debug('Choose profile %s (%s)', name, url)
response = self.session.get(url)
response.raise_for_status()
break
else:
if names:
raise ValueError('Profile {} not found in {}'.format(self.profile, names))
return response
def get(self, path):
"""Get an arbitrary page, logging in as necessary, return response."""
url = urljoin(self.base_url, path)
response = self.session.get(url)
response.raise_for_status()
return self.choose_profile_if_required(
self.login_if_required(
response
)
)
def login(self):
"""Perform login."""
log.info('Login')
return self.get(self.login_path)
def genre_scan(self, min=1, max=100000, fresh=False):
"""
Scan for genres.
min and max define range of genre numbers to scan for.
Returns an iterator of (genre number, genre title, url) for each genre
found.
This scan creates and uses a cache to store previously-found genres and
avoid making unnecessary network requests. If you want to refresh the
cache, set fresh=True.
"""
with shelve.open(self.genre_cache_fn) as genre_cache:
if fresh:
genre_cache.clear()
for number in range(min, max):
cache_key = str(number)
try:
value = genre_cache[cache_key] # shelf needs a string key
except Exception as exc:
if not isinstance(exc, KeyError):
                        log.exception('Error retrieving %s from cache', cache_key)
path = '/browse/genre/{}'.format(number)
try:
response = self.get(path)
except Exception:
log.warning('GET %s error', path, exc_info=True)
continue
title_parser = TitleParser()
title_parser.feed(response.text)
title_parser.close()
if len(title_parser.strings) > 0:
title = title_parser.strings[0]
log.info('Genre %d %s', number, title)
genre_cache[cache_key] = (title, response.url)
yield number, title, response.url
else:
genre_cache[cache_key] = None
else:
log.debug('Found %s in cache: %r', cache_key, value)
if value:
log.info('Genre %d %s [cached]', number, value[0])
yield (number,) + value
def main():
"""Entrypoint to genre scraper script."""
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('-e', '--email')
arg_parser.add_argument('-p', '--password')
arg_parser.add_argument('-P', '--profile')
arg_parser.add_argument('-v', '--verbose', action='count', default=0)
arg_parser.add_argument('--fresh', action='store_true', default=False)
arg_parser.add_argument('min', type=int, default=1)
arg_parser.add_argument('max', type=int, default=5000)
ns = arg_parser.parse_args()
if ns.email:
email = ns.email
else:
print('Email:', end=' ', file=sys.stderr)
email = input()
password = ns.password or getpass()
profile = ns.profile
log_level = logging.WARNING - 10 * ns.verbose
logging.basicConfig(level=log_level, **LOG_FMT)
scraper = Scraper((email, password), profile=profile)
# preempt login to raise errors early
scraper.login()
started = datetime.now(timezone.utc)
scan = scraper.genre_scan(ns.min, ns.max, fresh=ns.fresh)
print('# Genres {}–{}'.format(ns.min, ns.max))
print('')
try:
for number, name, url in scan:
print('* {} ([#{}]({}))'.format(name, number, url))
except (KeyboardInterrupt, SystemExit):
log.warning('Scan interrupted.')
finally:
print('')
print('_Generated on {:%B %d %Y %H:%M:%S %Z}_'.format(started))
if __name__ == '__main__':
main()
|
from __future__ import unicode_literals
import sys
def get_columns_width(rows):
width = {}
for row in rows:
for (idx, word) in enumerate(map(unicode, row)):
width.setdefault(idx, 0)
width[idx] = max(width[idx], len(word))
return width
def pprint_table(rows):
rows = list(rows)
width = get_columns_width(rows)
def print_separator():
if not rows:
return
sys.stdout.write('+')
for (idx, word) in enumerate(map(unicode, rows[0])):
sys.stdout.write('-{sep}-+'.format(sep=('-' * width[idx])))
print ''
print_separator()
for row_idx, row in enumerate(rows):
sys.stdout.write('|')
for (idx, word) in enumerate(map(unicode, row)):
sys.stdout.write(' {word:{width}} |'.format(word=word, width=(width[idx]))),
print ''
if row_idx == 0:
# We just printed the table header
print_separator()
print_separator()
def pprint_kv(items, separator=':', padding=2, offset=0, skip_empty=True):
if not items:
return
width = max([len(item[0]) for item in items if item[1] or not skip_empty]) + padding
for item in items:
(key, value) = item
if not value:
continue
if isinstance(value, list) or isinstance(value, tuple):
print '{align}{0}:'.format(key, align=' ' * offset)
pprint_kv(value, offset=offset + 2)
else:
            print '{align}{key:{width}}{value}'.format(
align=' ' * offset,
key='{0}{1}'.format(key, separator),
value=value,
width=width)
|
import torch
from norse.torch.functional.lif_refrac import LIFRefracParameters
from dataclasses import dataclass
@dataclass
class EILIFParameters:
tau_ex_syn_inv: torch.Tensor = torch.as_tensor(
1 / (20 * 1e-3), dtype=torch.double)
tau_ih_syn_inv: torch.Tensor = torch.as_tensor(
1 / (50 * 1e-3), dtype=torch.double)
tau_mem_inv: torch.Tensor = torch.as_tensor(
1 / (20 * 1e-3), dtype=torch.double)
tau_adaptation_inv: torch.Tensor = torch.as_tensor(
1 / (200 * 1e-3), dtype=torch.double)
R: torch.Tensor = torch.as_tensor(10 * 1e-3, dtype=torch.double)
v_leak: torch.Tensor = torch.as_tensor(-65.0 * 1e-3, dtype=torch.double)
v_th: torch.Tensor = torch.as_tensor(-50.0 * 1e-3, dtype=torch.double)
v_reset: torch.Tensor = torch.as_tensor(-65.0 * 1e-3, dtype=torch.double)
dale: bool = True
ei_ratio: float = 0.8
beta: float = 1.6
sfa_ratio: float = 0.4
rho: float = 1.5
current_base_scale: float = 34
current_base_lower: float = 1.5
current_base_upper: float = 2.5
current_base_mu: float = 2.
current_base_sigma: float = 0.1
rand_current_std: float = 0.0015
rand_voltage_std: float = 0.0015
rand_walk_alpha: float = 1.
method: str = "super"
alpha: float = 1000.
@dataclass
class EILIFRefracParameters(LIFRefracParameters):
lif: EILIFParameters = EILIFParameters()
rho_reset: torch.Tensor = torch.as_tensor(3.0)
@dataclass
class pret_settings:
region: str
scale: float
start_ts: int
end_ts: int |
import torchvision
import torchvision.transforms as transforms
import numpy as np
from .dataset_base import BaseDataset
class IMBALANCECIFAR10(torchvision.datasets.CIFAR10, BaseDataset):
"""Imbalanced Cifar-10 Dataset
References
----------
https://github.com/kaidic/LDAM-DRW/blob/master/imbalance_cifar.py
"""
cls_num = 10
def __init__(self,
root,
imb_type='exp',
imb_factor=0.01,
rand_number=0,
train=True,
transform=None,
target_transform=None,
download=False):
super(IMBALANCECIFAR10, self).__init__(root, train, transform,
target_transform, download)
np.random.seed(rand_number)
img_num_list = self.get_img_num_per_cls(self.cls_num, imb_type,
imb_factor)
self.gen_imbalanced_data(img_num_list)
class IMBALANCECIFAR100(IMBALANCECIFAR10):
"""`CIFAR100 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset.
This is a subclass of the `CIFAR10` Dataset.
"""
base_folder = 'cifar-100-python'
url = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
filename = "cifar-100-python.tar.gz"
tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85'
train_list = [
['train', '16019d7e3df5f24257cddd939b257f8d'],
]
test_list = [
['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc'],
]
meta = {
'filename': 'meta',
'key': 'fine_label_names',
'md5': '7973b15100ade9c7d40fb424638fde48',
}
cls_num = 100
if __name__ == '__main__':
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
trainset = IMBALANCECIFAR10(root='./data',
train=True,
download=True,
transform=transform)
trainloader = iter(trainset)
data, label = next(trainloader)
import pdb
pdb.set_trace()
|
from django.conf import settings
from django.contrib import messages
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ImproperlyConfigured
from django.shortcuts import redirect
from django.urls import reverse
from django.views.generic import TemplateView
from django.core.exceptions import ObjectDoesNotExist
from logging import getLogger
from wagtail_airtable.forms import AirtableImportModelForm
from wagtail_airtable.utils import get_model_for_path
from wagtail_airtable.management.commands.import_airtable import Importer
logger = getLogger(__name__)
class AirtableImportListing(TemplateView):
"""
Loads options for importing Airtable data
"""
template_name = "wagtail_airtable/airtable_import_listing.html"
http_method_names = ["get", "post"]
def post(self, request, *args, **kwargs):
form = AirtableImportModelForm(request.POST)
if form.is_valid():
model_label = form.cleaned_data["model"]
importer = Importer(models=[model_label], options={"verbosity": 1})
importer.run()
message = f"{importer.created} items created. {importer.updated} items updated. {importer.skipped} items skipped."
messages.add_message(
request, messages.SUCCESS, f"Import succeeded with {message}"
)
else:
messages.add_message(request, messages.ERROR, "Could not import")
return redirect(reverse("airtable_import_listing"))
def _get_base_model(self, model):
"""
For the given model, return the highest concrete model in the inheritance tree -
e.g. for BlogPage, return Page
"""
if model._meta.parents:
model = model._meta.get_parent_list()[0]
return model
def get_validated_models(self):
"""Get models from AIRTABLE_IMPORT_SETTINGS, validate they exist, and return a list of tuples.
returns:
[
                ('Credit Cards', 'creditcards.CreditCard', True, []),
                ('..', '..', '..', '..'),
]
"""
airtable_settings = getattr(settings, "AIRTABLE_IMPORT_SETTINGS", {})
# Loop through all the models in the settings and create a new dict
# of the unique settings for each model label.
# If settings were used more than once the second (3rd, 4th, etc) common settings
# will be bulked into a "grouped_models" list.
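        # For illustration (hypothetical labels): if 'app.ModelA' and 'app.ModelB'
        # share the very same settings dict object, the loop below yields
        #   models == {'app.ModelA': {..., 'grouped_models': ['app.ModelB']}}
        # and 'app.ModelB' gets no entry of its own.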
tracked_settings = []
models = {}
for label, model_settings in airtable_settings.items():
if model_settings not in tracked_settings:
tracked_settings.append(model_settings)
models[label] = model_settings
models[label]["grouped_models"] = []
else:
for label2, model_settings2 in models.items():
if model_settings is model_settings2:
models[label2]["grouped_models"].append(label)
# Validated models are models that actually exist.
# This way fake models can't be added.
validated_models = []
for label, model_settings in models.items():
# If this model is allowed to be imported. Default is True.
if model_settings.get("AIRTABLE_IMPORT_ALLOWED", True):
# A temporary variable for holding grouped model names.
# This is added to the validated_models item later.
# This is only used for displaying model names in the import template
_grouped_models = []
# Loop through the grouped_models list in each setting, validate each model,
# then add it to the larger grouped_models
if model_settings.get("grouped_models"):
for grouped_model_label in model_settings.get("grouped_models"):
if "." in grouped_model_label:
model = get_model_for_path(grouped_model_label)
if model:
_grouped_models.append(model._meta.verbose_name_plural)
if "." in label:
model = get_model_for_path(label)
if model:
                    # Append a 4-tuple to the validated_models with the:
# (1. Models verbose name, 2. Model label, 3. is_airtable_enabled from the model, and 4. List of grouped models)
airtable_enabled_for_model = getattr(
model, "is_airtable_enabled", False
)
validated_models.append(
(
model._meta.verbose_name_plural,
label,
airtable_enabled_for_model,
_grouped_models,
)
)
else:
raise ImproperlyConfigured(
"%r is not recognised as a model name." % label
)
return validated_models
def get_context_data(self, **kwargs):
"""Add validated models from the AIRTABLE_IMPORT_SETTINGS to the context."""
return {"models": self.get_validated_models()}
|
from drf_yasg import openapi
from rest_framework import status
from . import sample_error_response
class SampleDistrictResponses:
def __generate_sample_districts_response(self, status_code):
"""Generates sample response for districts response"""
response = openapi.Response(
description=f"Example of {status_code} Response",
examples={
"application/json": {
"code": status_code,
"status": True,
"districts": [
{
"id": 1,
"district_name": "District Name 1",
"state_id": 1,
},
{
"id": 2,
"district_name": "District Name 1",
"state_id": 1,
},
],
}
},
)
return response
def __generate_sample_district_detail_response(self, status_code):
"""Generates sample response for district detail"""
response = openapi.Response(
description=f"Example of {status_code} Response",
examples={
"application/json": {
"code": status_code,
"status": True,
"district": {
"id": 1,
"state_name": "State Name 1",
},
}
},
)
return response
def get_all_districts_response(self):
"""Sample response for all districts"""
return {
"200": self.__generate_sample_districts_response(status.HTTP_200_OK),
}
def get_district_detail_response(self):
"""Sample response for district detail"""
return {
"200": self.__generate_sample_district_detail_response(status.HTTP_200_OK),
"400": sample_error_response.SampleErrorResponses().generate_sample_single_error_response(
status.HTTP_400_BAD_REQUEST
),
}
|
from typing import List
class Solution:
    # Returns the median of two sorted arrays by concatenating and sorting them.
    def XXX(self, nums1: List[int], nums2: List[int]) -> float:
if nums1 or nums2:
nums = sorted(nums1 + nums2)
n = len(nums)
if n%2 == 0:
m = int(n/2 -1)
n = m+1
return (nums[m] + nums[n])/2
else:
m = n//2
return nums[m]
|
import os
import logging
from mkdocs import utils
from mkdocs.config import config_options, Config
from mkdocs.plugins import BasePlugin
log = logging.getLogger('mkdocs')
class LocalSearchPlugin(BasePlugin):
config_scheme = (
('promise_delay', config_options.Type(int, default=0)),
)
def on_post_build(self, config, **kwargs):
if 'search' in config['plugins']:
output_base_path = os.path.join(config['site_dir'], 'search')
json_output_path = os.path.join(output_base_path, 'search_index.json')
js_output_path = os.path.join(output_base_path, 'search_index.js')
            # Open the JSON search index file
            with open(json_output_path, "r") as f:
                # Wrap the index in a Promise that resolves with its contents after promise_delay ms
                search_index = "const local_index = " + f.read() + "; var search = { index: new Promise(resolve => setTimeout(() => resolve(local_index), " + str(self.config['promise_delay']) + ")) }"
            # Write the wrapped index back, then rename the JSON file to JS
            utils.write_file(search_index.encode('utf-8'), json_output_path)
            os.rename(json_output_path, js_output_path)
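            # Schematically, the generated search_index.js then looks like (illustrative shape only):
            #   const local_index = { ...contents of search_index.json... };
            #   var search = { index: new Promise(resolve =>
            #       setTimeout(() => resolve(local_index), <promise_delay>)) }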
else:
log.warning('localsearch: Missing search plugin. You must add both search and localsearch to the list of plugins in mkdocs.yml.') |
"""
#What's that ?
A set of scripts that demonstrate the use of Tensorflow experiments and estimators on different data types for various tasks
@brief : the main script that enables training, validation and serving of TensorFlow based models, merging all needs in a
single script to train, evaluate, export and serve,
taking large inspiration from the official TensorFlow demos.
@author : Alexandre Benoit, LISTIC lab, FRANCE
Several ideas are put together:
-estimators to manage training, validation and export in an easier way
-using moving averages to store parameters with values smoothed along the last training steps (FIXME : ensure those values are used for real by the estimator, actually the graph shows 2 parameter savers...).
-visualization including embeddings projections to observe some data projections on the TensorBoard
-tensorflow-serving api use to serve the model and dynamically load updated models
-some tensorflow-serving client codes to reuse the trained model on single or streaming data
#How to use it?
The main script, experiments_manager.py, can be used in 3 modes; here are some command examples:
1. train a model in a context specified in a parameters script such as mysettings_1D_experiments.py:
-> python experiments_manager.py --usersettings=mysettings_1D_experiments.py
2. start a tensorflow server on the trained/training model :
2.a if tensorflow_model_server is installed on the system
-> python experiments_manager.py --start_server --model_dir=experiments/1Dsignals_clustering/my_test_2018-01-03--14:40:53
2.b if tensorflow_model_server is installed on a singularity container
-> python experiments_manager.py --start_server --model_dir=experiments/1Dsignals_clustering/my_test_2018-01-03--14:40:53 -psi=/path/to/tf_server.sif
3. interact with the tensorflow server, sending input buffers and receiving answers
-> python experiments_manager.py --predict --model_dir=experiments/1Dsignals_clustering/my_test_2018-01-03--14\:40\:53/
NOTE : once trained (or during training), start TensorBoard to parse the logs of
the experiments folder (provided example is experiments/1Dsignals_clustering):
from the scripts directory using command: tensorboard --logdir=experiments/1Dsignals_clustering
Then, open a web browser and reach http://127.0.0.1:6006/ to monitor training
values and observe the obtained embeddings
#DESIGN:
1. The main code for training, validation and prediction is specified in the main script (experiments_manager.py).
2. Most of the use case specific parameters and Input/Output functions have been
moved to a separate settings script such as 'mysettings_1D_experiments.py' that
is targeted when starting the script (this filename is set in var FLAGS.usersettings in the main script).
3. The model to be trained and served is specified in a different script targeted in the settings file.
#KNOWN ISSUES :
This script has some known problems, any suggestion is welcome:
-moving average parameters reloading for model serving is not optimized, this should be enhanced.
-for now tensorflow_server only works on CPU so using GPU only for training and validation. Track : https://github.com/tensorflow/serving/issues/668
#TODO :
To adapt to a new use case, just duplicate the closest mysettingsxxx file and adjust the configuration.
For any experiment, the availability of all the required fields in the settings file is checked by the experiments_settings_checker.py script.
You can have a look there to ensure you prepared everything right.
As a reminder, here are the function prototypes:
-define a model to be trained and served in a specific file and follow this prototype:
 --report the model name in the settings file using variable name model_file or specify a premade estimator using variable name premade_estimator
 --def model( usersettings) #receives the external parameters that may be used to set up the model (number of classes and so on, depending on the task)
           mode), #mode set to switch between train, validate and inference modes
                  wrt tf.estimator.ModeKeys values
          => it then returns a tf.keras.Model
NOTE : custom models with specific loss can be used, tutorial here https://www.tensorflow.org/guide/keras/customizing_what_happens_in_fit
-def data_preprocess(features, model_placement)
-def postprocessing_before_export_code(code)
-def postprocessing_before_export_predictions(predictions)
-def getOptimizer(model, loss, learning_rate, global_step)
-def get_total_loss(inputs, predictions, labels, embedding_code, weights_loss)
-def get_validation_summaries(inputs, predictions, labels, embedding_code)
-def get_eval_metric_ops(inputs, predictions, labels, embedding_code)
-def get_input_pipeline_train_val(batch_size, raw_data_files_folder, shuffle_batches)
-def get_input_pipeline_serving()
-define the Client_IO class that presents at least the following methods (see the sketch after this list):
---def __init__(self, debugMode):
---def getInputData(self, idx):
---def decodeResponse(self, result):
---def finalize():
-------> Note: the finalize method will be called once the number of expected
iterations is reached or if a StopIteration exception is raised by the client
-OPTIONAL: add the dictionary named 'hparams' in this settings file to carry those specific hyperparameters to the model
and to complete the session folder name to facilitate experiment tracking and comparison
Some examples of such functions are provided in the README.md and in the versioned examples folder
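For illustration, a minimal Client_IO skeleton could look like the following (method bodies and
the input shape are placeholder assumptions; constructor arguments mirror the call made in do_inference below):
  class Client_IO:
    def __init__(self, clientInitSpecs, debugMode):
      #prepare data sources, buffers, display, etc.
      pass
    def getInputData(self, idx):
      #must return a dictionary of numpy arrays whose keys match usersettings.served_input_names
      return {'input': np.zeros((1, 240), dtype=np.float32)}
    def decodeResponse(self, result):
      #decode the gRPC PredictResponse received from the server
      pass
    def finalize(self):
      #cleanup once the expected number of iterations is reached or StopIteration is raised
      pass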
"""
#script imports
from tools.experiment_settings import ExperimentSettings
import tools.experiments_settings_surgery
import os, shutil
import datetime, time
import tensorflow as tf
import numpy as np
import pandas as pd
import importlib
import imp
import copy
import configparser
try:
import tensorflow_addons as tfa
except:
print('WARNING, tensorflow_addons could not be loaded, this may generate errors for model optimization but should not impact model serving')
federated_learning_available=False
try:
import flwr as fl
federated_learning_available=True
except ModuleNotFoundError as e :
  print('WARNING, Flower lib not present, this may impact distributed learning if required. Error=',e)
from tensorflow.keras import mixed_precision
#constants
SETTINGSFILE_COPY_NAME='experiment_settings.py'
WEIGHTS_MOVING_AVERAGE_DECAY=0.998
# Set default flags for the output directories
#manage input commands
import argparse
parser = argparse.ArgumentParser(description='demo_semantic_segmentation')
parser.add_argument("-m","--model_dir", default=None,
help="Output directory for model and training stats.")
parser.add_argument("-d","--debug", action='store_true',
help="set to activate debug mode")
parser.add_argument("-p","--predict", action='store_true',
help="Switch to prediction mode")
parser.add_argument("-l","--predict_stream", default=0, type=int,
help="set the number of successive predictions, infinite loop if <0")
parser.add_argument("-s","--start_server", action='store_true',
help="start the tensorflow server on the machine to run predictions")
parser.add_argument("-psi","--singularity_tf_server_container_path", default='',
help="start the tensorflow server on a singularity container to run predictions")
parser.add_argument("-u","--usersettings",
help="filename of the settings file that defines an experiment")
parser.add_argument("-r","--restart_interrupted", action='store_true',
help="Set to restart an interrupted session, model_dir option should be set")
parser.add_argument("-g","--debug_server_addresses", action='store_true',
default="127.0.0.1:2333",
help="Set here the IP:port to specify where to reach the tensorflow debugger")
parser.add_argument("-pid","--procID", default=None,
help="Specifiy here an ID to identify the process (useful for federated training sessions)")
parser.add_argument("-c","--commands", action='store_true',
help="show command examples")
FLAGS = parser.parse_args()
class MyCustomModelSaverExporterCallback(tf.keras.callbacks.ModelCheckpoint):
def __init__(self,
filepath,
settings,
monitor='val_loss',
verbose=1,
save_best_only=False,
save_weights_only=False,
mode='auto',
save_freq='epoch',
**kwargs):
#init tf.keras.callbacks.ModelCheckpoint parent
super(MyCustomModelSaverExporterCallback, self).__init__( filepath,
monitor,
verbose,
save_best_only,
save_weights_only,
mode,
save_freq,
)
self.settings=settings
self.settings.model_export_filename=settings.sessionFolder+'/exported_models'
def on_epoch_end(self, epoch, logs=None):
#call parent function
print('on_epoch_end, saving checkpoint...')
super(MyCustomModelSaverExporterCallback, self).on_epoch_end(epoch, logs)
if logs is None:
print('WARNING, no logs dict is provided to ModelCheckpoint.on_epoch_end, checkpointing on best epoch will not work')
if self.save_freq == 'epoch':
try:
if False:#self.model._in_multi_worker_mode():
# Exclude training state variables in user-requested checkpoint file.
with self._training_state.untrack_vars():
self._export_model(epoch=epoch, logs=logs)
else:
self._export_model(epoch=epoch, logs=logs)
except Exception as e:
print('Model exporting failed for some reason',e)
print('Epoch checkpoint save and export processes done.')
def _export_model(self, epoch, logs=None):
print('Exporting model...')
current = logs.get(self.monitor)
if current==self.best:
print('Saving complete keras model thus enabling fitting restart as well as model load and predict')
self.model.save(self.settings.sessionFolder+'/checkpoints/model_epoch{version}'.format(version=epoch))
print('EXPORTING A NEW MODEL VERSION FOR SERVING')
print('Exporting model at epoch {}.'.format(epoch))
exported_module=usersettings.get_served_module(self.model, self.settings.model_name)
if not(hasattr(exported_module, 'served_model')):
raise ValueError('Experiment settings file MUST have \'served_model\' function with @tf.function decoration.')
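      #NOTE (illustrative sketch only, not the actual user code): get_served_module is expected to
      #return a tf.Module exposing a 'served_model' tf.function that wraps the trained model, e.g.:
      #  class ServedModule(tf.Module):
      #    def __init__(self, model):
      #      self.model=model
      #    @tf.function(input_signature=[tf.TensorSpec(shape=[None, 240], dtype=tf.float32)]) #shape is an assumption
      #    def served_model(self, x):
      #      return {'prediction': self.model(x)}
      #the exact input signature and outputs depend on the experiment settings file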
output_path='{folder}/{version}'.format(folder=self.settings.model_export_filename, version=epoch)
signatures={self.settings.model_name:exported_module.served_model}
try:
print('Exporting serving model relying on tf.saved_model.save')
tf.saved_model.save(
exported_module,
output_path,
signatures=signatures,
options=None
)
print('Export OK')
except Exception as e:
print('Failed to export serving model relying on method:', e)
#new keras approach
'''try:
print('Exporting serving model relying on tf.keras.models.save_model')
tf.keras.models.save_model(
exported_module,
filepath='{folder}v2/{version}'.format(folder=self.settings.model_export_filename, version=epoch),
overwrite=True,
include_optimizer=False,
save_format=None,
signatures=signatures,
options=None,
)
except Exception as e:
print('Failed to export serving model relying on method:',e)
export to TensorRT, WIP
refer to https://www.tensorflow.org/api_docs/python/tf/experimental/tensorrt/Converter
and
https://docs.nvidia.com/deeplearning/frameworks/tf-trt-user-guide/index.html
'''
try:
'''params=None
if True:#precision_mode is not None:
precision_mode='FP16'
#tensorflow doc/issue ConversionParams not found
params = tf.experimental.tensorrt.ConversionParams(
precision_mode=precision_mode,
# Set this to a large enough number so it can cache all the engines.
maximum_cached_engines=16)
converter = tf.experimental.tensorrt.Converter(
input_saved_model_dir=output_path,
input_saved_model_signature_key=signatures,
conversion_params=params)
#NVIDIA doc:
from tensorflow.python.compiler.tensorrt import trt_convert as trt
conversion_params = trt.DEFAULT_TRT_CONVERSION_PARAMS
print('conversion_params',conversion_params)
conversion_params = conversion_params._replace(
max_workspace_size_bytes=(1<<32))
conversion_params = conversion_params._replace(precision_mode="FP16")
conversion_params = conversion_params._replace(maximum_cached_engines=100)
print('creating converter')
converter = tf.experimental.tensorrt.Converter(#trt.TrtGraphConverterV2(
input_saved_model_dir=output_path,
input_saved_model_signature_key=signatures,
conversion_params=conversion_params)
print('converting')
converter.convert()
print('saving')
converter.save(output_path+'_trt')
print('Exported model to NVIDIA TensorRT')
'''
except Exception as e:
print('Failed to export to TensorRT but original saved model has been exported. Reported error:',e)
print('Model export OK at epoch {}.'.format(epoch))
older_versions=os.listdir(self.settings.model_export_filename)
print('Available model versions:',older_versions)
else:
print('Model was not exported since no performance increase has been reported')
# Custom generic functions applied when running experiments
def check_GPU_available(usersettings):
'''check GPU requirements vs availability: if usersettings.used_gpu_IDs is not empty, then check GPU availability accordingly
Args:
usersettings, the experiments settings defined by the user
Raises SystemError if no GPU available
'''
gpu_workers_nb=0
print()
print('*** GPU devices detection ***')
  # let TensorFlow automatically choose an existing and supported device to run the operations in case the specified one doesn't exist
tf.config.set_soft_device_placement(True)
if len(usersettings.used_gpu_IDs)>0:
device_name = tf.test.gpu_device_name()
print('Found GPU at: {}'.format(device_name))
gpus = tf.config.list_physical_devices('GPU')
#-> first check availability
if len(gpus) ==0 and len(usersettings.used_gpu_IDs):
print('Could not find any GPU, trying to reload driver...')
#-> first try to wake it up
os.system("nvidia-modprobe -u -c=0")
gpus = tf.config.list_physical_devices('GPU')
if len(gpus) ==0 and len(usersettings.used_gpu_IDs):
print('No GPU found')
raise SystemError('Required GPU(s) not found')
print('Found GPU devices:', gpus)
if gpus:
# Restrict TensorFlow to only use the first GPU
visible_devices=[gpus[id] for id in usersettings.used_gpu_IDs]
print('Setting visible devices:',visible_devices)
try:
tf.config.set_visible_devices(visible_devices, 'GPU')
logical_gpus = tf.config.list_logical_devices('GPU')
for gpuID in range(len(gpus)):
print('Found GPU:', gpuID)
#tf.config.experimental.set_memory_growth(gpus[gpuID], True)
if tf.test.gpu_device_name() != '/device:GPU:0':
print('WARNING: GPU device not found.')
else:
print('SUCCESS: Found GPU: {}'.format(tf.test.gpu_device_name()))
#for gpuID in range(len(gpus)):
# tf.config.experimental.set_memory_growth(gpus[gpuID], True)
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPU")
gpu_workers_nb=len(logical_gpus)
except RuntimeError as e:
# Visible devices must be set before GPUs have been initialized
print(e)
else:
print('No GPU required for this experiment (usersettings.used_gpu_IDs is empty)')
return gpu_workers_nb
def loadModel_def_file(sessionFolder):
''' basic method to load the model targeted by usersettings.model_file
Args: sessionFolder, the path to the model file
Returns: a keras model
'''
model_path=os.path.join(sessionFolder,os.path.basename(usersettings.model_file))
try:
model_def=imp.load_source('model_def', model_path)#importlib.import_module("".join(model_path.split('.')[:-1]))#
except Exception as e:
raise ValueError('loadModel_def_file: Failed to load model file {model} from sessionFolder {sess}, error message={err}'.format(model=usersettings.model_file, sess=sessionFolder, err=e))
model=model_def.model
print('loaded model file {file}'.format(file=model_path))
return model
# Define and run experiment ###############################
def run_experiment(usersettings):
print('Running an experiment....')
# import and load tensorflow tools here
from tensorflow.python import debug as tf_debug
from tensorflow.keras.models import load_model
from tensorflow.keras.utils import plot_model
from tensorboard.plugins.hparams import api as hp
#####################################
#check GPU requirements vs availability
if usersettings.debug:
tf.debugging.set_log_device_placement(True)
gpu_workers_nb=check_GPU_available(usersettings)
#####################################
#prepare session
tf.keras.backend.clear_session() # We need to clear the session to enable JIT in the middle of the program.
tf.random.set_seed(usersettings.random_seed)
#(de)activate XLA graph optimization
tf.config.optimizer.set_jit(usersettings.useXLA)
if usersettings.useXLA:
print('Forcing XLA on the CPU side')
os.environ['TF_XLA_FLAGS']='--tf_xla_cpu_global_jit'
if usersettings.debug and usersettings.use_profiling:
os.environ['XLA_FLAGS']='--xla_hlo_profile'
else:
os.environ['TF_XLA_FLAGS']=''
#####################################
  # define the input pipelines (train/val)
with tf.name_scope('Input_pipeline'):
train_data =usersettings.get_input_pipeline(raw_data_files_folder=usersettings.raw_data_dir_train,
isTraining=True)
val_data = usersettings.get_input_pipeline(raw_data_files_folder=usersettings.raw_data_dir_val,
isTraining=False)
try:
print('Train dataset size=', train_data.cardinality().numpy())
print('Validation dataset size=', val_data.cardinality().numpy())
train_iterations_per_epoch=train_data.n//usersettings.batch_size
val_iterations_per_epoch=val_data.n//usersettings.batch_size
except Exception as e:
print('Could not estimate dataset sizes from input data pipeline, relying on settings nb_train_samples and nb_val_samples.')
train_iterations_per_epoch=usersettings.nb_train_samples//usersettings.batch_size
val_iterations_per_epoch=usersettings.nb_val_samples//usersettings.batch_size
#####################################
#create the model from the user defined model file
# -> (script targeted by usersettings.model_file)
if usersettings.recoverFromCheckpoint is False:
print('**** Training from scratch...')
model_scope=tf.name_scope('model')
if gpu_workers_nb>1:
print('Deploying model in a multi/distributed GPU training scheme')
distribution_strategy=getattr( tf.distribute, usersettings.distribution_strategy)()
print('-> Chosen distribution strategy :',distribution_strategy)
model_scope=distribution_strategy.scope()#(model_scope, distribution_strategy.scope())
usersettings.summary()
with model_scope:
#load model
model=loadModel_def_file(usersettings.sessionFolder)(usersettings)
#setup training
learning_rate=usersettings.get_learningRate()
loss=usersettings.get_total_loss(model)
optimizer=usersettings.get_optimizer(model, loss, learning_rate)
if usersettings.weights_moving_averages:
optimizer=tfa.optimizers.MovingAverage(optimizer, average_decay=0.9999, name='weights_ema')
metrics=usersettings.get_metrics(model, loss)
if usersettings.enable_mixed_precision:
# use AMP
print('Using Automatic Mixed Precision along the optimization process')
print('### HINT : to make sure Tensor cores are used, and obtain faster processing, ensure that your kernels are multiples of 8 !')
mixed_precision.set_global_policy('mixed_float16')
#optimizer = tf.train.experimental.enable_mixed_precision_graph_rewrite(optimizer)
print('Compiling model...')
exp_loss_weights=None
if hasattr(usersettings, 'get_loss_weights'):
exp_loss_weights=usersettings.get_loss_weights()
model.compile(optimizer=optimizer,
loss=loss,# you can use a different loss on each output by passing a dictionary or a list of losses
loss_weights=exp_loss_weights,
metrics=metrics) #can specify per output metrics : metrics={'output_a': 'accuracy', 'output_b': ['accuracy', 'mse']}
try:
init_model_name=usersettings.sessionFolder+'/checkpoints/model_epoch0'
print('******************************************')
print('Saving the model at init state in ',init_model_name, 'issues at this point should warn you before moving to long training sessions...')
model.save(init_model_name)
print('initial model saving done')
print('******************************************')
except Exception as e:
        raise ValueError('Could not serialize the model; are all model elements defined prior to model.compile serializable and do they implement get_config(self)? Error message=',e)
else:#recovering from checkpoint
print('**** Restoring from checkpoint...')
available_models=os.listdir(usersettings.sessionFolder+'/checkpoints')
print('All available model=', available_models)
loaded_model=usersettings.sessionFolder+'/checkpoints/'+available_models[-1]
print('loading ',loaded_model)
model = tf.keras.models.load_model(loaded_model)
# generate the model description
#-> as an image in the session folder
model_name_str=usersettings.model_name
try:
'''from tensorflow.keras.layers import Layer
model._layers = [
layer for layer in model._layers if isinstance(layer, Layer)
]'''
plot_model(model,
to_file=usersettings.sessionFolder+'/'+model_name_str+'.png',
show_shapes=True)
    print('Graph drawn, written here:', usersettings.sessionFolder+'/'+model_name_str+'.png')
except Exception as e:
print('Could not plot model, error:',e)
#-> as a printed log and write the network summary to file in the session folder
with open(usersettings.sessionFolder+'/'+model_name_str+'.txt','w') as fh:
# Pass the file handle in as a lambda function to make it callable
    model.summary()
model.summary(print_fn=lambda x: fh.write(x + '\n'))
'''try:
receptive_field_info=tf.contrib.receptive_field.compute_receptive_field_from_graph_def(
model,
input_node,
output_node,
stop_propagation=None,
input_resolution=None
)
except Exception as e:
print('Receptive field computation failed, reason=',e)
'''
#####################################
# prepare all standard callbacks
all_callbacks=[]
#add the history callback
class CustomHistory(tf.keras.callbacks.History):
"""Callback that records events into a `History` object.
This callback is automatically applied to
every Keras model. The `History` object
gets returned by the `fit` method of models.
"""
def __init__(self):
super(CustomHistory, self).__init__()
self.history = {}
self.epoch = [] #moved from on_train_begin
def on_train_begin(self, logs=None):
print('******** HISTORY starting a training session...')
def on_epoch_end(self, epoch, logs=None):
print('******** HISTORY on_epoch_end...')
super(CustomHistory, self).on_epoch_end(epoch, logs)
history_callback=CustomHistory()#tf.keras.callbacks.History()
all_callbacks.append(history_callback)
# -> terminate on NaN loss values
all_callbacks.append(tf.keras.callbacks.TerminateOnNaN())
# -> apply early stopping
early_stopping_patience=usersettings.early_stopping_patience if hasattr(usersettings, 'early_stopping_patience') else 5
earlystopping_callback=tf.keras.callbacks.EarlyStopping(
monitor=usersettings.monitored_loss_name,
patience=early_stopping_patience
)
all_callbacks.append(earlystopping_callback)
reduceLROnPlateau_callback=tf.keras.callbacks.ReduceLROnPlateau(monitor=usersettings.monitored_loss_name, factor=0.1,
patience=(early_stopping_patience*2)//3, min_lr=0.000001, verbose=True)
all_callbacks.append(reduceLROnPlateau_callback)
#-> checkpoint each epoch
checkpoint_callback=MyCustomModelSaverExporterCallback(#tf.keras.callbacks.ModelCheckpoint(
usersettings.sessionFolder+'/checkpoints/',
usersettings,
monitor=usersettings.monitored_loss_name,
verbose=1,
save_best_only=True,
save_weights_only=False,
mode='auto',
save_freq='epoch')
all_callbacks.append(checkpoint_callback)
if usersettings.debug:
#TODO to be tested
print('TODO: check this for tf2 migration...')
#tf_debug.TensorBoardDebugWrapperSession(tf.Session(), usersettings.debug_server_addresses) #"[[_host]]:[[_port]]")
'''all_callbacks.append(tf_debug.TensorBoardDebugHook(usersettings.debug_server_addresses,
send_traceback_and_source_code=True,
log_usage=False)
)
'''
#complete generic callbacks by user defined ones
all_callbacks+=usersettings.addon_callbacks(model, train_data, val_data)
  #-> classical logging on Tensorboard (scalars, histograms, and so on)
log_dir=usersettings.sessionFolder+"/logs/"# + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
#-> activate profiling if required
profile_batch =0
if usersettings.use_profiling is True:
profile_batch = 3
print('Profiling is applied, for more details and log analysis, check : https://www.tensorflow.org/tensorboard/tensorboard_profiling_keras')
# -> set embeddings to be logged
embeddings_layer_names={output.name:output.name+'.tsv' for output in model.outputs}
print('Model outputs:',embeddings_layer_names)
#-> set tensorboard logging
#FIXME: https://github.com/tensorflow/tensorboard/issues/2471
tensorboard_callback=tf.keras.callbacks.TensorBoard(log_dir,
histogram_freq=1,
write_graph=True,
write_images=False,#True,
update_freq='epoch',
profile_batch=profile_batch,
embeddings_freq=10,
#embeddings_metadata='metadata.tsv',
#embeddings_layer_names=list(embeddings_layer_names.keys()),#'embedding',
#embeddings_data='stuff'
)
all_callbacks.append(tensorboard_callback)
#-> add the hyperparameters callback for experiments comparison
all_callbacks.append(hp.KerasCallback(log_dir, usersettings.hparams))
#-> export saved_model (for serving) each epoch
#####################################
# train the model
use_multiprocessing=False
workers=1
if False:#gpu_workers_nb>1:
workers*=gpu_workers_nb
use_multiprocessing=True
print('Fitting model:')
print('* use_multiprocessing=',use_multiprocessing)
if use_multiprocessing:
print('* multiprocessing workers=',workers)
history = None
#manage epoch index in case of fitting interruption/restart
epoch_start_index=0
#print XLA mode
print('*** XLA optimization state : TF_XLA_FLAGS =', os.environ['TF_XLA_FLAGS'])
'''#FIXME: cannot evaluate model.optimizer.iterations to recover epoch index...
if usersettings.recoverFromCheckpoint:
epoch_start_index=tf.keras.backend.eval(model.optimizer.iterations)//train_iterations_per_epoch
print('RESTARTING FITTING FROM CHECKPOINT:')
print('==> Last training iteration was {lastIt} => setting current epoch to {epoch} '.format(lastIt=model.optimizer.iterations,
epoch=epoch_start_index))
'''
#-> train with (in memory) input data pipelines
print()
if usersettings.federated_learning is False or federated_learning_available is False:
print('Now starting a classical training session...')
history = model.fit(
x=train_data,
y=None,#train_data,
        batch_size=None,#usersettings.batch_size, #batch size must be specified at the input pipeline level
epochs=usersettings.nbEpoch,
verbose=True,
callbacks=all_callbacks,
validation_split=0.0,
validation_data=val_data,# val_ref),
shuffle=True,
class_weight=None,
sample_weight=None,
initial_epoch=epoch_start_index,
steps_per_epoch=train_iterations_per_epoch,
validation_steps=val_iterations_per_epoch,
validation_freq=1,
max_queue_size=workers*100,
workers=workers,
use_multiprocessing=use_multiprocessing,
)
else:
print('Now starting a federated training session...')
# Define Flower client
class FlClient(fl.client.NumPyClient):
def __init__(self):
self.history=None
self.round=0
self.last_val_loss=np.inf
self.tensorboard_callback=tensorboard_callback
def get_parameters(self):
return model.get_weights()
def fit(self, parameters, config):
print('#################### FlClient.fit new round', self.round)
#set updated weights
model.set_weights(parameters)
#tensorboard_callback.on_train_begin(self.round)
if self.round==0:
callbacks=all_callbacks
else:
#FIXME workaround to force tensorboard to display data tracking along rounds/epochs
self.tensorboard_callback=tf.keras.callbacks.TensorBoard(log_dir,
histogram_freq=1,
write_graph=True,
write_images=False,#True,
update_freq='epoch',
profile_batch=profile_batch,
embeddings_freq=10,
#embeddings_metadata='metadata.tsv',
#embeddings_layer_names=list(embeddings_layer_names.keys()),#'embedding',
#embeddings_data='stuff'
)
callbacks=self.tensorboard_callback
#training for one epoch
history=model.fit(
x=train_data,
y=None,#train_data,
            batch_size=None,#usersettings.batch_size, #batch size must be specified at the input pipeline level
epochs=1*(self.round+1),
verbose=1,
callbacks=callbacks,
validation_split=0.0,
validation_data=val_data, #=> done at the evaluate method level
shuffle=True,
class_weight=None,
sample_weight=None,
initial_epoch=1*(self.round),
steps_per_epoch=train_iterations_per_epoch,
validation_steps=val_iterations_per_epoch,
validation_freq=1,
max_queue_size=workers*100,
workers=workers,
use_multiprocessing=use_multiprocessing,
)
print('FlClient.fit round result, history=', self.round,history.history)
logs_last_epoch={ key:history.history[key][-1] for key in history.history.keys()}
print('==> last history=', logs_last_epoch)
checkpoint_callback.on_epoch_end(self.round, logs_last_epoch)
earlystopping_callback.on_epoch_end(self.round, logs_last_epoch)
#history_callback.on_epoch_end(self.round, logs_last_epoch)
self.tensorboard_callback.on_train_end(logs_last_epoch) #FIXME workaround to force tensorboard to display data tracking along rounds/epochs
#reduceLROnPlateau_callback.on_epoch_end(self.round, logs_last_epoch)
if len(history.history)>0:
self.history=history
        # avoid reusing the full callback list: only attach it on the first round
self.round+=1
return model.get_weights(), train_iterations_per_epoch*usersettings.batch_size, logs_last_epoch#{}
def evaluate(self, parameters, config):
print('Evaluating model from received parameters...')
model.set_weights(parameters)
losses = model.evaluate(x=val_data,
y=None,
batch_size=None,
verbose=1,
sample_weight=None,
steps=val_iterations_per_epoch,
callbacks=all_callbacks,
max_queue_size=10,
workers=workers,
use_multiprocessing=False,
return_dict=True
)
print('FlClient.evaluate losses:',losses)
return losses[usersettings.monitored_loss_name], val_iterations_per_epoch*usersettings.batch_size, losses#self.history.history#loss_dict
# Start Flower client
federated_learner=FlClient()
fl.client.start_numpy_client(server_address=usersettings.federated_learning_server_address, client=federated_learner)
history=federated_learner.history
print('\nhistory dict:', history.history)
print('Training session end, loss={loss} '.format(loss=history.history['loss'][-1]))
print('Have a look at the experiments details saved in folder ', usersettings.sessionFolder)
final_result=None
if final_result is None:
final_result={'loss':history.history['loss'][-1]}
return final_result, usersettings.model_export_filename
###########################################################
## INFERENCE SECTION : talking to a tensorflow-server
#inspired from https://github.com/tensorflow/serving/blob/master/tensorflow_serving/example/mnist_client.py
def WaitForServerReady(host, port):
#inspired from https://github.com/tensorflow/serving/blob/master/tensorflow_serving/model_servers/tensorflow_model_server_test.py
"""Waits for a server on the localhost to become ready.
returns True if server is ready or False on timeout
Args:
    host: tensorflow server address
port: port address of the PredictionService.
"""
from grpc import implementations
from grpc.framework.interfaces.face import face
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2
for _ in range(0, usersettings.wait_for_server_ready_int_secs):
request = predict_pb2.PredictRequest()
request.model_spec.name = 'server_not_real_model_name'
try:
# Send empty request to missing model
print('Trying to reach tensorflow-server {srv} on port {port} for {delay} seconds'.format(srv=host,
port=port,
delay=usersettings.wait_for_server_ready_int_secs))
channel = implementations.insecure_channel(host, int(port))
stub = prediction_service_pb2.PredictionServiceStub(channel)
stub.Predict(request, 1)
except face.AbortionError as error:
# Missing model error will have details containing 'Servable'
if 'Servable' in error.details:
print('Server is ready')
return True
else:
print('Error:'+str(error.details))
return False
time.sleep(1)
def _create_rpc_callback(client, debug):
"""Creates RPC callback function.
Args:
Returns:
The callback function.
"""
def _callback(result_future):
"""Callback function.
Calculates the statistics for the prediction result.
Args:
result_future: Result future of the RPC.
"""
print('Received response:'+str(result_future))
exception = result_future.exception()
if exception:
#result_counter.inc_error()
print(exception)
else:
try:
if FLAGS.debug:
print(result_future.result())
client.decodeResponse(result_future.result())
except Exception as e:
        raise ValueError('Exception encountered on client callback : {error}'.format(error=e))
return _callback
def do_inference(experiment_settings, host, port, model_name, clientIO_InitSpecs, concurrency, num_tests):
"""Tests PredictionService with concurrent requests.
Args:
experiment_settings: the experiment settings loaded from function loadExperimentsSettings
    host: tensorflow server address
    port: port address of the PredictionService.
    model_name: the model name ID
    clientIO_InitSpecs: a dictionary to pass to the Client_IO constructor
concurrency: Maximum number of concurrent requests.
num_tests: Number of test images to use, infinite prediction loop if <0.
Raises:
IOError: An error occurred processing test data set.
Hint : have a look here to track api use updates : https://github.com/tensorflow/serving/blob/master/tensorflow_serving/example/mnist_client.py
"""
from tensorflow_serving.apis import predict_pb2 #for single head models
from tensorflow_serving.apis import inference_pb2 #for multi head models
from tensorflow_serving.apis import prediction_service_pb2_grpc
import grpc
  print('Trying to interact with server:{srv} on port {port} for prediction...'.format(srv=host,
port=port))
''' test scripts may help : https://github.com/tensorflow/serving/blob/master/tensorflow_serving/model_servers/tensorflow_model_server_test.py
'''
server=host+':'+str(port)
  # specify option to support messages larger than allowed by default
grpc_options=None
if usersettings.grpc_max_message_length !=0:
    grpc_options = [('grpc.max_send_message_length', usersettings.grpc_max_message_length),
                    ('grpc.max_receive_message_length', usersettings.grpc_max_message_length)]
channel = grpc.insecure_channel(server, options=grpc_options)
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
#allocate a clientIO instance defined for the experiment
client_io=experiment_settings.Client_IO(clientIO_InitSpecs, FLAGS.debug)
notDone=True
predictionIdx=0
while notDone:
try:
predictionIdx=predictionIdx+1
start_time=time.time()
sample=client_io.getInputData(predictionIdx)
if not(isinstance(sample, dict)):
        raise ValueError('Expecting a dictionary of values that will further be converted to proto buffers. Dictionary keys must correspond to the usersettings.served_input_names strings list')
if FLAGS.debug:
print('Input data is ready, data=',sample)
print('Time to prepare collect data request:',round(time.time() - start_time, 2))
start_time=time.time()
request = predict_pb2.PredictRequest()
request.model_spec.name = model_name
request.model_spec.signature_name = model_name#experiment_settings.served_head_names[0]
for inputname in usersettings.served_input_names:
feature=sample[inputname]
feature_proto=tf.make_tensor_proto(feature, shape=feature.shape)
request.inputs[inputname].CopyFrom(feature_proto)
if FLAGS.debug:
print('Time to prepare request:',round(time.time() - start_time, 2))
except StopIteration:
      print('End of the process detected, running the Client_IO::finalize method')
      notDone=False
break
#asynchronous message reception, may hide some AbortionError details and only provide CancellationError(code=StatusCode.CANCELLED, details="Cancelled")
if hasattr(experiment_settings, 'client_async'):
result_future = stub.Predict.future(request, experiment_settings.serving_client_timeout_int_secs) # 5 seconds
result_future.add_done_callback(
_create_rpc_callback(client_io, FLAGS.debug))
else:
#synchronous approach... that may provide more details on AbortionError
if FLAGS.debug:
start_time=time.time()
answer=stub.Predict(request, experiment_settings.serving_client_timeout_int_secs)
if FLAGS.debug:
print('Time to send request/decode response:',round(time.time() - start_time, 2))
start_time=time.time()
client_io.decodeResponse(answer)
if FLAGS.debug:
print('Time to decode response:',round(time.time() - start_time, 2))
if num_tests>=0:
if predictionIdx>=num_tests:
notDone=False
client_io.finalize()
return 0
def loadExperimentsSettings(filename, restart_from_sessionFolder=None, isServingModel=False):
''' load experiments parameters from the mysettingsxxx.py script
also mask GPUs to only use the ones specified in the settings file
Args:
    filename: the settings file; if restarting an interrupted training session, you should target the experiments_settings.py copy available in the experiment folder
    restart_from_sessionFolder: [OPTIONAL] set the session folder of a previously interrupted training session to restart
    isServingModel: [OPTIONAL] set True in the case of using model serving (server or client mode) so that some settings are not checked
'''
if restart_from_sessionFolder is not None:
if os.path.exists(restart_from_sessionFolder):
print('Attempting to restart a previously ran training job...')
sessionFolder=restart_from_sessionFolder
#target the initial experiments settings file
filename=os.path.join(restart_from_sessionFolder, SETTINGSFILE_COPY_NAME)
print('From working folder'+str(os.getcwd()))
print('looking for '+str(filename))
if os.path.exists(filename):
print('Found')
else:
raise ValueError('Could not find experiment_settings.py file in the experiment folder:'+str(sessionFolder))
else:
raise ValueError('Could not restart interrupted training session, working folder not found:'+str(restart_from_sessionFolder))
else:
print('Process starts...')
usersettings=ExperimentSettings(filename, isServingModel)
if isServingModel:
sessionFolder=os.path.dirname(filename)
#manage the working folder in the case of a new experiment
workingFolder=usersettings.workingFolder
if restart_from_sessionFolder is None:
sessionFolder=os.path.join(workingFolder, usersettings.session_name+'_'+datetime.datetime.now().strftime("%Y-%m-%d--%H:%M:%S"))
print('Considered usersettings.hparams=',str(usersettings.hparams))
return usersettings, sessionFolder
def get_served_model_info(one_model_path, expected_model_name):
''' basic function that checks served model behaviors
Args:
one_model_path: the path to a servable model directory
expected_model_name: the model name that is expected to be found on the server
Returns:
Nothing for now
'''
import subprocess
#get the first subfolder of the served models directory
served_model_info_cmd='saved_model_cli show --dir {target_model} --tag_set serve --signature_def {model_name}'.format(target_model=one_model_path,
model_name=expected_model_name)
print('Checking served model available signatures using command '+served_model_info_cmd)
cmd_result=subprocess.check_output(served_model_info_cmd.split())
print('Answer:')
print(cmd_result.decode())
if expected_model_name in cmd_result.decode():
print('Target model {target} name found in the command answer'.format(target=expected_model_name))
else:
raise ValueError('Target model {target} name NOT found in the command answer'.format(target=expected_model_name))
# Run script ##############################################
def run(train_config_script=None, external_hparams={}):
  ''' the main script function that can receive hyperparameters as a dictionary to run an experiment
  can start training, model serving or model client requesting depending on the FLAGS values:
  -> if FLAGS.start_server is True : starts a server that hosts the target model
  -> if FLAGS.predict is True : starts a client that will send requests to a model through gRPC
  -> else, start a training session relying on a settings file provided by train_config_script or FLAGS.usersettings
  --> in this mode, the function returns a dictionary that summarizes the last model state at the end of the training
--> if calling with non empty train_config_script and with non empty external_hparams,
then external_hparams will update hyperparameters already specified in the train_config_script script
'''
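  # Example of a programmatic call (hypothetical caller; the hyperparameter names passed in
  # external_hparams are illustrative and must exist in the targeted settings file):
  #   from experiments_manager import run
  #   outputs = run(train_config_script='mysettings_1D_experiments.py',
  #                 external_hparams={'nbEpoch': 10})
  #   print(outputs['last_exported_model'], outputs['sessionFolder'])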
global usersettings
experiments_output=None
#tf.reset_default_graph()
usersettings=None#ensure to clear this object prior any new trial
''' main function that starts the experiment in the chosen mode '''
scripts_WD=os.getcwd() #to locate the mysettings*.py file
if FLAGS.debug is True:
print('Running in debug mode. Press Enter to continue...')
if FLAGS.start_server is True :
print('### START TENSORFLOW SERVER MODE ###')
print('WARNING, this function expects some libraries to be installed, mostly dedicated to the training processing.')
print('-> to run tensorflow model server on minimal install run start_model_serving.py')
usersettings, sessionFolder = loadExperimentsSettings(os.path.join(FLAGS.model_dir,SETTINGSFILE_COPY_NAME), isServingModel=True)
#target the served models folder
model_folder=os.path.join(scripts_WD,FLAGS.model_dir,'exported_models')
print('Considering served model parent directory:'+model_folder)
#check if at least one served model exists in the target models directory
stillWait=True
while stillWait is True:
print('Looking for a servable model in '+model_folder)
try:
#check served model existance
if not(os.path.exists(model_folder)):
raise ValueError('served models directory not found : '+model_folder)
#look for a model in the directory
one_model=next(os.walk(model_folder))[1][0]
one_model_path=os.path.join(model_folder, one_model)
if not(os.path.exists(one_model_path)):
raise ValueError('served models directory not found : '+one_model_path)
print('Found at least one servable model directory '+str(one_model_path))
stillWait=False
except Exception as e:
        raise ValueError('Could not find servable model, error='+str(e))
get_served_model_info(one_model_path, usersettings.model_name)
tensorflow_start_cmd=" --port={port} --model_name={model} --model_base_path={model_dir}".format(port=usersettings.tensorflow_server_port,
model=usersettings.model_name,
model_dir=model_folder)
if len(FLAGS.singularity_tf_server_container_path)>0:
print('Starting Tensorflow model server from provided singularity container : '+FLAGS.singularity_tf_server_container_path)
tensorflow_start_cmd='singularity run --nv '+FLAGS.singularity_tf_server_container_path+tensorflow_start_cmd
else:
print('Starting Tensorflow model server installed on system')
tensorflow_start_cmd='tensorflow_model_server '+tensorflow_start_cmd
print('Starting tensorflow server with command :'+tensorflow_start_cmd)
os.system(tensorflow_start_cmd)
elif FLAGS.predict is True or FLAGS.predict_stream !=0:
print('### PREDICT MODE, interacting with a tensorflow server ###')
print('If necessary, check the served model behaviors using command line cli : saved_model_cli show --dir path/to/export/model/latest_model/1534610225/ --tag_set serve to get the MODEL_NAME(S)\n to get more details on the target MODEL_NAME, you can then add option --signature_def MODEL_NAME')
usersettings, sessionFolder = loadExperimentsSettings(os.path.join(FLAGS.model_dir,SETTINGSFILE_COPY_NAME), isServingModel=True)
    #FIXME errors reported on gRPC: https://github.com/grpc/grpc/issues/13752 ... stay tuned, had to install a specific grpcio version (pip install grpcio==1.7.3)
'''server_ready=WaitForServerReady(usersettings.tensorflow_server_address, usersettings.tensorflow_server_port)
if server_ready is False:
raise ValueError('Could not reach tensorflow server')
'''
print('Prediction mode using model : '+FLAGS.model_dir+' with model '+usersettings.model_name)
predictions_dir=os.path.join(FLAGS.model_dir,
'predictions_'+datetime.datetime.now().strftime("%Y-%m-%d--%H:%M:%S"))
os.mkdir(predictions_dir)
os.chdir(predictions_dir)
print('Current working directory = '+os.getcwd())
print('In case of GRPC errors, check codes at https://developers.google.com/maps-booking/reference/grpc-api/status_codes')
do_inference(experiment_settings=usersettings, host=usersettings.tensorflow_server_address,
port=usersettings.tensorflow_server_port,
model_name=usersettings.model_name,
clientIO_InitSpecs={},
concurrency=0,
num_tests=FLAGS.predict_stream)
elif FLAGS.commands is True :
print('Here are some command examples')
print('1. train a model (once the mysettings_1D_experiments.py is set):')
print('-> python experiments_manager.py --usersettings=mysettings_1D_experiments.py')
print('2. start a tensorflow server on the trained/training model :')
print('-> python experiments_manager.py --start_server --model_dir=experiments/1Dsignals_clustering/my_test_2018-01-03--14:40:53')
print('3. interract with the tensorflow server, sending input buffers and receiving answers')
print('-> python experiments_manager.py --predict --model_dir=experiments/1Dsignals_clustering/my_test_2018-01-03--14\:40\:53/')
print('4. restart an interrupted training session')
print('-> python experiments_manager.py --restart_interrupted --model_dir=experiments/1Dsignals_clustering/my_test_2018-01-03--14\:40\:53/')
else:
print('### TRAINING MODE ###')
""" setting up default values from command line """
settings_file=FLAGS.usersettings
""" updating default values if running function from an upper level """
# manage eventual external custom settings and hyperparameters
custom_settings=False
if train_config_script is not None:
print('--> Training from setup file {file} with the following external hyperparameters {params}'.format(file=train_config_script, params=external_hparams))
settings_file=train_config_script
custom_settings=True
if FLAGS.procID is not None:
external_hparams['procID']=FLAGS.procID
if len(external_hparams)>0:
custom_settings=True
if custom_settings:
print('Some custom settings have been specified : training from setup file {file} with the following external hyperparameters {params}'.format(file=train_config_script, params=external_hparams))
settings_file=tools.experiments_settings_surgery.insert_additionnal_hparams(settings_file, external_hparams)
print('-> created a temporary experiments settings file : '+settings_file)
#loading the experiment setup script
usersettings, sessionFolder = loadExperimentsSettings(settings_file,
restart_from_sessionFolder=FLAGS.model_dir,
isServingModel=False)
#update hparams structure with external parameters
usersettings.debug=FLAGS.debug
usersettings.debug_server_addresses=FLAGS.debug_server_addresses
    #add additional hyperparams coming from an optional 'hparams' dictionary declared in the settings file
if hasattr(usersettings, 'hparams'):
      print('adding hyperparameters declared in the experiment settings script:'+str(usersettings.hparams))
#update sessionFolder name string
if not FLAGS.restart_interrupted:
sessionFolder_splits=sessionFolder.split('_')
sessionFolder_addon=''
for key, value in usersettings.hparams.items():
sessionFolder_addon+='_'+key+str(value)
#insert sessionname addons in the original one
sessionFolder=''
for str_ in sessionFolder_splits[:-1]:
sessionFolder+=str_+'_'
sessionFolder=sessionFolder[:-1]#remove the last '_'
sessionFolder+=sessionFolder_addon+'_'+sessionFolder_splits[-1]
usersettings.sessionFolder=sessionFolder
print('Found hparams: '+str(usersettings.hparams))
else:
      print('No hparams dictionary found in the experiment settings file')
print('Experiment session folder : '+sessionFolder)
#deduce the experiments settings copy filename that is versionned in the experiment folder
settings_copy_fullpath=os.path.join(sessionFolder, SETTINGSFILE_COPY_NAME)
#copy settings and model file to the working folder
usersettings.sessionFolder=sessionFolder
if not FLAGS.restart_interrupted:
os.makedirs(sessionFolder)
if hasattr(usersettings, 'model_file'):
shutil.copyfile(usersettings.model_file, os.path.join(sessionFolder, os.path.basename(usersettings.model_file)))
settings_src_file=settings_file
print('Willing to copy {src} to {dst}'.format(src=settings_src_file, dst=settings_copy_fullpath))
shutil.copyfile(settings_src_file, settings_copy_fullpath)
#prepare a config file for model serving
serving_config = configparser.ConfigParser()
serving_config['SERVER'] = { 'host': usersettings.tensorflow_server_address,
'port': usersettings.tensorflow_server_port,
'model_name': usersettings.model_name,
}
with open(os.path.join(sessionFolder, 'model_serving_setup.ini'), 'w') as configfile:
serving_config.write(configfile)
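      # the written model_serving_setup.ini follows the standard configparser layout
      # (actual values come from the user settings), e.g.:
      #   [SERVER]
      #   host = localhost
      #   port = 9000
      #   model_name = my_model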
else:
usersettings.recoverFromCheckpoint=True
res, last_exported_model=run_experiment(usersettings)
  #gather results in a single updated dictionary
experiments_output=res
experiments_output.update({'last_exported_model':last_exported_model, 'sessionFolder':sessionFolder})
return experiments_output
if __name__ == "__main__":
run()
|
import pandas as pd
import numpy as np
import utils
'''
MIT License
Copyright (c) 2020 Faviola Molina - dLab - Fundación Ciencia y Vida
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
def readingData(inputFile):
    # This function reads the input file. This is a raw file from the GitHub repository.
    # The file contains the history of coronavirus data in the US for all counties.
df = pd.read_csv(inputFile)
print(df.columns.to_list())
return df
def selectingData(df,source):
    # This function reads the list of the counties searched and ordered by C. Valdenegro
    # for the state of New York. This file is a modification of the original,
    # considering that the NYT source treats New York City as a single unit. The FIPS for
    # New York City was assigned as 99999 for convenience
    # (FIPS for this unit is empty in the source). Moreover, the FIPS for the
    # "Unknown" county is chosen as 90036 to keep consistency with JH data ("Unassigned" county)
county = utils.counties(source)
temp = df.loc[df['state'] == 'New York'].copy()
temp.drop(columns = 'state', inplace = True)
aux1 = temp['fips'].loc[temp['county'] == 'Unknown'].fillna(90036)
temp['fips'].loc[temp['county'] == 'Unknown'] = aux1.copy()
aux2 = temp['fips'].loc[temp['county'] == 'New York City'].fillna(99999)
temp['fips'].loc[temp['county'] == 'New York City'] = aux2.copy()
date_list = temp['date'].unique()
print(temp['county'].unique())
    # Data in the source is reported only for counties and dates with infected cases. Counties
    # with no infected cases at a certain date do not appear in the file.
    # For consistency and standardization, counties with no infected people at a given date
    # are filled with zeroes
k = 0
for i in date_list:
t = [i] * len(county)
df_date = pd.DataFrame(data={'date': t})
df = pd.DataFrame(np.zeros((len(county), 2)), columns=['cases', 'deaths'])
cases_county = temp['cases'].loc[temp['date'] == i].to_list()
deaths_county = temp['deaths'].loc[temp['date'] == i].to_list()
county_list = temp['county'].loc[temp['date'] == i].copy()
j = 0
for row in county_list:
idx = county.loc[county['county'] == row].index.values
if idx.size > 0:
df.loc[idx, 'cases'] = cases_county[j]
df.loc[idx, 'deaths'] = deaths_county[j]
j += 1
aux = pd.concat([df_date, county, df.astype(int)], axis=1)
if k == 0:
df_ny = aux.copy()
else:
df_ny = pd.concat([df_ny,aux], axis=0)
k += 1
    # The data is concatenated into a single dataframe
df_ny.reset_index(inplace = True, drop = True)
return df_ny
def writingData(df,outfile):
# data is written to a single output file
df.to_csv(outfile, index = False)
if __name__ == '__main__':
# Establish the source to read
source = 'NYT'
# provide the URL of the raw file of the github repository to read
df = readingData('https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv')
df_ny = selectingData(df,source)
# provide the name of the output file
writingData(df_ny,'../output/NYTimes/NYTraw_epidemiology_NY_std.csv')
|
from flask import Blueprint
from flask_restful import Api
from .resources import ImageAiResource
from .sentences import SentenceResource, SentenceItemResource
bp = Blueprint("restapi", __name__, url_prefix="/api/v1")
api = Api(bp)
def init_app(app):
api.add_resource(ImageAiResource, "/upload/")
api.add_resource(SentenceResource, "/sentence/")
api.add_resource(SentenceItemResource, "/sentence/<string:id>")
app.register_blueprint(bp)
|
# Copyright 2015 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock

from oslo_config import cfg

from nova.tests.functional.api_sample_tests import api_sample_base

CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
                'nova.api.openstack.compute.legacy_v2.extensions')


class FakeNode(object):
    def __init__(self, uuid='058d27fa-241b-445a-a386-08c04f96db43'):
        self.uuid = uuid
        self.provision_state = 'active'
        self.properties = {'cpus': '2',
                           'memory_mb': '1024',
                           'local_gb': '10'}
        self.instance_uuid = '1ea4e53e-149a-4f02-9515-590c9fb2315a'


class NodeManager(object):
    def list(self, detail=False):
        return [FakeNode(), FakeNode('e2025409-f3ce-4d6a-9788-c565cf3b1b1c')]

    def get(self, id):
        return FakeNode(id)

    def list_ports(self, id):
        return []


class fake_client(object):
    node = NodeManager()


class BareMetalNodesSampleJsonTest(api_sample_base.ApiSampleTestBaseV21):
    ADMIN_API = True
    extension_name = "os-baremetal-nodes"

    def _get_flags(self):
        f = super(BareMetalNodesSampleJsonTest, self)._get_flags()
        f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
        f['osapi_compute_extension'].append('nova.api.openstack.compute.'
                                            'contrib.baremetal_nodes.Baremetal_nodes')
        return f

    @mock.patch("nova.api.openstack.compute.baremetal_nodes"
                "._get_ironic_client")
    @mock.patch("nova.api.openstack.compute.legacy_v2.contrib.baremetal_nodes"
                "._get_ironic_client")
    def test_baremetal_nodes_list(self, mock_get_irc, v2_1_mock_get_irc):
        mock_get_irc.return_value = fake_client()
        v2_1_mock_get_irc.return_value = fake_client()

        response = self._do_get('os-baremetal-nodes')
        self._verify_response('baremetal-node-list-resp', {}, response, 200)

    @mock.patch("nova.api.openstack.compute.baremetal_nodes"
                "._get_ironic_client")
    @mock.patch("nova.api.openstack.compute.legacy_v2.contrib.baremetal_nodes"
                "._get_ironic_client")
    def test_baremetal_nodes_get(self, mock_get_irc, v2_1_mock_get_irc):
        mock_get_irc.return_value = fake_client()
        v2_1_mock_get_irc.return_value = fake_client()

        response = self._do_get('os-baremetal-nodes/'
                                '058d27fa-241b-445a-a386-08c04f96db43')
        self._verify_response('baremetal-node-get-resp', {}, response, 200)
|
import urllib
from bs4 import BeautifulSoup
import urlparse
import mechanize
import pickle
import re
try:
import sys
if 'threading' in sys.modules:
del sys.modules['threading']
print('threading module loaded before patching!')
print('threading module deleted from sys.modules!\n')
import gevent
from gevent import monkey, pool
monkey.patch_all()
gevent_installed = True
except:
print("Gevent does not installed. Parsing process will be slower.")
gevent_installed = False
class Crawler:
def __init__(self, url, outputfile='sitemap.xml', logfile='error.log', oformat='xml'):
self.url = url
self.logfile = open(logfile, 'a')
self.oformat = oformat
self.outputfile = outputfile
# create lists for the urls in que and visited urls
self.urls = set([url])
self.visited = set([url])
self.exts = ['htm', 'php']
        self.allowed_regex = r'\.((?!htm)(?!php)\w+)$'
def set_exts(self, exts):
self.exts = exts
def allow_regex(self, regex=None):
if not regex is None:
self.allowed_regex = regex
else:
allowed_regex = ''
for ext in self.exts:
                allowed_regex += '(?!{})'.format(ext)
            self.allowed_regex = r'\.({}\w+)$'.format(allowed_regex)
def crawl(self, echo=False, pool_size=1):
self.echo = echo
self.regex = re.compile(self.allowed_regex)
if gevent_installed and pool_size > 1:
self.pool = pool.Pool(pool_size)
self.pool.spawn(self.parse_gevent)
self.pool.join()
else:
while len(self.urls) > 0:
self.parse()
if self.oformat == 'xml':
self.write_xml()
elif self.oformat == 'txt':
self.write_txt()
def parse_gevent(self):
self.parse()
while len(self.urls) > 0 and not self.pool.full():
self.pool.spawn(self.parse_gevent)
def parse(self):
if self.echo:
if not gevent_installed:
print('{} pages parsed :: {} pages in the queue'.format(len(self.visited), len(self.urls)))
else:
print('{} pages parsed :: {} parsing processes :: {} pages in the queue'.format(len(self.visited), len(self.pool), len(self.urls)))
        # Set the starting point for the spider and initialize
        # a mechanize browser object
if not self.urls:
return
else:
url = self.urls.pop()
br = mechanize.Browser()
try:
response = br.open(url)
if response.code >= 400:
self.errlog("Error {} at url {}".format(response.code, url))
return
for link in br.links():
newurl = urlparse.urljoin(link.base_url, link.url)
#print newurl
if self.is_valid(newurl):
self.visited.update([newurl])
self.urls.update([newurl])
        except Exception as e:
            self.errlog(str(e))
br.close()
del(br)
def is_valid(self, url):
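        # Keep a URL only if it has not been seen yet, belongs to the crawled
        # site and does not match the excluded-extension regex; any fragment
        # ("#...") is stripped before the checks.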
valid = False
if '#' in url:
url = url[:url.find('#')]
if url in self.visited:
return False
if not self.url in url:
return False
if re.search(self.regex, url):
return False
return True
def errlog(self, msg):
self.logfile.write(msg)
self.logfile.write('\n')
def write_xml(self):
of = open(self.outputfile, 'w')
of.write('<?xml version="1.0" encoding="utf-8"?>\n')
of.write('<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9 http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd">\n')
url_str = '<url><loc>{}</loc></url>\n'
while self.visited:
of.write(url_str.format(self.visited.pop()))
of.write('</urlset>')
of.close()
def write_txt(self):
of = open(self.outputfile, 'w')
url_str = '{}\n'
while self.visited:
of.write(url_str.format(self.visited.pop()))
of.close() |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-03 09:41
from __future__ import unicode_literals
import company.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Attachment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('file', models.FileField(upload_to=company.models.company_file_path)),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='date created')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='date updated')),
],
),
migrations.CreateModel(
name='Branch',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(choices=[('CO', 'Computer Engineering'), ('ME', 'Mechanical Engineering'), ('CE', 'Civil Engineering'), ('EE', 'Electrical Engineering'), ('CH', 'Chemical Engineering'), ('EC', 'Electronics and Communication Engineering'), ('PHY', 'Physics'), ('CHEM', 'Chemistry'), ('MATH', 'Mathematics'), ('ALL', 'All Branches')], max_length=4)),
('degree', models.CharField(choices=[('BTECH', 'BTech'), ('MTECH', 'MTech'), ('MSC', 'MSc')], default='BTECH', max_length=5)),
],
),
migrations.CreateModel(
name='Company',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, unique=True)),
('slug', models.SlugField()),
('website', models.URLField(blank=True, max_length=255, null=True)),
('about', models.TextField(blank=True, max_length=5000, null=True)),
('perks', models.TextField(blank=True, max_length=5000, null=True)),
('infra_req', models.TextField(blank=True, max_length=5000, null=True)),
('other', models.TextField(blank=True, max_length=5000, null=True)),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='date created')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='date updated')),
],
),
migrations.CreateModel(
name='CRPDate',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('datatype', models.CharField(choices=[('MON', 'Month'), ('WOM', 'Week of Month'), ('DAT', 'Date')], max_length=3)),
('month', models.IntegerField(blank=True, null=True)),
('date', models.DateField(blank=True, null=True)),
('week_number', models.CharField(blank=True, choices=[('F', 'First'), ('S', 'Second'), ('T', 'Third'), ('L', 'Last')], max_length=1, null=True)),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='date created')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='date updated')),
],
),
migrations.CreateModel(
name='Job',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('designation', models.CharField(max_length=255, unique=True)),
('description', models.TextField(blank=True, max_length=5000, null=True)),
('requirements', models.TextField(blank=True, max_length=5000, null=True)),
('eligibility_criteria', models.TextField(blank=True, max_length=5000, null=True)),
('ctc', models.DecimalField(blank=True, decimal_places=2, max_digits=4)),
('ctc_details', models.TextField(blank=True, max_length=5000, null=True)),
('bond_details', models.CharField(blank=True, max_length=255, null=True, unique=True)),
('number_of_selections', models.IntegerField(blank=True, null=True)),
('other', models.TextField(blank=True, max_length=5000, null=True)),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='date created')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='date updated')),
],
),
migrations.CreateModel(
name='JobLocation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('location', models.CharField(max_length=255)),
('job', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='job_location', to='company.Job')),
],
),
migrations.CreateModel(
name='JobType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('job_domain', models.CharField(choices=[('C', 'Core'), ('N', 'Non-Core')], max_length=1)),
('job_type', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='PlacementCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, unique=True)),
('ctc_range', models.CharField(max_length=255, unique=True)),
],
),
migrations.CreateModel(
name='SelectionProcedure',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('procedure', models.CharField(max_length=255)),
],
),
migrations.AlterUniqueTogether(
name='jobtype',
unique_together=set([('job_domain', 'job_type')]),
),
migrations.AddField(
model_name='job',
name='category',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='job', to='company.PlacementCategory'),
),
migrations.AddField(
model_name='job',
name='company',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='job', to='company.Company'),
),
migrations.AddField(
model_name='job',
name='eligible_branches',
field=models.ManyToManyField(to='company.Branch'),
),
migrations.AddField(
model_name='job',
name='job_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='job', to='company.JobType'),
),
migrations.AddField(
model_name='job',
name='selection_procedure',
field=models.ManyToManyField(to='company.SelectionProcedure'),
),
migrations.AddField(
model_name='crpdate',
name='job',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='crpdate', to='company.Job'),
),
migrations.AlterUniqueTogether(
name='branch',
unique_together=set([('name', 'degree')]),
),
migrations.AddField(
model_name='attachment',
name='job',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='attachment', to='company.Job'),
),
migrations.AlterUniqueTogether(
name='joblocation',
unique_together=set([('job', 'location')]),
),
]
|
try:
    ## BeautifulSoup makes working with the HTML easier
    from bs4 import BeautifulSoup
except:
    print("You need bs4 to run this program.\nTo install it: pip install -U bs4")
try:
    ## requests fetches the page sources
    import requests
except:
    print("You need requests to run this program.\nTo install it: pip install -U requests")
## For the various random choices
import random
## For handling the csv files
import csv
## For reading the names dataset
import pandas as pd
## For stripping the vowels (used for the health-card codes)
import re
## Class that manages everything
class gestore:
    def __init__(self):
        ## Prepare the data containers
        self.cont = {"parole": set(), "cognomi": set(), "nomi" : set()}
        self.aggiungi_malattie()
        ## Prepare the dataset with the first names
        self.getNomi_pre()
        ## Fetch all the departments
        self.getReparti()
def getNomi_pre(self):
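        ## Download the first-names dataset (CSV hosted on data.world) and
        ## map the gender labels boy/girl to maschio/femmina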
self.nomi = pd.read_csv('https://query.data.world/s/5xdo7ixiohmuq4yj5sdslgolah6qxh')
self.nomi = self.nomi.replace("boy","maschio").replace("girl","femmina")
    ## Load the words, the surnames and the names
    def aggiungi(self):
        for i in self.cont.keys():
            ## Delegates to the helper function below
            self.setting(i)
def aggiungi_malattie(self):
self.cont["malattie"] = []
with open("./dataset/malattie.txt", "r") as rd:
for i in rd.read().split("\n"):
self.cont["malattie"].append(i)
self.coronavirus = "La malattia da coronavirus (COVID-19) è una malattia infettiva causata da un nuovo virus. Il virus causa una malattia respiratoria (come l'influenza) con sintomi quali tosse febbre e nei casi più gravi difficoltà respiratorie. Puoi proteggerti lavandoti spesso le mani evitando di toccarti il viso ed evitando il contatto ravvicinato con persone che non stanno bene (mantieni un metro di distanza)."
    ## Reads each word from the matching dataset file and adds it to the set
    def setting(self, parola):
        if parola != "malattie":
            ## Load every dataset into its dictionary entry
            with open("./dataset/{}.txt".format(parola), "r") as rd:
                ## Open, read, split
                for i in rd.read().split():
                    ## Add the word
                    self.cont[parola].add(i)
    ## Returns a string: a unique phone number
    def nuovoNum(self):
        ## Draw a candidate number
        numero = self.getNumero()
        ## Keep drawing until it is not already taken
        while (self.numero_telefono.__contains__(numero)):
            numero = self.getNumero()
        ## Register it
        self.numero_telefono.add(numero)
        ## Return the new number
        return numero
    ## Creates the doctors and patients files
    def creaFile(self):
        ## Start with an empty set of unique phone numbers
        self.numero_telefono = set()
        print("Starting file creation")
        ## Open the output files
        ## Besides the doctors we also write the patients, in a separate file
        ## Start with the patients writer setup
file_paz = open("./dataset/pazienti.csv", "w")
writer_paz = csv.writer(file_paz, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
writer_paz.writerow(["Nome", "Cognome", "sesso", "Data di nascita", "padiglione", "reparto", "data ricovero",
"numero della stanza", "numero del letto", "descrizione malattia", "dimissione/decesso",
"tessera sanitaria"])
with open("./dataset/medici.csv", "w") as filecsv:
            ## Default csv settings
            writer = csv.writer(filecsv, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
            ## Header row
            writer.writerow(["Nome", "Cognome", "Ruolo", "padiglione", "reparto", "piano reparto", "Numero di telefono",
                             "Abitazione", "data di nascita", "tessera_sanitaria"])
            ## Turn every dataset from a set into a list
            for i in self.cont.keys():
                if not isinstance(self.cont[i], list):
                    self.cont[i] = list(self.cont[i])
            ## Lists of first names and surnames
            nomi = []
            cognomi = []
            ## Counter of how far we have got
            cont = 0
            ## Extract all the names and split them
for i in self.Nomi:
j = 1
if i[1].split()[1] == "dott.":
j = 2
nomi.append(i[1].split()[j])
cognomi.append(i[1].split()[-1])
            ## Iterate over an enumerated list, one entry per floor
            conto = 0
            for posto, i in enumerate(self.equip_medica):
                print("Writing floor {0}".format(chr(posto+97)) )
                ## Iterate over an enumerated list, one entry per department
                for cont_,j in enumerate(i):
                    ## Write the patients
                    self.scrivi_paziente(writer_paz,str(chr(posto + 97)), self.Nomi[conto][0])
                    self.writer_main(writer, nomi[conto],cognomi[conto],"Capo reparto",str(chr(posto + 97)), self.Nomi[conto][0],self.Nomi[conto][-1])
                    ## Iterate inside the department
for k in j:
if k.__contains__(","):
for h in k.split(","):
try:
self.writer_main(writer, h.split()[0], h.split()[1], "Equip",
str(chr(posto + 97)), self.Nomi[conto][0], self.Nomi[conto][-1],)
except:
pass
break
else:
self.writer_main(writer, k.split()[0], k.split()[1], "Equip",
str(chr(posto + 97)), self.Nomi[conto][0], self.Nomi[conto][-1])
conto += 1
print("Piano finito")
def getNome(self):
return self.nomi.sample().__array__()[0]
def scrivi_paziente(self, writer, padiglione, reparto):
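        ## Write between 5 and 20 random patient rows for the given pavilion
        ## and department; each patient gets COVID-19 with probability 4/5,
        ## otherwise a random disease from the malattie dataset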
malattia = ""
for i in range(random.randint(5, 20)):
            ## Write one patient row
nomi_g = self.getNome()
cognome = self.cont["cognomi"][random.randint(0, len(self.cont["cognomi"]) - 1)]
data_nascita = self.getData()
            ## Give a 4/5 chance that the disease is coronavirus
if random.randint(0,4) == 0:
malattia = self.cont["malattie"][random.randint(0, len(self.cont["malattie"]) - 1)]
else:
malattia = self.coronavirus
writer.writerow([nomi_g[0],
cognome,
nomi_g[1],
data_nascita,
padiglione,
reparto,
self.getDataRicovero(),
random.randint(0, 40),
random.randint(0, 10),
malattia.replace("|",""),
self.getDataRecDec(),
self.getTesseraSanitaria(nomi_g[0], cognome, nomi_g[1], data_nascita)])
def getTesseraSanitaria(self,nome,cognome,sesso,data_nascita):
if sesso == "maschio":
sesso = "m"
else:
sesso = "f"
return rem_vowel(nome).lower() + rem_vowel(cognome).lower() + sesso + data_nascita.replace("/","") + str(random.randint(0,100))
    ## Handles writing one doctor record
def writer_main(self, writer, nome, cognome, posizione, piano, reparto, piano_reparto):
data_nascita = self.getData()
sesso = "m"
if random.randint(0,1) == 0:
sesso = "f"
writer.writerow([nome, cognome, posizione, piano, reparto,
piano_reparto, self.nuovoNum(), self.getVia(), data_nascita, self.getTesseraSanitaria(nome,cognome,sesso,data_nascita)])
    ## Returns the doctor's street address
def getVia(self):
return "via " + self.cont["nomi"][random.randint(0, len(self.cont["nomi"]) - 1)] + " n^" + str(random.randint(0, 100))
    ## Returns a doctor's phone number
def getNumero(self):
return "39" + str(random.randint(3000000000, 3990000000))
    ## Returns the doctor's date of birth
def getData(self):
return str(random.randint(1, 30)) + "/" + str(random.randint(1, 12)) + "/" + str(random.randint(1980, 2000))
def getDataRicovero(self):
return str(random.randint(1, 30)) + "/" + str(random.randint(1, 12)) + "/" + str(random.randint(2010, 2019))
def getDataRecDec(self):
out = ""
if random.randint(0,1) == 0:
out = "decesso "
else:
out = "dimisso "
return out + str(random.randint(1, 30)) + "/" + str(random.randint(1, 12)) + "/2020"
    ## Fetch all the departments straight from the official Novara hospital website
    def getReparti(self):
        ## Base link
        testo_base = "http://www.maggioreosp.novara.it/attivita-assistenziale/strutture-sanitarie/elenco-delle-strutture-sanitarie/padiglione-"
        ## Container for: department head, floor, department
        self.Nomi = []
        ## Container for the medical team
        self.equip_medica = []
        ## The various floors; -2 would be the complex floor
piani = {"terzo": 3, "seminterrato": -1, "terra": 0, "terreno": 0, "secondo": 2, "quarto": 4, "primo": 1,
"rialzato": 0}
l = 0
while (True):
            ## 'a' = ascii 97
            lettera = chr(l + 97)
            ## Make the request
            response = requests.get(testo_base + lettera)
            ## Parse it
            soup = BeautifulSoup(response.content, "html.parser")
            ## Check whether the page exists
            if soup.title.getText().__contains__("Pagina non trovata"):
                break
            else:
                ## If it does, say so
                print("Checking floor {0}".format(lettera))
                ## And split it into sections
                prova = soup.findAll("div", {"class", "siteorigin-widget-tinymce textwidget"})
                ## The layout is: Department - Name of the department head - floors
                for i in prova[2:]:
                    ## The actual text
                    testo = i.getText().split("\n")
                    ## Default floor
                    piano = str(random.randint(-1,4))
                    ## Work out the floor
                    for j in piani.keys():
                        ## If the text contains the key
                        if testo[2].__contains__(j):
                            ## Record the floor and stop
                            piano = piani[j]
                            break
                    ## A try/except is used here to keep the code from getting even more complicated.
                    ## The "mensa" department has no head listed, so one is assigned arbitrarily
try:
                        ## Here the following is appended to the Nomi list:
                        ## first, simply the department name;
                        ## then the text, with the blank entries stripped out, from which the part we care about is taken, i.e. the department head;
                        ## and finally the floor
                        self.Nomi.append([testo[1], list(filter(lambda val: val.strip().__len__() != 0, testo))[2].split(":")[1], piano])
                    ## In case it is the canteen
                    except IndexError:
                        self.Nomi.append([testo[1], " Dottor Lucreazia Grazie", piano])
                    ## Extract the medical team:
                    ## every link we have is sent to get_equip and the result is appended to the list. The first two entries are dropped since they are not relevant
self.equip_medica.append([self.get_equip(link) for link in [exc.find('a')["href"] for exc in prova[2:]]])
l += 1
    ## Takes a link as input and returns a list.
    def get_equip(self, link):
        ## The list we will return
        equip = []
        ## Make the request
        response = requests.get(link)
        ## Everything sits in a try/except because there may be no team at all. Given how the page is built,
        ## there is no way to check beforehand; a try/except is the only option
        try:
            ## Grab the text that contains the team
prima_parte = \
BeautifulSoup(response.content, "html.parser").find(id="accordion-content-equipe-%c2%bb").contents[
1].getText()
            ## If it contains a ":" then the structure is the simple one
            if prima_parte.__contains__(":"):
                ## To avoid overly long code (and since only 2 pages differ) the two special cases are filtered out here
                if prima_parte.__len__() < 3000 and prima_parte.__contains__("Antonio RAMPONI") == False:
                    ## In the normal case:
                    ## take the first part, split it on ":", keep the part we care about and split it again on "\n". After that,
                    ## iterate over each element of the list, strip the extra bits, then drop the empty cells and the various exceptions.
                    ## Finally join and split it on ","
equip = (list(filter( lambda val: val.split().__len__() != 1, list(filter(
lambda val: val.__len__() != 0 and val != "Struttura semplice" and val != "Strutture semplici" and val != "Coordinatore Infermieristico", list(
map(lambda val: val.strip(), prima_parte.split(":")[1].split("\n"))))))))
                ## In these 2 cases, the names are added "manually"
elif prima_parte.__contains__("Antonio RAMPONI"):
equip = ["Cristiana BOZZOLA", "Francesca FOTI", "Angela GIACALONE", "Monica LEUTNER", "Emanuela UGLIETTI", "Guido VALENTE"]
else:
equip = ["Patrizia NOTARI", "Matteo VIDALI", "Vessellina KRUOMOVA", "Giuseppina ANTONINI", "Ilaria CRESPI", "Luisa DI TRAPANI", "Lucia FRANCHINI", "Roberta Rolla", "Marco Bagnati", "Patrizia PERGOLONI"]
else:
                ## If there is no ":", this simple split is enough
equip = \
prima_parte.strip().split(",")
except AttributeError:
pass
        ## Work around one problematic case
if equip.__len__() == 1 and equip[0].__len__() > 20:
equip = equip[0].split(',')
        ## Close the connection
response.close()
        ## Return the list
return equip
def rem_vowel(string):
return (re.sub("[aeiouAEIOU]","",string))
def main():
    ## Create the manager object
    main_class = gestore()
    ## Load the various datasets
    main_class.aggiungi()
    ## Create the files
    main_class.creaFile()
## Run only when invoked directly
if __name__ == "__main__":
    main()
|
from . import oracle
Oracle = oracle.Oracle
__version__ = "1.0.1"
__all__ = ['oracle']
|
# Generated by Django 3.0 on 2019-12-25 18:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('trips', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='trip',
name='terminal',
field=models.CharField(choices=[('G', 'International Terminal G'), ('A', 'International Terminal A'), ('2D', 'Terminal 2'), ('1B', 'Harvey Milk Terminal 1B'), ('1C', 'Harvey Milk Terminal 1C'), ('3', 'Terminal 3')], help_text='Check https://www.flysfo.com/flight-info/airlines-at-sfo to see which Terminal you need to check in at for your flight.', max_length=2),
),
]
|
#!/bin/env python
# -*- coding: utf-8 -*-
from io import open
import sys
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
# From https://docs.pytest.org/en/latest/goodpractices.html#manual-integration
# See if we could instead use pytest-runner
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass to pytest")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = ''
def run_tests(self):
import shlex
#import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(shlex.split(self.pytest_args))
sys.exit(errno)
setup(
name="lgr-core",
version='2.0.0',
author='Viagénie and Wil Tan',
author_email='support@viagenie.ca',
description="API for manipulating Label Generation Rules",
long_description=open('README.md', encoding='utf-8').read(),
license="TBD",
install_requires=['lxml', 'language-tags', 'munidata', 'picu'],
packages=find_packages(),
scripts=[
'tools/lgr_cli.py',
'tools/lgr_validate.py',
'tools/rfc4290_dump.py',
'tools/one_per_line_dump.py',
'tools/rfc3743_dump.py',
'tools/xml_dump.py',
'tools/make_idna_repertoire.py',
'tools/lgr_annotate.py',
'tools/lgr_collision.py',
'tools/lgr_compare.py',
'tools/lgr_diff_collisions.py',
'tools/lgr_merge_set.py',
'tools/lgr_cross_script_variants.py',
'tools/lgr_harmonize'
],
tests_require=['pytest'],
cmdclass={'test': PyTest},
classifiers=[
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development :: Libraries'
]
)
|
"""Generated client library for dataproc version v1."""
# NOTE: This file is autogenerated and should not be edited by hand.
from __future__ import absolute_import
from apitools.base.py import base_api
from googlecloudsdk.third_party.apis.dataproc.v1 import dataproc_v1_messages as messages
class DataprocV1(base_api.BaseApiClient):
"""Generated client library for service dataproc version v1."""
MESSAGES_MODULE = messages
BASE_URL = 'https://dataproc.googleapis.com/'
MTLS_BASE_URL = 'https://dataproc.mtls.googleapis.com/'
_PACKAGE = 'dataproc'
_SCOPES = ['https://www.googleapis.com/auth/cloud-platform']
_VERSION = 'v1'
_CLIENT_ID = '1042881264118.apps.googleusercontent.com'
_CLIENT_SECRET = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_USER_AGENT = 'google-cloud-sdk'
_CLIENT_CLASS_NAME = 'DataprocV1'
_URL_VERSION = 'v1'
_API_KEY = None
def __init__(self, url='', credentials=None,
get_credentials=True, http=None, model=None,
log_request=False, log_response=False,
credentials_args=None, default_global_params=None,
additional_http_headers=None, response_encoding=None):
"""Create a new dataproc handle."""
url = url or self.BASE_URL
super(DataprocV1, self).__init__(
url, credentials=credentials,
get_credentials=get_credentials, http=http, model=model,
log_request=log_request, log_response=log_response,
credentials_args=credentials_args,
default_global_params=default_global_params,
additional_http_headers=additional_http_headers,
response_encoding=response_encoding)
self.projects_locations_autoscalingPolicies = self.ProjectsLocationsAutoscalingPoliciesService(self)
self.projects_locations_batches = self.ProjectsLocationsBatchesService(self)
self.projects_locations_sessions = self.ProjectsLocationsSessionsService(self)
self.projects_locations_workflowTemplates = self.ProjectsLocationsWorkflowTemplatesService(self)
self.projects_locations = self.ProjectsLocationsService(self)
self.projects_regions_autoscalingPolicies = self.ProjectsRegionsAutoscalingPoliciesService(self)
self.projects_regions_clusters = self.ProjectsRegionsClustersService(self)
self.projects_regions_jobs = self.ProjectsRegionsJobsService(self)
self.projects_regions_operations = self.ProjectsRegionsOperationsService(self)
self.projects_regions_workflowTemplates = self.ProjectsRegionsWorkflowTemplatesService(self)
self.projects_regions = self.ProjectsRegionsService(self)
self.projects = self.ProjectsService(self)
class ProjectsLocationsAutoscalingPoliciesService(base_api.BaseApiService):
"""Service class for the projects_locations_autoscalingPolicies resource."""
_NAME = 'projects_locations_autoscalingPolicies'
def __init__(self, client):
super(DataprocV1.ProjectsLocationsAutoscalingPoliciesService, self).__init__(client)
self._upload_configs = {
}
def Create(self, request, global_params=None):
r"""Creates new autoscaling policy.
Args:
request: (DataprocProjectsLocationsAutoscalingPoliciesCreateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(AutoscalingPolicy) The response message.
"""
config = self.GetMethodConfig('Create')
return self._RunMethod(
config, request, global_params=global_params)
Create.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/autoscalingPolicies',
http_method='POST',
method_id='dataproc.projects.locations.autoscalingPolicies.create',
ordered_params=['parent'],
path_params=['parent'],
query_params=[],
relative_path='v1/{+parent}/autoscalingPolicies',
request_field='autoscalingPolicy',
request_type_name='DataprocProjectsLocationsAutoscalingPoliciesCreateRequest',
response_type_name='AutoscalingPolicy',
supports_download=False,
)
def Delete(self, request, global_params=None):
r"""Deletes an autoscaling policy. It is an error to delete an autoscaling policy that is in use by one or more clusters.
Args:
request: (DataprocProjectsLocationsAutoscalingPoliciesDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Empty) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/autoscalingPolicies/{autoscalingPoliciesId}',
http_method='DELETE',
method_id='dataproc.projects.locations.autoscalingPolicies.delete',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}',
request_field='',
request_type_name='DataprocProjectsLocationsAutoscalingPoliciesDeleteRequest',
response_type_name='Empty',
supports_download=False,
)
def Get(self, request, global_params=None):
r"""Retrieves autoscaling policy.
Args:
request: (DataprocProjectsLocationsAutoscalingPoliciesGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(AutoscalingPolicy) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/autoscalingPolicies/{autoscalingPoliciesId}',
http_method='GET',
method_id='dataproc.projects.locations.autoscalingPolicies.get',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}',
request_field='',
request_type_name='DataprocProjectsLocationsAutoscalingPoliciesGetRequest',
response_type_name='AutoscalingPolicy',
supports_download=False,
)
def GetIamPolicy(self, request, global_params=None):
r"""Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.
Args:
request: (DataprocProjectsLocationsAutoscalingPoliciesGetIamPolicyRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Policy) The response message.
"""
config = self.GetMethodConfig('GetIamPolicy')
return self._RunMethod(
config, request, global_params=global_params)
GetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/autoscalingPolicies/{autoscalingPoliciesId}:getIamPolicy',
http_method='POST',
method_id='dataproc.projects.locations.autoscalingPolicies.getIamPolicy',
ordered_params=['resource'],
path_params=['resource'],
query_params=[],
relative_path='v1/{+resource}:getIamPolicy',
request_field='getIamPolicyRequest',
request_type_name='DataprocProjectsLocationsAutoscalingPoliciesGetIamPolicyRequest',
response_type_name='Policy',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Lists autoscaling policies in the project.
Args:
request: (DataprocProjectsLocationsAutoscalingPoliciesListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListAutoscalingPoliciesResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/autoscalingPolicies',
http_method='GET',
method_id='dataproc.projects.locations.autoscalingPolicies.list',
ordered_params=['parent'],
path_params=['parent'],
query_params=['pageSize', 'pageToken'],
relative_path='v1/{+parent}/autoscalingPolicies',
request_field='',
request_type_name='DataprocProjectsLocationsAutoscalingPoliciesListRequest',
response_type_name='ListAutoscalingPoliciesResponse',
supports_download=False,
)
def SetIamPolicy(self, request, global_params=None):
r"""Sets the access control policy on the specified resource. Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.
Args:
request: (DataprocProjectsLocationsAutoscalingPoliciesSetIamPolicyRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Policy) The response message.
"""
config = self.GetMethodConfig('SetIamPolicy')
return self._RunMethod(
config, request, global_params=global_params)
SetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/autoscalingPolicies/{autoscalingPoliciesId}:setIamPolicy',
http_method='POST',
method_id='dataproc.projects.locations.autoscalingPolicies.setIamPolicy',
ordered_params=['resource'],
path_params=['resource'],
query_params=[],
relative_path='v1/{+resource}:setIamPolicy',
request_field='setIamPolicyRequest',
request_type_name='DataprocProjectsLocationsAutoscalingPoliciesSetIamPolicyRequest',
response_type_name='Policy',
supports_download=False,
)
def TestIamPermissions(self, request, global_params=None):
r"""Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a NOT_FOUND error.Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may "fail open" without warning.
Args:
request: (DataprocProjectsLocationsAutoscalingPoliciesTestIamPermissionsRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(TestIamPermissionsResponse) The response message.
"""
config = self.GetMethodConfig('TestIamPermissions')
return self._RunMethod(
config, request, global_params=global_params)
TestIamPermissions.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/autoscalingPolicies/{autoscalingPoliciesId}:testIamPermissions',
http_method='POST',
method_id='dataproc.projects.locations.autoscalingPolicies.testIamPermissions',
ordered_params=['resource'],
path_params=['resource'],
query_params=[],
relative_path='v1/{+resource}:testIamPermissions',
request_field='testIamPermissionsRequest',
request_type_name='DataprocProjectsLocationsAutoscalingPoliciesTestIamPermissionsRequest',
response_type_name='TestIamPermissionsResponse',
supports_download=False,
)
def Update(self, request, global_params=None):
r"""Updates (replaces) autoscaling policy.Disabled check for update_mask, because all updates will be full replacements.
Args:
request: (AutoscalingPolicy) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(AutoscalingPolicy) The response message.
"""
config = self.GetMethodConfig('Update')
return self._RunMethod(
config, request, global_params=global_params)
Update.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/autoscalingPolicies/{autoscalingPoliciesId}',
http_method='PUT',
method_id='dataproc.projects.locations.autoscalingPolicies.update',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}',
request_field='<request>',
request_type_name='AutoscalingPolicy',
response_type_name='AutoscalingPolicy',
supports_download=False,
)
class ProjectsLocationsBatchesService(base_api.BaseApiService):
"""Service class for the projects_locations_batches resource."""
_NAME = 'projects_locations_batches'
def __init__(self, client):
super(DataprocV1.ProjectsLocationsBatchesService, self).__init__(client)
self._upload_configs = {
}
def Create(self, request, global_params=None):
r"""Creates a batch workload that executes asynchronously.
Args:
request: (DataprocProjectsLocationsBatchesCreateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Create')
return self._RunMethod(
config, request, global_params=global_params)
Create.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/batches',
http_method='POST',
method_id='dataproc.projects.locations.batches.create',
ordered_params=['parent'],
path_params=['parent'],
query_params=['batchId', 'requestId'],
relative_path='v1/{+parent}/batches',
request_field='batch',
request_type_name='DataprocProjectsLocationsBatchesCreateRequest',
response_type_name='Operation',
supports_download=False,
)
def Delete(self, request, global_params=None):
r"""Deletes the batch workload resource. If the batch is not in terminal state, the delete fails and the response returns FAILED_PRECONDITION.
Args:
request: (DataprocProjectsLocationsBatchesDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Empty) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/batches/{batchesId}',
http_method='DELETE',
method_id='dataproc.projects.locations.batches.delete',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}',
request_field='',
request_type_name='DataprocProjectsLocationsBatchesDeleteRequest',
response_type_name='Empty',
supports_download=False,
)
def Get(self, request, global_params=None):
r"""Gets the batch workload resource representation.
Args:
request: (DataprocProjectsLocationsBatchesGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Batch) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/batches/{batchesId}',
http_method='GET',
method_id='dataproc.projects.locations.batches.get',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}',
request_field='',
request_type_name='DataprocProjectsLocationsBatchesGetRequest',
response_type_name='Batch',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Lists batch workloads.
Args:
request: (DataprocProjectsLocationsBatchesListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListBatchesResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/batches',
http_method='GET',
method_id='dataproc.projects.locations.batches.list',
ordered_params=['parent'],
path_params=['parent'],
query_params=['filter', 'pageSize', 'pageToken'],
relative_path='v1/{+parent}/batches',
request_field='',
request_type_name='DataprocProjectsLocationsBatchesListRequest',
response_type_name='ListBatchesResponse',
supports_download=False,
)
class ProjectsLocationsSessionsService(base_api.BaseApiService):
"""Service class for the projects_locations_sessions resource."""
_NAME = 'projects_locations_sessions'
def __init__(self, client):
super(DataprocV1.ProjectsLocationsSessionsService, self).__init__(client)
self._upload_configs = {
}
def Create(self, request, global_params=None):
r"""Create an interactive session asynchronously.
Args:
request: (DataprocProjectsLocationsSessionsCreateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Create')
return self._RunMethod(
config, request, global_params=global_params)
Create.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/sessions',
http_method='POST',
method_id='dataproc.projects.locations.sessions.create',
ordered_params=['parent'],
path_params=['parent'],
query_params=['requestId', 'sessionId'],
relative_path='v1/{+parent}/sessions',
request_field='session',
request_type_name='DataprocProjectsLocationsSessionsCreateRequest',
response_type_name='Operation',
supports_download=False,
)
def Delete(self, request, global_params=None):
r"""Deletes the interactive session resource. If the session is not in terminal state, it will be terminated and deleted afterwards.
Args:
request: (DataprocProjectsLocationsSessionsDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}',
http_method='DELETE',
method_id='dataproc.projects.locations.sessions.delete',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}',
request_field='',
request_type_name='DataprocProjectsLocationsSessionsDeleteRequest',
response_type_name='Operation',
supports_download=False,
)
def Get(self, request, global_params=None):
r"""Gets the resource representation for an interactive session.
Args:
request: (DataprocProjectsLocationsSessionsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Session) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}',
http_method='GET',
method_id='dataproc.projects.locations.sessions.get',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}',
request_field='',
request_type_name='DataprocProjectsLocationsSessionsGetRequest',
response_type_name='Session',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Lists interactive sessions.
Args:
request: (DataprocProjectsLocationsSessionsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListSessionsResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/sessions',
http_method='GET',
method_id='dataproc.projects.locations.sessions.list',
ordered_params=['parent'],
path_params=['parent'],
query_params=['filter', 'pageSize', 'pageToken'],
relative_path='v1/{+parent}/sessions',
request_field='',
request_type_name='DataprocProjectsLocationsSessionsListRequest',
response_type_name='ListSessionsResponse',
supports_download=False,
)
def Terminate(self, request, global_params=None):
r"""Terminates the interactive session.
Args:
request: (DataprocProjectsLocationsSessionsTerminateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Terminate')
return self._RunMethod(
config, request, global_params=global_params)
Terminate.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/sessions/{sessionsId}:terminate',
http_method='POST',
method_id='dataproc.projects.locations.sessions.terminate',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}:terminate',
request_field='terminateSessionRequest',
request_type_name='DataprocProjectsLocationsSessionsTerminateRequest',
response_type_name='Operation',
supports_download=False,
)
class ProjectsLocationsWorkflowTemplatesService(base_api.BaseApiService):
"""Service class for the projects_locations_workflowTemplates resource."""
_NAME = 'projects_locations_workflowTemplates'
def __init__(self, client):
super(DataprocV1.ProjectsLocationsWorkflowTemplatesService, self).__init__(client)
self._upload_configs = {
}
def Create(self, request, global_params=None):
r"""Creates new workflow template.
Args:
request: (DataprocProjectsLocationsWorkflowTemplatesCreateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(WorkflowTemplate) The response message.
"""
config = self.GetMethodConfig('Create')
return self._RunMethod(
config, request, global_params=global_params)
Create.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/workflowTemplates',
http_method='POST',
method_id='dataproc.projects.locations.workflowTemplates.create',
ordered_params=['parent'],
path_params=['parent'],
query_params=[],
relative_path='v1/{+parent}/workflowTemplates',
request_field='workflowTemplate',
request_type_name='DataprocProjectsLocationsWorkflowTemplatesCreateRequest',
response_type_name='WorkflowTemplate',
supports_download=False,
)
def Delete(self, request, global_params=None):
r"""Deletes a workflow template. It does not cancel in-progress workflows.
Args:
request: (DataprocProjectsLocationsWorkflowTemplatesDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Empty) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/workflowTemplates/{workflowTemplatesId}',
http_method='DELETE',
method_id='dataproc.projects.locations.workflowTemplates.delete',
ordered_params=['name'],
path_params=['name'],
query_params=['version'],
relative_path='v1/{+name}',
request_field='',
request_type_name='DataprocProjectsLocationsWorkflowTemplatesDeleteRequest',
response_type_name='Empty',
supports_download=False,
)
def Get(self, request, global_params=None):
r"""Retrieves the latest workflow template.Can retrieve previously instantiated template by specifying optional version parameter.
Args:
request: (DataprocProjectsLocationsWorkflowTemplatesGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(WorkflowTemplate) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/workflowTemplates/{workflowTemplatesId}',
http_method='GET',
method_id='dataproc.projects.locations.workflowTemplates.get',
ordered_params=['name'],
path_params=['name'],
query_params=['version'],
relative_path='v1/{+name}',
request_field='',
request_type_name='DataprocProjectsLocationsWorkflowTemplatesGetRequest',
response_type_name='WorkflowTemplate',
supports_download=False,
)
def GetIamPolicy(self, request, global_params=None):
r"""Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.
Args:
request: (DataprocProjectsLocationsWorkflowTemplatesGetIamPolicyRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Policy) The response message.
"""
config = self.GetMethodConfig('GetIamPolicy')
return self._RunMethod(
config, request, global_params=global_params)
GetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/workflowTemplates/{workflowTemplatesId}:getIamPolicy',
http_method='POST',
method_id='dataproc.projects.locations.workflowTemplates.getIamPolicy',
ordered_params=['resource'],
path_params=['resource'],
query_params=[],
relative_path='v1/{+resource}:getIamPolicy',
request_field='getIamPolicyRequest',
request_type_name='DataprocProjectsLocationsWorkflowTemplatesGetIamPolicyRequest',
response_type_name='Policy',
supports_download=False,
)
def Instantiate(self, request, global_params=None):
r"""Instantiates a template and begins execution.The returned Operation can be used to track execution of workflow by polling operations.get. The Operation will complete when entire workflow is finished.The running workflow can be aborted via operations.cancel. This will cause any inflight jobs to be cancelled and workflow-owned clusters to be deleted.The Operation.metadata will be WorkflowMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). Also see Using WorkflowMetadata (https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata).On successful completion, Operation.response will be Empty.
Args:
request: (DataprocProjectsLocationsWorkflowTemplatesInstantiateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Instantiate')
return self._RunMethod(
config, request, global_params=global_params)
Instantiate.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/workflowTemplates/{workflowTemplatesId}:instantiate',
http_method='POST',
method_id='dataproc.projects.locations.workflowTemplates.instantiate',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}:instantiate',
request_field='instantiateWorkflowTemplateRequest',
request_type_name='DataprocProjectsLocationsWorkflowTemplatesInstantiateRequest',
response_type_name='Operation',
supports_download=False,
)
def InstantiateInline(self, request, global_params=None):
r"""Instantiates a template and begins execution.This method is equivalent to executing the sequence CreateWorkflowTemplate, InstantiateWorkflowTemplate, DeleteWorkflowTemplate.The returned Operation can be used to track execution of workflow by polling operations.get. The Operation will complete when entire workflow is finished.The running workflow can be aborted via operations.cancel. This will cause any inflight jobs to be cancelled and workflow-owned clusters to be deleted.The Operation.metadata will be WorkflowMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). Also see Using WorkflowMetadata (https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata).On successful completion, Operation.response will be Empty.
Args:
request: (DataprocProjectsLocationsWorkflowTemplatesInstantiateInlineRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('InstantiateInline')
return self._RunMethod(
config, request, global_params=global_params)
InstantiateInline.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/workflowTemplates:instantiateInline',
http_method='POST',
method_id='dataproc.projects.locations.workflowTemplates.instantiateInline',
ordered_params=['parent'],
path_params=['parent'],
query_params=['requestId'],
relative_path='v1/{+parent}/workflowTemplates:instantiateInline',
request_field='workflowTemplate',
request_type_name='DataprocProjectsLocationsWorkflowTemplatesInstantiateInlineRequest',
response_type_name='Operation',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Lists workflows that match the specified filter in the request.
Args:
request: (DataprocProjectsLocationsWorkflowTemplatesListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListWorkflowTemplatesResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/workflowTemplates',
http_method='GET',
method_id='dataproc.projects.locations.workflowTemplates.list',
ordered_params=['parent'],
path_params=['parent'],
query_params=['pageSize', 'pageToken'],
relative_path='v1/{+parent}/workflowTemplates',
request_field='',
request_type_name='DataprocProjectsLocationsWorkflowTemplatesListRequest',
response_type_name='ListWorkflowTemplatesResponse',
supports_download=False,
)
def SetIamPolicy(self, request, global_params=None):
r"""Sets the access control policy on the specified resource. Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.
Args:
request: (DataprocProjectsLocationsWorkflowTemplatesSetIamPolicyRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Policy) The response message.
"""
config = self.GetMethodConfig('SetIamPolicy')
return self._RunMethod(
config, request, global_params=global_params)
SetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/workflowTemplates/{workflowTemplatesId}:setIamPolicy',
http_method='POST',
method_id='dataproc.projects.locations.workflowTemplates.setIamPolicy',
ordered_params=['resource'],
path_params=['resource'],
query_params=[],
relative_path='v1/{+resource}:setIamPolicy',
request_field='setIamPolicyRequest',
request_type_name='DataprocProjectsLocationsWorkflowTemplatesSetIamPolicyRequest',
response_type_name='Policy',
supports_download=False,
)
def TestIamPermissions(self, request, global_params=None):
r"""Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a NOT_FOUND error.Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may "fail open" without warning.
Args:
request: (DataprocProjectsLocationsWorkflowTemplatesTestIamPermissionsRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(TestIamPermissionsResponse) The response message.
"""
config = self.GetMethodConfig('TestIamPermissions')
return self._RunMethod(
config, request, global_params=global_params)
TestIamPermissions.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/workflowTemplates/{workflowTemplatesId}:testIamPermissions',
http_method='POST',
method_id='dataproc.projects.locations.workflowTemplates.testIamPermissions',
ordered_params=['resource'],
path_params=['resource'],
query_params=[],
relative_path='v1/{+resource}:testIamPermissions',
request_field='testIamPermissionsRequest',
request_type_name='DataprocProjectsLocationsWorkflowTemplatesTestIamPermissionsRequest',
response_type_name='TestIamPermissionsResponse',
supports_download=False,
)
def Update(self, request, global_params=None):
r"""Updates (replaces) workflow template. The updated template must contain version that matches the current server version.
Args:
request: (WorkflowTemplate) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(WorkflowTemplate) The response message.
"""
config = self.GetMethodConfig('Update')
return self._RunMethod(
config, request, global_params=global_params)
Update.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}/workflowTemplates/{workflowTemplatesId}',
http_method='PUT',
method_id='dataproc.projects.locations.workflowTemplates.update',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}',
request_field='<request>',
request_type_name='WorkflowTemplate',
response_type_name='WorkflowTemplate',
supports_download=False,
)
class ProjectsLocationsService(base_api.BaseApiService):
"""Service class for the projects_locations resource."""
_NAME = 'projects_locations'
def __init__(self, client):
super(DataprocV1.ProjectsLocationsService, self).__init__(client)
self._upload_configs = {
}
class ProjectsRegionsAutoscalingPoliciesService(base_api.BaseApiService):
"""Service class for the projects_regions_autoscalingPolicies resource."""
_NAME = 'projects_regions_autoscalingPolicies'
def __init__(self, client):
super(DataprocV1.ProjectsRegionsAutoscalingPoliciesService, self).__init__(client)
self._upload_configs = {
}
def Create(self, request, global_params=None):
r"""Creates new autoscaling policy.
Args:
request: (DataprocProjectsRegionsAutoscalingPoliciesCreateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(AutoscalingPolicy) The response message.
"""
config = self.GetMethodConfig('Create')
return self._RunMethod(
config, request, global_params=global_params)
Create.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/regions/{regionsId}/autoscalingPolicies',
http_method='POST',
method_id='dataproc.projects.regions.autoscalingPolicies.create',
ordered_params=['parent'],
path_params=['parent'],
query_params=[],
relative_path='v1/{+parent}/autoscalingPolicies',
request_field='autoscalingPolicy',
request_type_name='DataprocProjectsRegionsAutoscalingPoliciesCreateRequest',
response_type_name='AutoscalingPolicy',
supports_download=False,
)
def Delete(self, request, global_params=None):
r"""Deletes an autoscaling policy. It is an error to delete an autoscaling policy that is in use by one or more clusters.
Args:
request: (DataprocProjectsRegionsAutoscalingPoliciesDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Empty) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/regions/{regionsId}/autoscalingPolicies/{autoscalingPoliciesId}',
http_method='DELETE',
method_id='dataproc.projects.regions.autoscalingPolicies.delete',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}',
request_field='',
request_type_name='DataprocProjectsRegionsAutoscalingPoliciesDeleteRequest',
response_type_name='Empty',
supports_download=False,
)
def Get(self, request, global_params=None):
r"""Retrieves autoscaling policy.
Args:
request: (DataprocProjectsRegionsAutoscalingPoliciesGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(AutoscalingPolicy) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/regions/{regionsId}/autoscalingPolicies/{autoscalingPoliciesId}',
http_method='GET',
method_id='dataproc.projects.regions.autoscalingPolicies.get',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}',
request_field='',
request_type_name='DataprocProjectsRegionsAutoscalingPoliciesGetRequest',
response_type_name='AutoscalingPolicy',
supports_download=False,
)
def GetIamPolicy(self, request, global_params=None):
r"""Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.
Args:
request: (DataprocProjectsRegionsAutoscalingPoliciesGetIamPolicyRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Policy) The response message.
"""
config = self.GetMethodConfig('GetIamPolicy')
return self._RunMethod(
config, request, global_params=global_params)
GetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/regions/{regionsId}/autoscalingPolicies/{autoscalingPoliciesId}:getIamPolicy',
http_method='POST',
method_id='dataproc.projects.regions.autoscalingPolicies.getIamPolicy',
ordered_params=['resource'],
path_params=['resource'],
query_params=[],
relative_path='v1/{+resource}:getIamPolicy',
request_field='getIamPolicyRequest',
request_type_name='DataprocProjectsRegionsAutoscalingPoliciesGetIamPolicyRequest',
response_type_name='Policy',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Lists autoscaling policies in the project.
Args:
request: (DataprocProjectsRegionsAutoscalingPoliciesListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListAutoscalingPoliciesResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/regions/{regionsId}/autoscalingPolicies',
http_method='GET',
method_id='dataproc.projects.regions.autoscalingPolicies.list',
ordered_params=['parent'],
path_params=['parent'],
query_params=['pageSize', 'pageToken'],
relative_path='v1/{+parent}/autoscalingPolicies',
request_field='',
request_type_name='DataprocProjectsRegionsAutoscalingPoliciesListRequest',
response_type_name='ListAutoscalingPoliciesResponse',
supports_download=False,
)
def SetIamPolicy(self, request, global_params=None):
r"""Sets the access control policy on the specified resource. Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.
Args:
request: (DataprocProjectsRegionsAutoscalingPoliciesSetIamPolicyRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Policy) The response message.
"""
config = self.GetMethodConfig('SetIamPolicy')
return self._RunMethod(
config, request, global_params=global_params)
SetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/regions/{regionsId}/autoscalingPolicies/{autoscalingPoliciesId}:setIamPolicy',
http_method='POST',
method_id='dataproc.projects.regions.autoscalingPolicies.setIamPolicy',
ordered_params=['resource'],
path_params=['resource'],
query_params=[],
relative_path='v1/{+resource}:setIamPolicy',
request_field='setIamPolicyRequest',
request_type_name='DataprocProjectsRegionsAutoscalingPoliciesSetIamPolicyRequest',
response_type_name='Policy',
supports_download=False,
)
def TestIamPermissions(self, request, global_params=None):
r"""Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a NOT_FOUND error.Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may "fail open" without warning.
Args:
request: (DataprocProjectsRegionsAutoscalingPoliciesTestIamPermissionsRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(TestIamPermissionsResponse) The response message.
"""
config = self.GetMethodConfig('TestIamPermissions')
return self._RunMethod(
config, request, global_params=global_params)
TestIamPermissions.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/regions/{regionsId}/autoscalingPolicies/{autoscalingPoliciesId}:testIamPermissions',
http_method='POST',
method_id='dataproc.projects.regions.autoscalingPolicies.testIamPermissions',
ordered_params=['resource'],
path_params=['resource'],
query_params=[],
relative_path='v1/{+resource}:testIamPermissions',
request_field='testIamPermissionsRequest',
request_type_name='DataprocProjectsRegionsAutoscalingPoliciesTestIamPermissionsRequest',
response_type_name='TestIamPermissionsResponse',
supports_download=False,
)
def Update(self, request, global_params=None):
r"""Updates (replaces) autoscaling policy.Disabled check for update_mask, because all updates will be full replacements.
Args:
request: (AutoscalingPolicy) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(AutoscalingPolicy) The response message.
"""
config = self.GetMethodConfig('Update')
return self._RunMethod(
config, request, global_params=global_params)
Update.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/regions/{regionsId}/autoscalingPolicies/{autoscalingPoliciesId}',
http_method='PUT',
method_id='dataproc.projects.regions.autoscalingPolicies.update',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}',
request_field='<request>',
request_type_name='AutoscalingPolicy',
response_type_name='AutoscalingPolicy',
supports_download=False,
)
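  # Illustrative (hypothetical) call into this service via the generated client; the
  # `messages` name stands for the companion generated messages module and the
  # project/region values are placeholders:
  #   client = DataprocV1()
  #   request = messages.DataprocProjectsRegionsAutoscalingPoliciesCreateRequest(
  #       parent='projects/my-project/regions/us-central1',
  #       autoscalingPolicy=policy)
  #   created = client.projects_regions_autoscalingPolicies.Create(request)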
class ProjectsRegionsClustersService(base_api.BaseApiService):
"""Service class for the projects_regions_clusters resource."""
_NAME = 'projects_regions_clusters'
def __init__(self, client):
super(DataprocV1.ProjectsRegionsClustersService, self).__init__(client)
self._upload_configs = {
}
def Create(self, request, global_params=None):
r"""Creates a cluster in a project. The returned Operation.metadata will be ClusterOperationMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
Args:
request: (DataprocProjectsRegionsClustersCreateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Create')
return self._RunMethod(
config, request, global_params=global_params)
Create.method_config = lambda: base_api.ApiMethodInfo(
http_method='POST',
method_id='dataproc.projects.regions.clusters.create',
ordered_params=['projectId', 'region'],
path_params=['projectId', 'region'],
query_params=['actionOnFailedPrimaryWorkers', 'requestId'],
relative_path='v1/projects/{projectId}/regions/{region}/clusters',
request_field='cluster',
request_type_name='DataprocProjectsRegionsClustersCreateRequest',
response_type_name='Operation',
supports_download=False,
)
def Delete(self, request, global_params=None):
r"""Deletes a cluster in a project. The returned Operation.metadata will be ClusterOperationMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
Args:
request: (DataprocProjectsRegionsClustersDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
http_method='DELETE',
method_id='dataproc.projects.regions.clusters.delete',
ordered_params=['projectId', 'region', 'clusterName'],
path_params=['clusterName', 'projectId', 'region'],
query_params=['clusterUuid', 'requestId'],
relative_path='v1/projects/{projectId}/regions/{region}/clusters/{clusterName}',
request_field='',
request_type_name='DataprocProjectsRegionsClustersDeleteRequest',
response_type_name='Operation',
supports_download=False,
)
def Diagnose(self, request, global_params=None):
r"""Gets cluster diagnostic information. The returned Operation.metadata will be ClusterOperationMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). After the operation completes, Operation.response contains DiagnoseClusterResults (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#diagnoseclusterresults).
Args:
request: (DataprocProjectsRegionsClustersDiagnoseRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Diagnose')
return self._RunMethod(
config, request, global_params=global_params)
Diagnose.method_config = lambda: base_api.ApiMethodInfo(
http_method='POST',
method_id='dataproc.projects.regions.clusters.diagnose',
ordered_params=['projectId', 'region', 'clusterName'],
path_params=['clusterName', 'projectId', 'region'],
query_params=[],
relative_path='v1/projects/{projectId}/regions/{region}/clusters/{clusterName}:diagnose',
request_field='diagnoseClusterRequest',
request_type_name='DataprocProjectsRegionsClustersDiagnoseRequest',
response_type_name='Operation',
supports_download=False,
)
def Get(self, request, global_params=None):
r"""Gets the resource representation for a cluster in a project.
Args:
request: (DataprocProjectsRegionsClustersGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Cluster) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
http_method='GET',
method_id='dataproc.projects.regions.clusters.get',
ordered_params=['projectId', 'region', 'clusterName'],
path_params=['clusterName', 'projectId', 'region'],
query_params=[],
relative_path='v1/projects/{projectId}/regions/{region}/clusters/{clusterName}',
request_field='',
request_type_name='DataprocProjectsRegionsClustersGetRequest',
response_type_name='Cluster',
supports_download=False,
)
def GetClusterAsTemplate(self, request, global_params=None):
r"""Exports a template for a cluster in a project that can be used in future CreateCluster requests.
Args:
request: (DataprocProjectsRegionsClustersGetClusterAsTemplateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Cluster) The response message.
"""
config = self.GetMethodConfig('GetClusterAsTemplate')
return self._RunMethod(
config, request, global_params=global_params)
GetClusterAsTemplate.method_config = lambda: base_api.ApiMethodInfo(
http_method='GET',
method_id='dataproc.projects.regions.clusters.getClusterAsTemplate',
ordered_params=['projectId', 'region', 'clusterName'],
path_params=['clusterName', 'projectId', 'region'],
query_params=[],
relative_path='v1/projects/{projectId}/regions/{region}/clusters/{clusterName}:getClusterAsTemplate',
request_field='',
request_type_name='DataprocProjectsRegionsClustersGetClusterAsTemplateRequest',
response_type_name='Cluster',
supports_download=False,
)
def GetIamPolicy(self, request, global_params=None):
r"""Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.
Args:
request: (DataprocProjectsRegionsClustersGetIamPolicyRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Policy) The response message.
"""
config = self.GetMethodConfig('GetIamPolicy')
return self._RunMethod(
config, request, global_params=global_params)
GetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/regions/{regionsId}/clusters/{clustersId}:getIamPolicy',
http_method='POST',
method_id='dataproc.projects.regions.clusters.getIamPolicy',
ordered_params=['resource'],
path_params=['resource'],
query_params=[],
relative_path='v1/{+resource}:getIamPolicy',
request_field='getIamPolicyRequest',
request_type_name='DataprocProjectsRegionsClustersGetIamPolicyRequest',
response_type_name='Policy',
supports_download=False,
)
def InjectCredentials(self, request, global_params=None):
r"""Inject encrypted credentials into all of the VMs in a cluster.The target cluster must be a personal auth cluster assigned to the user who is issuing the RPC.
Args:
request: (DataprocProjectsRegionsClustersInjectCredentialsRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('InjectCredentials')
return self._RunMethod(
config, request, global_params=global_params)
InjectCredentials.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/regions/{regionsId}/clusters/{clustersId}:injectCredentials',
http_method='POST',
method_id='dataproc.projects.regions.clusters.injectCredentials',
ordered_params=['project', 'region', 'cluster'],
path_params=['cluster', 'project', 'region'],
query_params=[],
relative_path='v1/{+project}/{+region}/{+cluster}:injectCredentials',
request_field='injectCredentialsRequest',
request_type_name='DataprocProjectsRegionsClustersInjectCredentialsRequest',
response_type_name='Operation',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Lists all regions/{region}/clusters in a project alphabetically.
Args:
request: (DataprocProjectsRegionsClustersListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListClustersResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
http_method='GET',
method_id='dataproc.projects.regions.clusters.list',
ordered_params=['projectId', 'region'],
path_params=['projectId', 'region'],
query_params=['filter', 'pageSize', 'pageToken'],
relative_path='v1/projects/{projectId}/regions/{region}/clusters',
request_field='',
request_type_name='DataprocProjectsRegionsClustersListRequest',
response_type_name='ListClustersResponse',
supports_download=False,
)
def Patch(self, request, global_params=None):
r"""Updates a cluster in a project. The returned Operation.metadata will be ClusterOperationMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). The cluster must be in a RUNNING state or an error is returned.
Args:
request: (DataprocProjectsRegionsClustersPatchRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Patch')
return self._RunMethod(
config, request, global_params=global_params)
Patch.method_config = lambda: base_api.ApiMethodInfo(
http_method='PATCH',
method_id='dataproc.projects.regions.clusters.patch',
ordered_params=['projectId', 'region', 'clusterName'],
path_params=['clusterName', 'projectId', 'region'],
query_params=['gracefulDecommissionTimeout', 'requestId', 'updateMask'],
relative_path='v1/projects/{projectId}/regions/{region}/clusters/{clusterName}',
request_field='cluster',
request_type_name='DataprocProjectsRegionsClustersPatchRequest',
response_type_name='Operation',
supports_download=False,
)
def Repair(self, request, global_params=None):
r"""Repairs a cluster.
Args:
request: (DataprocProjectsRegionsClustersRepairRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Repair')
return self._RunMethod(
config, request, global_params=global_params)
Repair.method_config = lambda: base_api.ApiMethodInfo(
http_method='POST',
method_id='dataproc.projects.regions.clusters.repair',
ordered_params=['projectId', 'region', 'clusterName'],
path_params=['clusterName', 'projectId', 'region'],
query_params=[],
relative_path='v1/projects/{projectId}/regions/{region}/clusters/{clusterName}:repair',
request_field='repairClusterRequest',
request_type_name='DataprocProjectsRegionsClustersRepairRequest',
response_type_name='Operation',
supports_download=False,
)
def SetIamPolicy(self, request, global_params=None):
r"""Sets the access control policy on the specified resource. Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.
Args:
request: (DataprocProjectsRegionsClustersSetIamPolicyRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Policy) The response message.
"""
config = self.GetMethodConfig('SetIamPolicy')
return self._RunMethod(
config, request, global_params=global_params)
SetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/regions/{regionsId}/clusters/{clustersId}:setIamPolicy',
http_method='POST',
method_id='dataproc.projects.regions.clusters.setIamPolicy',
ordered_params=['resource'],
path_params=['resource'],
query_params=[],
relative_path='v1/{+resource}:setIamPolicy',
request_field='setIamPolicyRequest',
request_type_name='DataprocProjectsRegionsClustersSetIamPolicyRequest',
response_type_name='Policy',
supports_download=False,
)
def Start(self, request, global_params=None):
r"""Starts a cluster in a project.
Args:
request: (DataprocProjectsRegionsClustersStartRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Start')
return self._RunMethod(
config, request, global_params=global_params)
Start.method_config = lambda: base_api.ApiMethodInfo(
http_method='POST',
method_id='dataproc.projects.regions.clusters.start',
ordered_params=['projectId', 'region', 'clusterName'],
path_params=['clusterName', 'projectId', 'region'],
query_params=[],
relative_path='v1/projects/{projectId}/regions/{region}/clusters/{clusterName}:start',
request_field='startClusterRequest',
request_type_name='DataprocProjectsRegionsClustersStartRequest',
response_type_name='Operation',
supports_download=False,
)
def Stop(self, request, global_params=None):
r"""Stops a cluster in a project.
Args:
request: (DataprocProjectsRegionsClustersStopRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Stop')
return self._RunMethod(
config, request, global_params=global_params)
Stop.method_config = lambda: base_api.ApiMethodInfo(
http_method='POST',
method_id='dataproc.projects.regions.clusters.stop',
ordered_params=['projectId', 'region', 'clusterName'],
path_params=['clusterName', 'projectId', 'region'],
query_params=[],
relative_path='v1/projects/{projectId}/regions/{region}/clusters/{clusterName}:stop',
request_field='stopClusterRequest',
request_type_name='DataprocProjectsRegionsClustersStopRequest',
response_type_name='Operation',
supports_download=False,
)
def TestIamPermissions(self, request, global_params=None):
r"""Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a NOT_FOUND error.Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may "fail open" without warning.
Args:
request: (DataprocProjectsRegionsClustersTestIamPermissionsRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(TestIamPermissionsResponse) The response message.
"""
config = self.GetMethodConfig('TestIamPermissions')
return self._RunMethod(
config, request, global_params=global_params)
TestIamPermissions.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/regions/{regionsId}/clusters/{clustersId}:testIamPermissions',
http_method='POST',
method_id='dataproc.projects.regions.clusters.testIamPermissions',
ordered_params=['resource'],
path_params=['resource'],
query_params=[],
relative_path='v1/{+resource}:testIamPermissions',
request_field='testIamPermissionsRequest',
request_type_name='DataprocProjectsRegionsClustersTestIamPermissionsRequest',
response_type_name='TestIamPermissionsResponse',
supports_download=False,
)
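  # Sketch of cluster creation through this service (placeholder names; `messages`
  # refers to the companion generated messages module). Note that Create returns a
  # long-running Operation rather than the Cluster itself:
  #   request = messages.DataprocProjectsRegionsClustersCreateRequest(
  #       projectId='my-project', region='us-central1', cluster=cluster)
  #   operation = client.projects_regions_clusters.Create(request)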
class ProjectsRegionsJobsService(base_api.BaseApiService):
"""Service class for the projects_regions_jobs resource."""
_NAME = 'projects_regions_jobs'
def __init__(self, client):
super(DataprocV1.ProjectsRegionsJobsService, self).__init__(client)
self._upload_configs = {
}
def Cancel(self, request, global_params=None):
r"""Starts a job cancellation request. To access the job resource after cancellation, call regions/{region}/jobs.list (https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/list) or regions/{region}/jobs.get (https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/get).
Args:
request: (DataprocProjectsRegionsJobsCancelRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Job) The response message.
"""
config = self.GetMethodConfig('Cancel')
return self._RunMethod(
config, request, global_params=global_params)
Cancel.method_config = lambda: base_api.ApiMethodInfo(
http_method='POST',
method_id='dataproc.projects.regions.jobs.cancel',
ordered_params=['projectId', 'region', 'jobId'],
path_params=['jobId', 'projectId', 'region'],
query_params=[],
relative_path='v1/projects/{projectId}/regions/{region}/jobs/{jobId}:cancel',
request_field='cancelJobRequest',
request_type_name='DataprocProjectsRegionsJobsCancelRequest',
response_type_name='Job',
supports_download=False,
)
def Delete(self, request, global_params=None):
r"""Deletes the job from the project. If the job is active, the delete fails, and the response returns FAILED_PRECONDITION.
Args:
request: (DataprocProjectsRegionsJobsDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Empty) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
http_method='DELETE',
method_id='dataproc.projects.regions.jobs.delete',
ordered_params=['projectId', 'region', 'jobId'],
path_params=['jobId', 'projectId', 'region'],
query_params=[],
relative_path='v1/projects/{projectId}/regions/{region}/jobs/{jobId}',
request_field='',
request_type_name='DataprocProjectsRegionsJobsDeleteRequest',
response_type_name='Empty',
supports_download=False,
)
def Get(self, request, global_params=None):
r"""Gets the resource representation for a job in a project.
Args:
request: (DataprocProjectsRegionsJobsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Job) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
http_method='GET',
method_id='dataproc.projects.regions.jobs.get',
ordered_params=['projectId', 'region', 'jobId'],
path_params=['jobId', 'projectId', 'region'],
query_params=[],
relative_path='v1/projects/{projectId}/regions/{region}/jobs/{jobId}',
request_field='',
request_type_name='DataprocProjectsRegionsJobsGetRequest',
response_type_name='Job',
supports_download=False,
)
def GetIamPolicy(self, request, global_params=None):
r"""Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.
Args:
request: (DataprocProjectsRegionsJobsGetIamPolicyRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Policy) The response message.
"""
config = self.GetMethodConfig('GetIamPolicy')
return self._RunMethod(
config, request, global_params=global_params)
GetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/regions/{regionsId}/jobs/{jobsId}:getIamPolicy',
http_method='POST',
method_id='dataproc.projects.regions.jobs.getIamPolicy',
ordered_params=['resource'],
path_params=['resource'],
query_params=[],
relative_path='v1/{+resource}:getIamPolicy',
request_field='getIamPolicyRequest',
request_type_name='DataprocProjectsRegionsJobsGetIamPolicyRequest',
response_type_name='Policy',
supports_download=False,
)
def GetJobAsTemplate(self, request, global_params=None):
r"""Exports the resource representation for a job in a project as a template that can be used as a SubmitJobRequest.
Args:
request: (DataprocProjectsRegionsJobsGetJobAsTemplateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Job) The response message.
"""
config = self.GetMethodConfig('GetJobAsTemplate')
return self._RunMethod(
config, request, global_params=global_params)
GetJobAsTemplate.method_config = lambda: base_api.ApiMethodInfo(
http_method='GET',
method_id='dataproc.projects.regions.jobs.getJobAsTemplate',
ordered_params=['projectId', 'region', 'jobId'],
path_params=['jobId', 'projectId', 'region'],
query_params=[],
relative_path='v1/projects/{projectId}/regions/{region}/jobs/{jobId}:getJobAsTemplate',
request_field='',
request_type_name='DataprocProjectsRegionsJobsGetJobAsTemplateRequest',
response_type_name='Job',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Lists regions/{region}/jobs in a project.
Args:
request: (DataprocProjectsRegionsJobsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListJobsResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
http_method='GET',
method_id='dataproc.projects.regions.jobs.list',
ordered_params=['projectId', 'region'],
path_params=['projectId', 'region'],
query_params=['clusterName', 'filter', 'jobStateMatcher', 'pageSize', 'pageToken'],
relative_path='v1/projects/{projectId}/regions/{region}/jobs',
request_field='',
request_type_name='DataprocProjectsRegionsJobsListRequest',
response_type_name='ListJobsResponse',
supports_download=False,
)
def Patch(self, request, global_params=None):
r"""Updates a job in a project.
Args:
request: (DataprocProjectsRegionsJobsPatchRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Job) The response message.
"""
config = self.GetMethodConfig('Patch')
return self._RunMethod(
config, request, global_params=global_params)
Patch.method_config = lambda: base_api.ApiMethodInfo(
http_method='PATCH',
method_id='dataproc.projects.regions.jobs.patch',
ordered_params=['projectId', 'region', 'jobId'],
path_params=['jobId', 'projectId', 'region'],
query_params=['updateMask'],
relative_path='v1/projects/{projectId}/regions/{region}/jobs/{jobId}',
request_field='job',
request_type_name='DataprocProjectsRegionsJobsPatchRequest',
response_type_name='Job',
supports_download=False,
)
def SetIamPolicy(self, request, global_params=None):
r"""Sets the access control policy on the specified resource. Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.
Args:
request: (DataprocProjectsRegionsJobsSetIamPolicyRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Policy) The response message.
"""
config = self.GetMethodConfig('SetIamPolicy')
return self._RunMethod(
config, request, global_params=global_params)
SetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/regions/{regionsId}/jobs/{jobsId}:setIamPolicy',
http_method='POST',
method_id='dataproc.projects.regions.jobs.setIamPolicy',
ordered_params=['resource'],
path_params=['resource'],
query_params=[],
relative_path='v1/{+resource}:setIamPolicy',
request_field='setIamPolicyRequest',
request_type_name='DataprocProjectsRegionsJobsSetIamPolicyRequest',
response_type_name='Policy',
supports_download=False,
)
def Submit(self, request, global_params=None):
r"""Submits a job to a cluster.
Args:
request: (DataprocProjectsRegionsJobsSubmitRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Job) The response message.
"""
config = self.GetMethodConfig('Submit')
return self._RunMethod(
config, request, global_params=global_params)
Submit.method_config = lambda: base_api.ApiMethodInfo(
http_method='POST',
method_id='dataproc.projects.regions.jobs.submit',
ordered_params=['projectId', 'region'],
path_params=['projectId', 'region'],
query_params=[],
relative_path='v1/projects/{projectId}/regions/{region}/jobs:submit',
request_field='submitJobRequest',
request_type_name='DataprocProjectsRegionsJobsSubmitRequest',
response_type_name='Job',
supports_download=False,
)
def SubmitAsOperation(self, request, global_params=None):
r"""Submits job to a cluster.
Args:
request: (DataprocProjectsRegionsJobsSubmitAsOperationRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('SubmitAsOperation')
return self._RunMethod(
config, request, global_params=global_params)
SubmitAsOperation.method_config = lambda: base_api.ApiMethodInfo(
http_method='POST',
method_id='dataproc.projects.regions.jobs.submitAsOperation',
ordered_params=['projectId', 'region'],
path_params=['projectId', 'region'],
query_params=[],
relative_path='v1/projects/{projectId}/regions/{region}/jobs:submitAsOperation',
request_field='submitJobRequest',
request_type_name='DataprocProjectsRegionsJobsSubmitAsOperationRequest',
response_type_name='Operation',
supports_download=False,
)
def TestIamPermissions(self, request, global_params=None):
r"""Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a NOT_FOUND error.Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may "fail open" without warning.
Args:
request: (DataprocProjectsRegionsJobsTestIamPermissionsRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(TestIamPermissionsResponse) The response message.
"""
config = self.GetMethodConfig('TestIamPermissions')
return self._RunMethod(
config, request, global_params=global_params)
TestIamPermissions.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/regions/{regionsId}/jobs/{jobsId}:testIamPermissions',
http_method='POST',
method_id='dataproc.projects.regions.jobs.testIamPermissions',
ordered_params=['resource'],
path_params=['resource'],
query_params=[],
relative_path='v1/{+resource}:testIamPermissions',
request_field='testIamPermissionsRequest',
request_type_name='DataprocProjectsRegionsJobsTestIamPermissionsRequest',
response_type_name='TestIamPermissionsResponse',
supports_download=False,
)
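  # Sketch of job submission through this service (placeholder names; `messages` is
  # the companion generated messages module). The job payload travels in the
  # submitJobRequest field, as declared in Submit.method_config above:
  #   request = messages.DataprocProjectsRegionsJobsSubmitRequest(
  #       projectId='my-project', region='us-central1',
  #       submitJobRequest=messages.SubmitJobRequest(job=job))
  #   submitted = client.projects_regions_jobs.Submit(request)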
class ProjectsRegionsOperationsService(base_api.BaseApiService):
"""Service class for the projects_regions_operations resource."""
_NAME = 'projects_regions_operations'
def __init__(self, client):
super(DataprocV1.ProjectsRegionsOperationsService, self).__init__(client)
self._upload_configs = {
}
def Cancel(self, request, global_params=None):
r"""Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to Code.CANCELLED.
Args:
request: (DataprocProjectsRegionsOperationsCancelRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Empty) The response message.
"""
config = self.GetMethodConfig('Cancel')
return self._RunMethod(
config, request, global_params=global_params)
Cancel.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/regions/{regionsId}/operations/{operationsId}:cancel',
http_method='POST',
method_id='dataproc.projects.regions.operations.cancel',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}:cancel',
request_field='',
request_type_name='DataprocProjectsRegionsOperationsCancelRequest',
response_type_name='Empty',
supports_download=False,
)
def Delete(self, request, global_params=None):
r"""Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED.
Args:
request: (DataprocProjectsRegionsOperationsDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Empty) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/regions/{regionsId}/operations/{operationsId}',
http_method='DELETE',
method_id='dataproc.projects.regions.operations.delete',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}',
request_field='',
request_type_name='DataprocProjectsRegionsOperationsDeleteRequest',
response_type_name='Empty',
supports_download=False,
)
def Get(self, request, global_params=None):
r"""Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.
Args:
request: (DataprocProjectsRegionsOperationsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/regions/{regionsId}/operations/{operationsId}',
http_method='GET',
method_id='dataproc.projects.regions.operations.get',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}',
request_field='',
request_type_name='DataprocProjectsRegionsOperationsGetRequest',
response_type_name='Operation',
supports_download=False,
)
def GetIamPolicy(self, request, global_params=None):
r"""Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.
Args:
request: (DataprocProjectsRegionsOperationsGetIamPolicyRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Policy) The response message.
"""
config = self.GetMethodConfig('GetIamPolicy')
return self._RunMethod(
config, request, global_params=global_params)
GetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/regions/{regionsId}/operations/{operationsId}:getIamPolicy',
http_method='POST',
method_id='dataproc.projects.regions.operations.getIamPolicy',
ordered_params=['resource'],
path_params=['resource'],
query_params=[],
relative_path='v1/{+resource}:getIamPolicy',
request_field='getIamPolicyRequest',
request_type_name='DataprocProjectsRegionsOperationsGetIamPolicyRequest',
response_type_name='Policy',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED.NOTE: the name binding allows API services to override the binding to use different resource name schemes, such as users/*/operations. To override the binding, API services can add a binding such as "/v1/{name=users/*}/operations" to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.
Args:
request: (DataprocProjectsRegionsOperationsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListOperationsResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/regions/{regionsId}/operations',
http_method='GET',
method_id='dataproc.projects.regions.operations.list',
ordered_params=['name'],
path_params=['name'],
query_params=['filter', 'pageSize', 'pageToken'],
relative_path='v1/{+name}',
request_field='',
request_type_name='DataprocProjectsRegionsOperationsListRequest',
response_type_name='ListOperationsResponse',
supports_download=False,
)
def SetIamPolicy(self, request, global_params=None):
r"""Sets the access control policy on the specified resource. Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.
Args:
request: (DataprocProjectsRegionsOperationsSetIamPolicyRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Policy) The response message.
"""
config = self.GetMethodConfig('SetIamPolicy')
return self._RunMethod(
config, request, global_params=global_params)
SetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/regions/{regionsId}/operations/{operationsId}:setIamPolicy',
http_method='POST',
method_id='dataproc.projects.regions.operations.setIamPolicy',
ordered_params=['resource'],
path_params=['resource'],
query_params=[],
relative_path='v1/{+resource}:setIamPolicy',
request_field='setIamPolicyRequest',
request_type_name='DataprocProjectsRegionsOperationsSetIamPolicyRequest',
response_type_name='Policy',
supports_download=False,
)
def TestIamPermissions(self, request, global_params=None):
r"""Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a NOT_FOUND error.Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may "fail open" without warning.
Args:
request: (DataprocProjectsRegionsOperationsTestIamPermissionsRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(TestIamPermissionsResponse) The response message.
"""
config = self.GetMethodConfig('TestIamPermissions')
return self._RunMethod(
config, request, global_params=global_params)
TestIamPermissions.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/regions/{regionsId}/operations/{operationsId}:testIamPermissions',
http_method='POST',
method_id='dataproc.projects.regions.operations.testIamPermissions',
ordered_params=['resource'],
path_params=['resource'],
query_params=[],
relative_path='v1/{+resource}:testIamPermissions',
request_field='testIamPermissionsRequest',
request_type_name='DataprocProjectsRegionsOperationsTestIamPermissionsRequest',
response_type_name='TestIamPermissionsResponse',
supports_download=False,
)
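  # Sketch of polling a long-running operation with this service (placeholder names;
  # `messages` is the companion generated messages module):
  #   request = messages.DataprocProjectsRegionsOperationsGetRequest(
  #       name='projects/my-project/regions/us-central1/operations/my-operation-id')
  #   operation = client.projects_regions_operations.Get(request)
  #   if operation.done: ...  # inspect operation.response or operation.error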
class ProjectsRegionsWorkflowTemplatesService(base_api.BaseApiService):
"""Service class for the projects_regions_workflowTemplates resource."""
_NAME = 'projects_regions_workflowTemplates'
def __init__(self, client):
super(DataprocV1.ProjectsRegionsWorkflowTemplatesService, self).__init__(client)
self._upload_configs = {
}
def Create(self, request, global_params=None):
r"""Creates new workflow template.
Args:
request: (DataprocProjectsRegionsWorkflowTemplatesCreateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(WorkflowTemplate) The response message.
"""
config = self.GetMethodConfig('Create')
return self._RunMethod(
config, request, global_params=global_params)
Create.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/regions/{regionsId}/workflowTemplates',
http_method='POST',
method_id='dataproc.projects.regions.workflowTemplates.create',
ordered_params=['parent'],
path_params=['parent'],
query_params=[],
relative_path='v1/{+parent}/workflowTemplates',
request_field='workflowTemplate',
request_type_name='DataprocProjectsRegionsWorkflowTemplatesCreateRequest',
response_type_name='WorkflowTemplate',
supports_download=False,
)
def Delete(self, request, global_params=None):
r"""Deletes a workflow template. It does not cancel in-progress workflows.
Args:
request: (DataprocProjectsRegionsWorkflowTemplatesDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Empty) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/regions/{regionsId}/workflowTemplates/{workflowTemplatesId}',
http_method='DELETE',
method_id='dataproc.projects.regions.workflowTemplates.delete',
ordered_params=['name'],
path_params=['name'],
query_params=['version'],
relative_path='v1/{+name}',
request_field='',
request_type_name='DataprocProjectsRegionsWorkflowTemplatesDeleteRequest',
response_type_name='Empty',
supports_download=False,
)
def Get(self, request, global_params=None):
r"""Retrieves the latest workflow template.Can retrieve previously instantiated template by specifying optional version parameter.
Args:
request: (DataprocProjectsRegionsWorkflowTemplatesGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(WorkflowTemplate) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/regions/{regionsId}/workflowTemplates/{workflowTemplatesId}',
http_method='GET',
method_id='dataproc.projects.regions.workflowTemplates.get',
ordered_params=['name'],
path_params=['name'],
query_params=['version'],
relative_path='v1/{+name}',
request_field='',
request_type_name='DataprocProjectsRegionsWorkflowTemplatesGetRequest',
response_type_name='WorkflowTemplate',
supports_download=False,
)
def GetIamPolicy(self, request, global_params=None):
r"""Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.
Args:
request: (DataprocProjectsRegionsWorkflowTemplatesGetIamPolicyRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Policy) The response message.
"""
config = self.GetMethodConfig('GetIamPolicy')
return self._RunMethod(
config, request, global_params=global_params)
GetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/regions/{regionsId}/workflowTemplates/{workflowTemplatesId}:getIamPolicy',
http_method='POST',
method_id='dataproc.projects.regions.workflowTemplates.getIamPolicy',
ordered_params=['resource'],
path_params=['resource'],
query_params=[],
relative_path='v1/{+resource}:getIamPolicy',
request_field='getIamPolicyRequest',
request_type_name='DataprocProjectsRegionsWorkflowTemplatesGetIamPolicyRequest',
response_type_name='Policy',
supports_download=False,
)
def Instantiate(self, request, global_params=None):
r"""Instantiates a template and begins execution.The returned Operation can be used to track execution of workflow by polling operations.get. The Operation will complete when entire workflow is finished.The running workflow can be aborted via operations.cancel. This will cause any inflight jobs to be cancelled and workflow-owned clusters to be deleted.The Operation.metadata will be WorkflowMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). Also see Using WorkflowMetadata (https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata).On successful completion, Operation.response will be Empty.
Args:
request: (DataprocProjectsRegionsWorkflowTemplatesInstantiateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Instantiate')
return self._RunMethod(
config, request, global_params=global_params)
Instantiate.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/regions/{regionsId}/workflowTemplates/{workflowTemplatesId}:instantiate',
http_method='POST',
method_id='dataproc.projects.regions.workflowTemplates.instantiate',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}:instantiate',
request_field='instantiateWorkflowTemplateRequest',
request_type_name='DataprocProjectsRegionsWorkflowTemplatesInstantiateRequest',
response_type_name='Operation',
supports_download=False,
)
def InstantiateInline(self, request, global_params=None):
r"""Instantiates a template and begins execution.This method is equivalent to executing the sequence CreateWorkflowTemplate, InstantiateWorkflowTemplate, DeleteWorkflowTemplate.The returned Operation can be used to track execution of workflow by polling operations.get. The Operation will complete when entire workflow is finished.The running workflow can be aborted via operations.cancel. This will cause any inflight jobs to be cancelled and workflow-owned clusters to be deleted.The Operation.metadata will be WorkflowMetadata (https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). Also see Using WorkflowMetadata (https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata).On successful completion, Operation.response will be Empty.
Args:
request: (DataprocProjectsRegionsWorkflowTemplatesInstantiateInlineRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('InstantiateInline')
return self._RunMethod(
config, request, global_params=global_params)
InstantiateInline.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/regions/{regionsId}/workflowTemplates:instantiateInline',
http_method='POST',
method_id='dataproc.projects.regions.workflowTemplates.instantiateInline',
ordered_params=['parent'],
path_params=['parent'],
query_params=['requestId'],
relative_path='v1/{+parent}/workflowTemplates:instantiateInline',
request_field='workflowTemplate',
request_type_name='DataprocProjectsRegionsWorkflowTemplatesInstantiateInlineRequest',
response_type_name='Operation',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Lists workflows that match the specified filter in the request.
Args:
request: (DataprocProjectsRegionsWorkflowTemplatesListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListWorkflowTemplatesResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/regions/{regionsId}/workflowTemplates',
http_method='GET',
method_id='dataproc.projects.regions.workflowTemplates.list',
ordered_params=['parent'],
path_params=['parent'],
query_params=['pageSize', 'pageToken'],
relative_path='v1/{+parent}/workflowTemplates',
request_field='',
request_type_name='DataprocProjectsRegionsWorkflowTemplatesListRequest',
response_type_name='ListWorkflowTemplatesResponse',
supports_download=False,
)
def SetIamPolicy(self, request, global_params=None):
r"""Sets the access control policy on the specified resource. Replaces any existing policy.Can return NOT_FOUND, INVALID_ARGUMENT, and PERMISSION_DENIED errors.
Args:
request: (DataprocProjectsRegionsWorkflowTemplatesSetIamPolicyRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Policy) The response message.
"""
config = self.GetMethodConfig('SetIamPolicy')
return self._RunMethod(
config, request, global_params=global_params)
SetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/regions/{regionsId}/workflowTemplates/{workflowTemplatesId}:setIamPolicy',
http_method='POST',
method_id='dataproc.projects.regions.workflowTemplates.setIamPolicy',
ordered_params=['resource'],
path_params=['resource'],
query_params=[],
relative_path='v1/{+resource}:setIamPolicy',
request_field='setIamPolicyRequest',
request_type_name='DataprocProjectsRegionsWorkflowTemplatesSetIamPolicyRequest',
response_type_name='Policy',
supports_download=False,
)
def TestIamPermissions(self, request, global_params=None):
r"""Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a NOT_FOUND error.Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may "fail open" without warning.
Args:
request: (DataprocProjectsRegionsWorkflowTemplatesTestIamPermissionsRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(TestIamPermissionsResponse) The response message.
"""
config = self.GetMethodConfig('TestIamPermissions')
return self._RunMethod(
config, request, global_params=global_params)
TestIamPermissions.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/regions/{regionsId}/workflowTemplates/{workflowTemplatesId}:testIamPermissions',
http_method='POST',
method_id='dataproc.projects.regions.workflowTemplates.testIamPermissions',
ordered_params=['resource'],
path_params=['resource'],
query_params=[],
relative_path='v1/{+resource}:testIamPermissions',
request_field='testIamPermissionsRequest',
request_type_name='DataprocProjectsRegionsWorkflowTemplatesTestIamPermissionsRequest',
response_type_name='TestIamPermissionsResponse',
supports_download=False,
)
def Update(self, request, global_params=None):
r"""Updates (replaces) workflow template. The updated template must contain version that matches the current server version.
Args:
request: (WorkflowTemplate) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(WorkflowTemplate) The response message.
"""
config = self.GetMethodConfig('Update')
return self._RunMethod(
config, request, global_params=global_params)
Update.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/regions/{regionsId}/workflowTemplates/{workflowTemplatesId}',
http_method='PUT',
method_id='dataproc.projects.regions.workflowTemplates.update',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}',
request_field='<request>',
request_type_name='WorkflowTemplate',
response_type_name='WorkflowTemplate',
supports_download=False,
)
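  # Sketch of instantiating a stored workflow template (placeholder names; `messages`
  # is the companion generated messages module). The call returns an Operation that
  # tracks the whole workflow:
  #   request = messages.DataprocProjectsRegionsWorkflowTemplatesInstantiateRequest(
  #       name='projects/my-project/regions/us-central1/workflowTemplates/my-template',
  #       instantiateWorkflowTemplateRequest=messages.InstantiateWorkflowTemplateRequest())
  #   operation = client.projects_regions_workflowTemplates.Instantiate(request)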
class ProjectsRegionsService(base_api.BaseApiService):
"""Service class for the projects_regions resource."""
_NAME = 'projects_regions'
def __init__(self, client):
super(DataprocV1.ProjectsRegionsService, self).__init__(client)
self._upload_configs = {
}
class ProjectsService(base_api.BaseApiService):
"""Service class for the projects resource."""
_NAME = 'projects'
def __init__(self, client):
super(DataprocV1.ProjectsService, self).__init__(client)
self._upload_configs = {
}
|
import os
import tornado.ioloop
import tornado.web
import tornado.websocket
# TODO move this to cfg
PORT = 8080
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.render("html/index.html")
class PageNotFoundHandler(tornado.web.RequestHandler):
def get(self):
self.redirect("/", True)
class LettersSolverHandler(tornado.web.RequestHandler):
def get(self):
self.render("html/lettersSolver.html")
class NumbersSolverHandler(tornado.web.RequestHandler):
def get(self):
self.render("html/numbersSolver.html")
class RandomNumberDisplayHandler(tornado.web.RequestHandler):
def get(self):
self.render("html/randomNumberDisplay.html")
def make_app():
    root = os.path.dirname(os.path.abspath(__file__))
    return tornado.web.Application([
        (r"/", MainHandler),
        (r"/letters", LettersSolverHandler),
        (r"/numbers", NumbersSolverHandler),
        (r"/display", RandomNumberDisplayHandler),
        # Anchor the static asset paths to this file's directory so the app works
        # regardless of the current working directory.
        (r"/resources/js/(.*)", tornado.web.StaticFileHandler, {"path": os.path.join(root, "js")}),
        (r"/resources/css/(.*)", tornado.web.StaticFileHandler, {"path": os.path.join(root, "css")}),
    ], default_handler_class=PageNotFoundHandler)
if __name__ == "__main__":
app = make_app()
app.listen(PORT)
print("Now serving on port {0}".format(PORT))
tornado.ioloop.IOLoop.current().start()
|
# Generated by Django 2.1.3 on 2018-11-17 17:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('chat', '0004_remove_chat_admin'),
]
operations = [
migrations.AlterField(
model_name='chat',
name='participants',
field=models.ManyToManyField(related_name='chats', to='chat.Contact'),
),
]
|
a = [5, 2, 6, 3]
print(sorted(a)) # RETURNS SORTED ARRAY => [2, 3, 5, 6]
print(a) # => [5, 2, 6, 3]
a.sort() # SORT IN ASCENDING ORDER
print(a) # [2, 3, 5, 6]
a.sort(reverse=True) # SORT IN DESCENDING ORDER
print(a) # [6, 5, 3, 2]
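# sorted() AND list.sort() ALSO ACCEPT A KEY FUNCTION; AN ILLUSTRATIVE EXAMPLE:
b = [-4, 1, -3, 2]
print(sorted(b, key=abs))  # SORT BY ABSOLUTE VALUE => [1, 2, -3, -4]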
|
# Copyright (c) 2022. K2-Software
# All software, both binary and source published by K2-Software (hereafter, Software) is copyrighted by the author (hereafter, K2-Software) and ownership of all right, title and interest in and to the Software remains with K2-Software. By using or copying the Software, User agrees to abide by the terms of this Agreement.
import json
import html
import requests
import re
from object_type import ObjectType
class IManageObject:
def __init__(self, body):
self._body = body
        # Fall back to sensible defaults for any fields missing from the body.
        self.id = body.get('id', 'None')
        self.database = body.get('database', 'None')
        self.wstype = ObjectType.value(body['wstype']) if 'wstype' in body else ObjectType.UNKNOWN
        self.name = body.get('name', '<empty>')
self.session = None
@property
def id(self):
return self._id
@id.setter
def id(self, value):
self._id = value
@property
def database(self):
return self._database
@database.setter
def database(self, value):
self._database = value
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def wstype(self):
return self._wstype
@wstype.setter
def wstype(self, value):
self._wstype = value
@property
def session(self):
return self._session
@session.setter
def session(self, value):
self._session = value
@property
def body(self):
return self._body
@body.setter
def body(self, value):
self._body = value
def info(self):
        print(f'{self.database}!{self.wstype}: {self.name} ({self.id})')
if self.session is None:
print('No session found')
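# Minimal usage sketch (illustrative values): build an IManageObject from a plain
# dict body; omitting 'wstype' exercises the ObjectType.UNKNOWN fallback above.
if __name__ == '__main__':
    sample = IManageObject({'id': '123', 'database': 'ACTIVE', 'name': 'Example'})
    sample.info()  # prints the summary line followed by 'No session found'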
|
from typing import Any
from modAL.utils.data import modALinput
from modAL import ActiveLearner
class TorchTopicsActiveLearner(ActiveLearner):
    """ActiveLearner whose internal (re)fitting hooks are overridden as no-ops,
    on the assumption that the underlying torch estimator is trained elsewhere."""
    def _fit_to_known(self, bootstrap: bool = False, **fit_kwargs) -> 'BaseLearner':
        # No-op override: skip refitting on the accumulated training data.
        pass
    def _fit_on_new(self, X: modALinput, y: modALinput, bootstrap: bool = False, **fit_kwargs) -> 'BaseLearner':
        # No-op override: skip incremental fitting on newly labelled samples.
        pass
    def score(self, X: modALinput, y: modALinput, **score_kwargs) -> Any:
        # Scoring is not implemented yet; a Keras-style estimator could delegate
        # to evaluate(), as in the commented-out line below.
        pass
        # return self.estimator.evaluate(X, y, verbose=0, **score_kwargs)[1]
|
"""The go-eCharger (MQTT) switch."""
import logging
from homeassistant import config_entries, core
from homeassistant.components import mqtt
from homeassistant.components.select import SelectEntity
from homeassistant.core import callback
from .definitions import SELECTS, GoEChargerSelectEntityDescription
from .entity import GoEChargerEntity
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: core.HomeAssistant,
config_entry: config_entries.ConfigEntry,
async_add_entities,
):
"""Config entry setup."""
async_add_entities(
GoEChargerSelect(config_entry, description)
for description in SELECTS
if not description.disabled
)
class GoEChargerSelect(GoEChargerEntity, SelectEntity):
"""Representation of a go-eCharger switch that is updated via MQTT."""
entity_description: GoEChargerSelectEntityDescription
def __init__(
self,
config_entry: config_entries.ConfigEntry,
description: GoEChargerSelectEntityDescription,
) -> None:
"""Initialize the sensor."""
super().__init__(config_entry, description)
self.entity_description = description
self._attr_options = list(description.options.values())
self._attr_current_option = None
@property
def available(self):
"""Return True if entity is available."""
return self._attr_current_option is not None
def key_from_option(self, option: str):
"""Return the option a given payload is assigned to."""
try:
return next(
key
for key, value in self.entity_description.options.items()
if value == option
)
except StopIteration:
return None
async def async_select_option(self, option: str) -> None:
"""Update the current value."""
await mqtt.async_publish(
self.hass, f"{self._topic}/set", self.key_from_option(option)
)
async def async_added_to_hass(self):
"""Subscribe to MQTT events."""
@callback
def message_received(message):
"""Handle new MQTT messages."""
if self.entity_description.state is not None:
self._attr_current_option = self.entity_description.state(
message.payload, self.entity_description.attribute
)
else:
payload = message.payload
if payload is None or payload in ["null", "none"]:
return
if payload not in self.entity_description.options.keys():
_LOGGER.error(
"Invalid option for %s: '%s' (valid options: %s)",
self.entity_id,
payload,
self.options,
)
return
self._attr_current_option = self.entity_description.options[payload]
self.async_write_ha_state()
await mqtt.async_subscribe(self.hass, self._topic, message_received, 1)
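# Illustrative sketch (added, not part of this integration): the select entity above translates raw
# MQTT payloads to display options via entity_description.options and reverses that mapping in
# key_from_option(). The mapping below is hypothetical; the real keys and labels come from SELECTS
# in definitions.py.
if __name__ == "__main__":
    options = {"0": "Neutral", "1": "Off", "2": "On"}  # hypothetical payload -> label mapping

    def key_from_option(option):
        """Reverse lookup, same logic as GoEChargerSelect.key_from_option."""
        try:
            return next(key for key, value in options.items() if value == option)
        except StopIteration:
            return None

    assert options["2"] == "On"          # incoming MQTT payload -> displayed option
    assert key_from_option("On") == "2"  # selected option -> payload published to <topic>/set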
|
"""
Copyright (c) 2016-present, Facebook, Inc.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree. An additional grant
of patent rights can be found in the PATENTS file in the same directory.
"""
import asyncio
import logging
import pathlib
import os
import shutil
from magma.common.service import MagmaService
from magma.magmad.upgrade.magma_upgrader import compare_package_versions
from magma.magmad.upgrade.upgrader import UpgraderFactory
from magma.magmad.upgrade.upgrader2 import ImageNameT, run_command, \
UpgradeIntent, Upgrader2, VersionInfo, VersionT
IMAGE_INSTALL_DIR = '/var/cache/magma_feg'
IMAGE_INSTALL_SCRIPT = IMAGE_INSTALL_DIR + '/install.sh'
class FegUpgrader(Upgrader2):
"""
Downloads and installs the federation gateway images
"""
def version_to_image_name(self, version: VersionT) -> ImageNameT:
"""
        Returns the image name derived from the version string,
        e.g. 0.3.68-1541626353-d1c29db1 -> magma_feg_d1c29db1.zip
"""
parts = version.split("-")
if len(parts) != 3:
raise ValueError("Unknown version format: %s" % version)
return ImageNameT("magma_feg_%s.zip" % parts[2])
async def get_upgrade_intent(self) -> UpgradeIntent:
"""
Returns the desired version for the gateway.
We don't support downgrading, and so checks are made to update
only if the target version is higher than the current version.
"""
tgt_version = self.service.mconfig.package_version
curr_version = self.service.version
if (tgt_version == "0.0.0-0" or
compare_package_versions(curr_version, tgt_version) <= 0):
tgt_version = curr_version
return UpgradeIntent(stable=VersionT(tgt_version), canary=VersionT(""))
async def get_versions(self) -> VersionInfo:
""" Returns the current version """
return VersionInfo(
current_version=self.service.version,
available_versions=set(),
)
async def prepare_upgrade(
self, version: VersionT, path_to_image: pathlib.Path
) -> None:
""" No-op for the feg upgrader """
return
async def upgrade(
self, version: VersionT, path_to_image: pathlib.Path
) -> None:
""" Time to actually upgrade the Feg using the image """
# Extract the image to the install directory
shutil.rmtree(IMAGE_INSTALL_DIR, ignore_errors=True)
os.mkdir(IMAGE_INSTALL_DIR)
await run_command("unzip", str(path_to_image), "-d", IMAGE_INSTALL_DIR)
logging.info("Running image install script: %s", IMAGE_INSTALL_SCRIPT)
await run_command(IMAGE_INSTALL_SCRIPT)
class FegUpgraderFactory(UpgraderFactory):
""" Returns an instance of the FegUpgrader """
def create_upgrader(
self,
magmad_service: MagmaService,
loop: asyncio.AbstractEventLoop,
) -> FegUpgrader:
return FegUpgrader(magmad_service)
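# Quick illustration (added, not part of the original module): version_to_image_name() keeps only
# the trailing commit-hash component of a "<semver>-<timestamp>-<hash>" version string. The method
# never touches self, so it is exercised unbound here purely for demonstration; building a real
# FegUpgrader requires a running MagmaService.
if __name__ == "__main__":
    version = VersionT("0.3.68-1541626353-d1c29db1")
    assert FegUpgrader.version_to_image_name(None, version) == ImageNameT("magma_feg_d1c29db1.zip")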
|
# test construction of array.array from different objects
from array import array
# tuple, list
print(array('b', (1, 2)))
print(array('h', [1, 2]))
# raw copy from bytes, bytearray
print(array('h', b'12'))
print(array('h', bytearray(2)))
print(array('i', bytearray(4)))
# convert from other arrays
print(array('H', array('b', [1, 2])))
print(array('b', array('I', [1, 2])))
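# Note added for clarity (not part of the original test): the "raw copy" constructors above
# reinterpret the byte buffer using the target typecode, so the printed values depend on item size
# and machine endianness -- e.g. on a little-endian build, array('h', b'12')[0] == 0x3231 == 12849.
# The typed conversions at the end copy element values instead, so both print arrays holding 1 and 2.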
|
import os
import sys
from django.conf import settings
from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS
BASE_DIR = os.path.dirname(__file__)
DEBUG = False
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.staticfiles',
'publications',
)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS += (
'django.core.context_processors.request',
)
ROOT_URLCONF = 'publications.tests.urls'
settings.configure(
MEDIA_ROOT=os.path.join(BASE_DIR, 'media'),
MEDIA_URL='/media/',
STATIC_ROOT=os.path.join(BASE_DIR, 'static'),
STATIC_URL='/static/',
DEBUG=DEBUG,
INSTALLED_APPS=INSTALLED_APPS,
DATABASES=DATABASES,
MIDDLEWARE_CLASSES=MIDDLEWARE_CLASSES,
TEMPLATE_CONTEXT_PROCESSORS=TEMPLATE_CONTEXT_PROCESSORS,
ROOT_URLCONF=ROOT_URLCONF)
import django
from distutils.version import StrictVersion
if StrictVersion(django.get_version()) >= StrictVersion('1.7.0'):
from django import setup
from django.test.runner import DiscoverRunner
setup()
sys.exit(DiscoverRunner(verbosity=1).run_tests(['publications']))
else:
from django.test.simple import DjangoTestSuiteRunner
sys.exit(DjangoTestSuiteRunner(verbosity=1).run_tests(['publications.Tests', 'publications.LiveTests']))
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
File: prepare_lung_coronavirus.py
We assume that you have downloaded the dataset through the links and saved the files as follows:
lung_coronavirus
|--20_ncov_scan.zip
|--infection.zip
|--lung_infection.zip
|--lung_mask.zip
This script supports:
1. uncompressing the downloaded files, and
2. saving the images according to the following layout:
lung_coronavirus_phase0
|
|--images
|--labels
"""
import os
import sys
import glob
import time
import random
import zipfile
import functools
import numpy as np
import nibabel as nib
sys.path.append(
os.path.join(os.path.dirname(os.path.realpath(__file__)), ".."))
from utils import list_files
from paddleseg3d.datasets.preprocess_utils import uncompressor
from paddleseg3d.datasets.preprocess_utils import HU2float32, resample
urls = {
"lung_infection.zip":
"https://bj.bcebos.com/v1/ai-studio-online/432237969243497caa4d389c33797ddb2a9fa877f3104e4a9a63bd31a79e4fb8?responseContentDisposition=attachment%3B%20filename%3DLung_Infection.zip&authorization=bce-auth-v1%2F0ef6765c1e494918bc0d4c3ca3e5c6d1%2F2020-05-10T03%3A42%3A16Z%2F-1%2F%2Faccd5511d56d7119555f0e345849cca81459d3783c547eaa59eb715df37f5d25",
"lung_mask.zip":
"https://bj.bcebos.com/v1/ai-studio-online/96f299c5beb046b4a973fafb3c39048be8d5f860bd0d47659b92116a3cd8a9bf?responseContentDisposition=attachment%3B%20filename%3DLung_Mask.zip&authorization=bce-auth-v1%2F0ef6765c1e494918bc0d4c3ca3e5c6d1%2F2020-05-10T03%3A41%3A14Z%2F-1%2F%2Fb8e23810db1081fc287a1cae377c63cc79bac72ab0fb835d48a46b3a62b90f66",
"infection_mask.zip":
"https://bj.bcebos.com/v1/ai-studio-online/2b867932e42f4977b46bfbad4fba93aa158f16c79910400b975305c0bd50b638?responseContentDisposition=attachment%3B%20filename%3DInfection_Mask.zip&authorization=bce-auth-v1%2F0ef6765c1e494918bc0d4c3ca3e5c6d1%2F2020-05-10T03%3A42%3A37Z%2F-1%2F%2Fabd47aa33ddb2d4a65555795adef14826aa68b20c3ee742dff2af010ae164252",
"20_ncov_scan.zip":
"https://bj.bcebos.com/v1/ai-studio-online/12b02c4d5f9d44c5af53d17bbd4f100888b5be1dbc3d40d6b444f383540bd36c?responseContentDisposition=attachment%3B%20filename%3D20_ncov_scan.zip&authorization=bce-auth-v1%2F0ef6765c1e494918bc0d4c3ca3e5c6d1%2F2020-05-10T14%3A54%3A21Z%2F-1%2F%2F1d812ca210f849732feadff9910acc9dcf98ae296988546115fa7b987d856b85"
}
class Prep:
dataset_root = "data/lung_coronavirus"
phase0_path = os.path.join(dataset_root, "lung_coronavirus_phase0/")
raw_data_path = os.path.join(dataset_root, "lung_coronavirus_raw/")
image_dir = os.path.join(raw_data_path, "20_ncov_scan")
label_dir = os.path.join(raw_data_path, "lung_mask")
def __init__(self, phase_path=phase0_path, train_split=15):
self.train_split = train_split
self.phase_path = phase_path
self.image_path = os.path.join(self.phase_path, "images")
self.label_path = os.path.join(self.phase_path, "labels")
os.makedirs(self.image_path, exist_ok=True)
os.makedirs(self.label_path, exist_ok=True)
def uncompress_file(self, num_zipfiles):
uncompress_tool = uncompressor(
urls=urls, savepath=self.dataset_root, print_progress=True)
"""unzip all the file in the root directory"""
zipfiles = glob.glob(os.path.join(self.dataset_root, "*.zip"))
        assert len(zipfiles) == num_zipfiles, (
            "The file directory should include {} zip files, but found only {}"
            .format(num_zipfiles, len(zipfiles)))
for f in zipfiles:
extract_path = os.path.join(self.raw_data_path,
f.split("/")[-1].split('.')[0])
uncompress_tool._uncompress_file(
f, extract_path, delete_file=False, print_progress=True)
def load_save(self,
file_dir,
load_type=np.float32,
savepath=None,
preprocess=None,
tag="image"):
"""
        Load the files in file_dir, preprocess them, and save them to savepath.
"""
files = list_files(file_dir)
        assert len(files) != 0, (
            "The data directory you assigned is wrong; there is no file in it."
        )
for f in files:
filename = f.split("/")[-1].split(".")[0]
nii_np = nib.load(f).get_fdata(dtype=load_type)
if preprocess is not None:
for op in preprocess:
nii_np = op(nii_np)
np.save(os.path.join(savepath, filename), nii_np)
print("Sucessfully convert medical images to numpy array!")
def convert_path(self):
"""convert nii.gz file to numpy array in the right directory"""
import pdb
pdb.set_trace()
print("Start convert images to numpy array, please wait patiently")
self.load_save(
self.image_dir,
load_type=np.float32,
savepath=self.image_path,
preprocess=[
HU2float32,
functools.partial(resample, new_shape=[128, 128, 128])
])
print("start convert labels to numpy array, please wait patiently")
self.load_save(
self.label_dir,
np.float32,
self.label_path,
preprocess=[
functools.partial(
resample, new_shape=[128, 128, 128], order=0)
],
tag="label")
def generate_txt(self):
"""generate the train_list.txt and val_list.txt"""
def write_txt(txt, files):
with open(txt, 'w') as f:
if "train" in txt:
image_names = files[:self.train_split]
else:
image_names = files[self.train_split:]
label_names = [
name.replace("_org_covid-19-pneumonia-", "_").replace(
"-dcm", "").replace("_org_", "_")
for name in image_names
                ]  # TODO: remove renaming specific to this dataset
for i in range(len(image_names)):
string = "{} {}\n".format('images/' + image_names[i],
'labels/' + label_names[i])
f.write(string)
print("successfully write to {}".format(txt))
txtname = [
os.path.join(self.phase_path, 'train_list.txt'),
os.path.join(self.phase_path, 'val_list.txt')
]
files = os.listdir(self.image_path)
random.shuffle(files)
write_txt(txtname[0], files)
write_txt(txtname[1], files)
if __name__ == "__main__":
prep = Prep()
prep.uncompress_file(num_zipfiles=4)
prep.convert_path()
prep.generate_txt()
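# Hypothetical consumer sketch (added for illustration, not part of the original script): each line
# written by generate_txt() has the form "images/<file>.npy labels/<file>.npy", relative to the
# phase directory. The default path below mirrors Prep.phase0_path; it relies on the os/np imports
# at the top of this file.
def load_pairs(phase_path="data/lung_coronavirus/lung_coronavirus_phase0", split="train_list.txt"):
    """Yield (image, label) numpy array pairs for every line of the given split file."""
    with open(os.path.join(phase_path, split)) as f:
        for line in f:
            image_rel, label_rel = line.split()
            yield (np.load(os.path.join(phase_path, image_rel)),
                   np.load(os.path.join(phase_path, label_rel)))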
|
# -*- coding:utf-8 -*-
"""
Service configuration
Author: HuangTao
Date: 2018/05/03
"""
import json
from quant.utils import logger
class Config:
""" 服务配置
"""
def __init__(self):
""" 配置项
`SERVER_ID` 服务ID
`RUN_TIME_UPDATE` 是否支持配置动态更新
`LOG` 日志配置
`RABBITMQ` RabbitMQ配置
`MONGODB` mongodb配置
`REDIS` redis配置
`PLATFORMS` 交易所配置
`HEARTBEAT` 服务心跳配置 {"interval": 0, "broadcast": 0}
`PROXY` HTTP代理配置
"""
self.server_id = None # 服务id(manager服务创建)
self.run_time_update = False # 是否支持配置动态更新
self.log = {} # 日志配置
self.rabbitmq = {} # RabbitMQ配置
self.mongodb = {} # Mongodb配置
self.redis = {} # Redis配置
self.platforms = {} # 交易所配置
self.heartbeat = {} # 服务心跳配置
self.service = {} # 代理服务配置
self.proxy = None # HTTP代理配置
def initialize(self):
""" 初始化
"""
# 订阅事件 做市参数更新
if self.run_time_update:
from quant.event import EventConfig
EventConfig(self.server_id).subscribe(self.on_event_config, False)
async def on_event_config(self, event):
""" 更新参数
@param event 事件对象
"""
from quant.event import EventConfig
event = EventConfig().duplicate(event)
if event.server_id != self.server_id:
return
if not isinstance(event.params, dict):
logger.error("params format error! params:", event.params, caller=self)
return
        # Parse the data from the config event as a dict and set the fields as attributes of config
self.update(event.params)
logger.info("config update success!", caller=self)
def loads(self, config_file=None):
""" 加载配置
@param config_file json配置文件
"""
configures = {}
if config_file:
try:
with open(config_file) as f:
data = f.read()
configures = json.loads(data)
except Exception as e:
print(e)
exit(0)
if not configures:
print("config json file error!")
exit(0)
self.update(configures)
def update(self, update_fields):
""" 更新配置
@param update_fields 更新字段
"""
self.server_id = update_fields.get("SERVER_ID") # 服务id
self.run_time_update = update_fields.get("RUN_TIME_UPDATE", False) # 是否支持配置动态更新
self.log = update_fields.get("LOG", {}) # 日志配置
self.rabbitmq = update_fields.get("RABBITMQ", None) # RabbitMQ配置
self.mongodb = update_fields.get("MONGODB", None) # mongodb配置
self.redis = update_fields.get("REDIS", None) # redis配置
self.platforms = update_fields.get("PLATFORMS", {}) # 交易所配置
self.heartbeat = update_fields.get("HEARTBEAT", {}) # 服务心跳配置
self.service = update_fields.get("SERVICE", {}) # 代理服务配置
self.proxy = update_fields.get("PROXY", None) # HTTP代理配置
# 将配置文件中的数据按照dict格式解析并设置成config的属性
for k, v in update_fields.items():
setattr(self, k, v)
config = Config()
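# Minimal usage sketch (added for illustration, not part of the original module). Given a JSON file
# such as {"SERVER_ID": "demo-1", "RUN_TIME_UPDATE": false, "LOG": {"level": "INFO"}}, loads() reads
# it once at startup; update() maps the upper-case keys to the well-known attributes and also sets
# every key verbatim via setattr(). The file name below is hypothetical.
if __name__ == "__main__":
    config.loads("config.json")  # exits if the file is missing or is not valid JSON
    print(config.server_id, config.run_time_update, config.log)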
|
"""
Time: O(1)
Space: O(1), no "extra" space are used.
"""
class Solution(object):
def readBinaryWatch(self, turnedOn):
ans = []
if turnedOn>8: return ans #at most 8 LED are turned on for a valid time.
for h in xrange(12):
for m in xrange(60):
if (bin(h) + bin(m)).count('1')==turnedOn:
ans.append('%d:%02d' % (h, m))
return ans |
# This file contains the interfaces that can be used for any order-book on the zcoins platform.
from abc import ABC, abstractmethod
from collections import defaultdict
from typing import Text
class SingleProductOrderBook(ABC):
"""Contains the order-book for a single product."""
def __init__(self, product_id: Text, base_currency: Text, quote_currency: Text):
"""An order-book interface for a single product.
Args:
product_id (Text): The exchange-specific product_id.
base_currency (Text): The standard code for a currency (ISO4217 if the currency is a non-crypto)
quote_currency (Text): The standard code for a currency (ISO4217 if the currency is a non-crypto)
"""
self.product_id = product_id
self.base_currency = base_currency
self.quote_currency = quote_currency
@abstractmethod
def get_bids(self, top_n: int = None):
"""Returns a list of (price, size) tuples, representing the current bids order-book.
Prices are in quote_currency.
Results are sorted from high-to-low by price.
Example Result:
[(1.06, 3), (1.05, 2), (1.00, 5)]
Args:
            top_n (int): Controls the number of results that should be returned. May return fewer if top_n is greater
                than the size of the current order book.
"""
pass
@abstractmethod
def get_asks(self, top_n: int = None):
"""Returns a list of (price, size) tuples, representing the current asks order-book.
Prices are in quote_currency.
Results are sorted from low-to-high by price.
Example Result:
[(1.00, 5), (1.05, 2), (1.06, 3)]
Args:
            top_n (int): Controls the number of results that should be returned. May return fewer if top_n is greater
                than the size of the current order book.
"""
pass
def get_book(self, top_n: int = None):
"""Returns the results of get_bids and get_asks in a single dict with keys 'bids' and 'asks'.
Example Result:
{
'bids': [(1.06, 3), (1.05, 2), (1.00, 5)],
'asks': [(1.00, 5), (1.05, 2), (1.06, 3)]
}
"""
return {
'bids': self.get_bids(top_n=top_n),
'asks': self.get_asks(top_n=top_n)
}
class MultiProductOrderBook(ABC):
"""Contains the order-books for many products on the same exchange."""
def __init__(self, product_ids: list[Text] = None):
if product_ids is None:
product_ids = []
self._order_books = {}
self._order_books_by_quote_currency = defaultdict(list)
self._order_books_by_base_currency = defaultdict(list)
self.product_ids = product_ids
def _post_subclass_init(self):
self.add_order_books(self.product_ids)
def add_order_book(self, product_id) -> SingleProductOrderBook:
return self.add_order_books([product_id])[0]
def add_order_books(self, product_ids: list[Text]) -> list[SingleProductOrderBook]:
books = self.make_multiple_product_order_book(product_ids)
for idx in range(len(books)):
product_id = product_ids[idx]
ob = books[idx]
self._order_books[product_id] = ob
self._order_books_by_base_currency[ob.base_currency].append(ob)
self._order_books_by_quote_currency[ob.quote_currency].append(ob)
return books
def get_order_book(self, product_id) -> SingleProductOrderBook:
return self._order_books[product_id]
def get_order_books_by_quote_currency(self, quote_currency) -> list[SingleProductOrderBook]:
return self._order_books_by_quote_currency[quote_currency]
def get_order_books_by_base_currency(self, base_currency) -> list[SingleProductOrderBook]:
return self._order_books_by_base_currency[base_currency]
def get_tracked_products(self):
return self._order_books.keys()
@abstractmethod
def make_single_product_order_book(self, product_id: Text) -> SingleProductOrderBook:
"""Create a SingleProductOrderBook for the given product_id."""
pass
def make_multiple_product_order_book(self, product_ids: list[Text]) -> list[SingleProductOrderBook]:
"""Default implementation, you might want to override this to make it more efficient."""
order_books = []
for product_id in product_ids:
order_books.append(self.make_single_product_order_book(product_id))
return order_books
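# Minimal illustrative implementation (added, not part of this interface file): an order-book
# backed by static in-memory levels, handy for tests. The product, prices and sizes are made up.
class StaticOrderBook(SingleProductOrderBook):
    def __init__(self, product_id: Text, base_currency: Text, quote_currency: Text, bids, asks):
        super().__init__(product_id, base_currency, quote_currency)
        # Keep bids high-to-low and asks low-to-high, as the interface requires.
        self._bids = sorted(bids, key=lambda level: level[0], reverse=True)
        self._asks = sorted(asks, key=lambda level: level[0])
    def get_bids(self, top_n: int = None):
        return self._bids if top_n is None else self._bids[:top_n]
    def get_asks(self, top_n: int = None):
        return self._asks if top_n is None else self._asks[:top_n]
if __name__ == '__main__':
    book = StaticOrderBook('BTC-USD', 'BTC', 'USD',
                           bids=[(1.00, 5), (1.06, 3), (1.05, 2)],
                           asks=[(1.10, 4), (1.08, 1)])
    print(book.get_book(top_n=2))  # {'bids': [(1.06, 3), (1.05, 2)], 'asks': [(1.08, 1), (1.10, 4)]}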
|
# Copyright (c) 2015 Ultimaker B.V.
# Uranium is released under the terms of the LGPLv3 or higher.
from PyQt5.QtGui import QOpenGLTexture, QImage, QAbstractOpenGLFunctions
## A class describing the interface to be used for texture objects.
#
# This interface should be implemented by OpenGL implementations to handle texture
# objects.
class Texture:
def __init__(self, open_gl_binding_object: QAbstractOpenGLFunctions) -> None:
super().__init__()
self._qt_texture = QOpenGLTexture(QOpenGLTexture.Target2D)
self._gl = open_gl_binding_object
self._file_name = None
self._image = None
## Get the OpenGL ID of the texture.
def getTextureId(self) -> int:
return self._qt_texture.textureId()
## Bind the texture to a certain texture unit.
#
# \param texture_unit The texture unit to bind to.
def bind(self, texture_unit):
if not self._qt_texture.isCreated():
            if self._file_name is not None:
self._image = QImage(self._file_name).mirrored()
elif self._image is None: # No filename or image set.
self._image = QImage(1, 1, QImage.Format_ARGB32)
self._image.fill(0)
self._qt_texture.setData(self._image)
self._qt_texture.setMinMagFilters(QOpenGLTexture.Linear, QOpenGLTexture.Linear)
self._qt_texture.bind(texture_unit)
## Release the texture from a certain texture unit.
#
# \param texture_unit The texture unit to release from.
def release(self, texture_unit):
self._qt_texture.release(texture_unit)
## Load an image and upload it to the texture.
#
# \param file_name The file name of the image to load.
def load(self, file_name):
self._file_name = file_name
#Actually loading the texture is postponed until the next bind() call.
#This makes sure we are on the right thread and have a current context when trying to upload.
def setImage(self, image):
        self._image = image
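# Usage sketch (added, not part of the original class; the file name and call sites are
# hypothetical). load() only records the path: the QImage is read and uploaded on the first bind(),
# which therefore has to happen on the render thread while an OpenGL context is current.
#
#   texture = Texture(open_gl_binding_object)  # a bound QAbstractOpenGLFunctions instance
#   texture.load("my_texture.png")
#   ...
#   texture.bind(0)      # inside the render loop, with a current context
#   ...                  # draw calls sampling texture unit 0
#   texture.release(0)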