"""this file router for delete user."""
from app import app
from db.db_queries.get_user_for_delete_query import get_user_for_delete, \
redirect_recipe
from db.db_queries.get_user_query import get_user
from flask import flash, redirect, url_for
from models.db import db
from routers.login_required import login_required
@app.route('/users/<int:user_id>/delete', methods=['POST', 'GET'])
@login_required(default_role=1)
def delete_user(user_id: int):
"""Router for delete user."""
user = get_user(user_id)
del_user = get_user_for_delete()
recipes = redirect_recipe(user_id)
for recipe in recipes:
recipe.author = del_user
db.session.delete(user)
db.session.commit()
    flash('Profile deleted successfully!', 'success')
return redirect(url_for('users'))
|
#coding:utf-8
'''
filename:points_distance.py
chap:5
subject:11
conditions:points a,b
solution:distance(a,b)
'''
import math
def distance(a: tuple, b: tuple) -> float:
    """Euclidean distance between two 2-D points."""
    return math.sqrt(sum((a[i] - b[i]) ** 2 for i in range(2)))
print(distance((0,2),(0,4)))
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
class CdmError(Exception):
"""Base CDM exception"""
|
#!/usr/bin/env python3
import os
import sqlite3
from pygments.formatters import HtmlFormatter
self_dir = os.path.dirname(os.path.realpath(__file__))
print("Setting up database...", end="")
db = sqlite3.connect(self_dir + "/data/db.sqlite")
db.execute("""
CREATE TABLE IF NOT EXISTS pastes (
id CHAR(8) PRIMARY KEY,
title TEXT,
lang CHAR(30) NOT NULL,
hash VARCHAR(64) NOT NULL,
create_time INTEGER NOT NULL
)""")
db.execute("""
CREATE TABLE IF NOT EXISTS contents (
hash CHAR(64) PRIMARY KEY,
contents TEXT NOT NULL
)
""")
print("done")
print("Generating stylesheets...", end="")
with open(self_dir + "/static/_pygments.css", "w") as f:
f.write(HtmlFormatter().get_style_defs('.highlight'))
print("done") |
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
def paginator(request, queryset, per_page=25):
per_page = request.GET.get('per_page', per_page)
paginator = Paginator(queryset, per_page)
page = request.GET.get('page')
try:
return paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
return paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
return paginator.page(paginator.num_pages)
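
# Hypothetical usage sketch (not part of the original helper): wiring this
# paginator into a Django view. "Article" and the template path are assumed
# purely for illustration.
#
# from django.shortcuts import render
# from .models import Article
#
# def article_list(request):
#     page = paginator(request, Article.objects.order_by('-id'), per_page=10)
#     return render(request, 'articles/list.html', {'page': page})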
|
import os
import pandas as pd
import csv
import json
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from datetime import datetime
import compress_json
def export_key(out):
"""Export Generated Key Template"""
data_name = "Master_Key/master_key.csv"
exists = os.path.exists(data_name)
if exists:
for i in range(100):
if not os.path.exists("Master_Key/master_key_v%s.csv" % (i+ 1)):
os.rename(data_name, "Master_Key/master_key_v%s.csv" % (i+ 1))
break
for row in out:
out_fields = [x for x in row]
exists = os.path.exists(data_name)
with open(data_name, "a") as fp:
writer = csv.writer(fp)
if not exists:
writer.writerow(out_fields)
writer.writerow([row[x] for x in out_fields])
def format_data(folders):
"""Format data to be added to template"""
out = []
for key in folders:
df = pd.read_csv(key)
for col in folders[key]["cols"]:
# print(df[col].dtype)
row = {
"File": key,
"State": folders[key]["state"],
"Column Name": col,
"Category": "",
"New Key": "",
"Data Type": (df[col]).dtype
}
out.append(row)
return out
def iterate_dir():
"""Iterate through directory to get all column names of all files"""
unwanted = ["SouthKoreaCensus", "US_Census", "unacast", "nytimes"]
directory = "../../../covid19demographics-fall"
folders = {}
for folder in os.listdir(directory):
if folder in unwanted:
continue
path = directory + "/" + folder + "/data/"
if os.path.exists(path):
for filename in os.listdir(path):
if filename.endswith(".csv"):
# print(folder, filename)
try:
df = pd.read_csv(path + filename)
except:
print(folder, filename)
raise
folders[path + filename] = {
"state": folder,
"cols": df.columns
}
return folders
def fix_percentages(current_df, current_file):
"""Fix percentages format from decimal to full number and check to see % values make sense"""
sum_pct = 0
pct_cols = []
# Iterate through all columns
for col in current_df.columns:
# If a percentage column
if type(col) != str:
print("Irregular column name " + str(col) + " in " + current_file)
continue
if "%" in col:
pct_cols.append(col)
            # If string, convert to numeric, ignoring errors
            if current_df[col].dtypes == "object":
                current_df[col] = pd.to_numeric(current_df[col], errors="ignore")
            # Getting first valid row index
            valid_index = current_df[col].first_valid_index()
            # Skip columns that contain no valid values at all
            if valid_index is None:
                continue
            # If value still continues to be string, then strip % sign or raise exception
            if isinstance(current_df.loc[valid_index, col], str):
if "%" in current_df.loc[valid_index, col]:
current_df.loc[valid_index, col] = float(current_df.loc[valid_index, col].strip("%"))
else:
raise Exception(current_file, current_df.loc[valid_index, col])
# # If value is less than 1, multiply by 100
# if current_df.loc[valid_index, col] < 1:
# current_df[col] *= 100
# # If value equals 1, check next index to see if it is less than 1, if so convert
# elif current_df.loc[valid_index, col] == 1:
# if current_df.loc[valid_index + 1, col] < 1:
# current_df[col] *= 100
# # If value is greater than 100, raise problem - needs to be checked
sum_pct += current_df.loc[valid_index, col]
if current_df.loc[valid_index, col] > 103:
print(current_file, col, current_df.loc[valid_index, col])
# Check sum_pct to see if we are dealing with decimals or not
#print(current_file, sum_pct, len(pct_cols))
    if sum_pct < 60:
        for col in pct_cols:
            # Recompute the first valid index for this column (otherwise the
            # value left over from the loop above would be reused for every column)
            valid_index = current_df[col].first_valid_index()
            if valid_index is not None and current_df.loc[valid_index, col] <= 1:
                current_df[col] *= 100
return current_df
def fix_na(current_df, current_file):
"""Find out which columns have NA values"""
na_df = current_df.isna().any().to_frame()
file_array = [current_file for i in range(len(current_df.isna().any()))]
na_df['File'] = file_array
return na_df
def output_na(na_list):
curr_df = pd.concat(na_list)
# Export to CSV
data_name = "NAs/columns_na_list.csv"
exists = os.path.exists(data_name)
if exists:
for i in range(100):
if not os.path.exists("NAs/columns_na_list_v%s.csv" % (i+ 1)):
os.rename(data_name, "NAs/columns_na_list_v%s.csv" % (i+ 1))
break
curr_df.to_csv(data_name)
def remove_dup(current_df, current_file):
# Drop identical rows
current_df = current_df.drop_duplicates()
# Don't attempt with county
if "county" in current_file or "zip" in current_file:
# print(current_file)
return current_df
for col in current_df.columns:
try:
if "county" in col.lower() or "zip" in col.lower() or "fips" in col.lower():
return current_df
except:
print("Irregular column name [" + str(col) + "] in " + current_file)
# Drop identical report dates
if "Report Date" in current_df.columns:
current_df = current_df.drop_duplicates(subset="Report Date")
return current_df
# Remove duplicates based on scrape time
if "Scrape Time" not in current_df.columns:
return current_df
try:
current_df["Scrape Time"] = pd.to_datetime(current_df["Scrape Time"])
except:
print("Unable to convert Scrape time to datetime in: " + current_file)
return current_df
current_df = current_df.sort_values(by=["Scrape Time"])
prev_dt = None
for index, row in current_df.iterrows():
        if prev_dt is None:
prev_dt = row["Scrape Time"]
continue
diff = row["Scrape Time"] - prev_dt
# If difference is less than 8 hours, drop row
if diff.total_seconds() <= 8*60*60:
current_df.drop(index, inplace = True)
else:
prev_dt = row["Scrape Time"]
# Convert datetime back to string to be able to JSONify
current_df["Scrape Time"] = current_df["Scrape Time"].dt.strftime("%Y-%m-%d %r")
return current_df
def vlookup(key_df, case):
# Helper variables
current_file = None
current_df = None
current_state = None
column_mapping = {}
new_files = {}
is_first = True
na_list = []
# Iterate through files, create copy in code that substitutes column names
# with new keys
for index, row in key_df.iterrows():
# Check if file is current file - avoid reading df again
if row['File'] != current_file:
if not is_first:
# Rename current columns
current_df.rename(columns=column_mapping, inplace=True)
# Find and remove duplicate dates
current_df = remove_dup(current_df, current_file)
# Fix percentage values
current_df = fix_percentages(current_df, current_file)
if case == "2":
# Find NA values
na_list.append(fix_na(current_df, current_file))
# Append df to dict
key = (current_file.replace("../../../covid19demographics-fall/" + current_state + "/data/","")).replace(".csv","")
if current_state not in new_files.keys():
new_files[current_state] = [{key: current_df}]
else:
new_files[current_state].append({key: current_df})
# Reset Column Mapping
column_mapping = {}
# Getting current information and assigning to global vars
current_state = row["State"]
current_file = row["File"]
current_df = pd.read_csv(current_file)
is_first = False
# Add new key and value to column_mapping
column_mapping[row["Column Name"]] = row["New Key"]
# Append last df to new_files
# Rename current columns
current_df.rename(columns=column_mapping, inplace=True)
# Find and remove duplicate dates
current_df = remove_dup(current_df, current_file)
# Fix percentage values
current_df = fix_percentages(current_df, current_file)
if case == "2":
# Find NA values
na_list.append(fix_na(current_df, current_file))
# Append df to dict
key = (current_file.replace("../../../covid19demographics-fall/" + current_state + "/data/","")).replace(".csv","")
if current_state not in new_files.keys():
new_files[current_state] = [{key: current_df}]
else:
new_files[current_state].append({key: current_df})
# Output NA columns to csv file
# if case == "2":
# output_na(na_list)
return new_files
def create_JSON(case):
""""Create JSON with all Data"""
# Read CSV
key_file = "Master_Key/master_key.csv"
key_df = pd.read_csv(key_file)
key_df = key_df.sort_values(by=["File"])
# Remove Unacast
key_df = key_df[key_df["File"] != "../../../covid19demographics-fall/unacast/data/2020-04-07 22:39:47.057978.csv"]
new_files = vlookup(key_df, case)
out = {
"USA": {},
"Intl.": {}
}
intl_keys = ["Iceland", "SouthKorea", "SouthKoreaCensus"]
# Add all rows to state key
for state in new_files:
international = False
if state in intl_keys:
out["Intl."][state] = []
international = True
else:
out["USA"][state] = []
for dic in new_files[state]:
for key in dic:
rows = dic[key].to_dict(orient="records")
if international:
out["Intl."][state].extend(rows)
else:
out["USA"][state].extend(rows)
now = str(datetime.now())
# Export JSON - works when running on Andrew's PC
compress_json.dump(out, "../../../covid19demographics/data/data.json.gz")
# with open("../../../covid19demographics/data/data_" + now +".json", "w") as fp:
# json.dump(out, fp)
def generate_graphs(case):
# Read CSV
key_file = "Master_Key/master_key.csv"
key_df = pd.read_csv(key_file)
key_df = key_df.sort_values(by=["File"])
# Get data
new_files = vlookup(key_df, case)
# Make directory if it does not exist
if not os.path.exists("Graphs"):
os.mkdir("Graphs")
# Create overall report
files_error = []
for state in new_files:
for dic in new_files[state]:
for key in dic:
datafile = key
# Getting path
path = "Graphs/" + str(state) + "_" + datafile
if not os.path.exists(path):
os.mkdir(path)
# Get time variable
time_priority = [
"Report Date",
"Scrape Time",
"Updated Date",
"Edit Date",
"Report Time",
"Entry Creation Date",
"Report Date Gender",
"Quarantine Report Date",
"Test Date",
"Last Day Test Date",
]
# Determining time variable
time_var = ""
for time in time_priority:
if time in dic[key].columns:
time_var = time
break
# Create report string
report = ""
if time_var == "":
Exception("No time variable for " + state + " in " + datafile)
report += "No time variable for " + state + " in " + datafile + "\n"
files_error.append(str(state) + "_" + datafile)
continue
# Fix percentages
current_df = fix_percentages(dic[key], path)
# Getting NaN values:
na_list = current_df.isna().any()
na_cols = []
for index, val in na_list.items():
if val:
na_cols.append(index)
report += (index + " contains NaN values\n")
files_error.append(str(state) + "_" + datafile)
# Graphing every column
for col in current_df:
if col not in time_priority:
name = col.replace("/", "_")
try:
# Check if column is numeric
if current_df[col].dtypes == "object":
report += (col + " in " + path + " is not numeric\n")
files_error.append(str(state) + "_" + datafile)
elif current_df[col].isnull().all():
report += ("Entire " + col + " is NaN\n")
files_error.append(str(state) + "_" + datafile)
else:
# Graph line
line = current_df.plot(x = time_var, y = col, kind = "line", rot=45, figsize = (25,15))
line_fig = line.get_figure()
line_fig.savefig(path + "/" + name + "_line.png")
plt.close()
# Graph box
box = current_df.boxplot(column=col)
box_fig = box.get_figure()
box_fig.savefig(path + "/" + name + "_box.png")
plt.close()
# Check for stale data and outliers
std = current_df[col].std(skipna=True)
mean = current_df[col].mean(skipna=True)
upper = mean + std * 3
lower = mean - std * 3
count = 1
prev_val = ""
start_time = ""
for index, val in current_df[col].items():
if val > upper or val < lower:
report += (col + ": " + str(val) + " at time " + str(current_df.loc[index, time_var]) + " is an outlier in " + datafile + "\n")
files_error.append(str(state) + "_" + datafile)
if str(val) == prev_val:
count += 1
else:
if count > 3:
report += ("Repetitive values of " + str(val) + " in " + col + "starting at " + str(start_time) + " and ending at " + str(current_df.loc[index, time_var]) + "\n")
files_error.append(str(state) + "_" + datafile)
count = 1
prev_val = val
start_time = str(current_df.loc[index, time_var])
except Exception as e:
report += ("Unable to graph " + col + " in " + datafile + " for " + state + "\n")
files_error.append(str(state) + "_" + datafile)
print(state, datafile, col, e)
with open(path + "/report_" + datafile + ".txt", "w") as text_file:
text_file.write(report)
report = ""
    # Deduplicate error files while preserving order
    uniq_error_files = list(dict.fromkeys(files_error))
rep_str = ""
for file in uniq_error_files:
rep_str += (str(file) + "\n")
with open("Graphs/overall_report.txt", "w") as fp:
fp.write(rep_str)
def make_key(args):
while True:
option = input("What do you wish to do?\n1) Create new key\n2) Create/Update JSON\n3) Generate SPC Graphs\n")
option = option.strip()
if option == "1":
# Create new master_key template
folders = iterate_dir()
out = format_data(folders)
export_key(out)
break
elif option == "2":
# Create JSON
create_JSON(option)
break
elif option == "3":
generate_graphs(option)
break
else:
print("Enter a valid input\n")
if __name__ == '__main__':
make_key({}) |
"""
Collection of Numpy math functions, wrapped to fit Ivy syntax and signature.
"""
# global
import numpy as _np
try:
from scipy.special import erf as _erf
except (ImportError, ModuleNotFoundError):
_erf = None
tan = _np.tan
asin = _np.arcsin
acos = _np.arccos
atan = _np.arctan
atan2 = _np.arctan2
sinh = _np.sinh
cosh = _np.cosh
tanh = _np.tanh
acosh = _np.arccosh
atanh = _np.arctanh
log = _np.log
exp = _np.exp
def erf(x):
if _erf is None:
raise Exception('scipy must be installed in order to call ivy.erf with a numpy backend.')
return _erf(x)
|
try: # Python 3.5+
from http import HTTPStatus as HTTPStatus
except ImportError:
from http import client as HTTPStatus
from flask import Blueprint, jsonify, request
import json
import logging
from v1.db.db import Db
from v1.auth.auth import auth
from mongoengine import *
unsubscribe = Blueprint('unsubscribe', 'unsubscribe')
@unsubscribe.route('/', methods=['POST'], strict_slashes=False)
@auth
def unsubscribePost() -> object:
"""
    Removes subscriptions from the subscription database.
:return: list of remaining subscriptions
"""
log = logging.getLogger(__name__)
log.debug("Beginning deletion")
db = Db()
body = request.get_json()
log.debug(body)
if body is None:
return jsonify({"error": "json body is required"}), HTTPStatus.HTTPStatus.BAD_REQUEST
if not('datasetId') in body:
return jsonify({"error": "datasetId is a required attribute"}), HTTPStatus.HTTPStatus.BAD_REQUEST
count = 0
idArray = json.loads(body['datasetId'])
log.debug(str(idArray) + " has length " + str(len(idArray)))
for deleteId in idArray:
log.debug(deleteId)
deletion = db.Subscriptions.objects(
datasetId=deleteId,
notificationUrl=body['notificationUrl']
)
log.debug(deletion)
deletion.delete()
count += 1
return jsonify(str(count) + " records deleted."), HTTPStatus.OK
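
# Hypothetical request body this endpoint expects (note that 'datasetId' is a
# JSON-encoded list string, as implied by the json.loads call above); values
# are illustrative only:
#
# {
#     "datasetId": "[\"abc123\", \"def456\"]",
#     "notificationUrl": "https://example.com/notify"
# }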
|
"""
IMAP sensor support.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.imap/
"""
import logging
import voluptuous as vol
from homeassistant.helpers.entity import Entity
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_NAME, CONF_PORT, CONF_USERNAME, CONF_PASSWORD)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_SERVER = "server"
DEFAULT_PORT = 993
ICON = 'mdi:email-outline'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_SERVER): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the IMAP platform."""
sensor = ImapSensor(config.get(CONF_NAME, None),
config.get(CONF_USERNAME),
config.get(CONF_PASSWORD),
config.get(CONF_SERVER),
config.get(CONF_PORT))
if sensor.connection:
add_devices([sensor])
else:
return False
class ImapSensor(Entity):
"""Representation of an IMAP sensor."""
def __init__(self, name, user, password, server, port):
"""Initialize the sensor."""
self._name = name or user
self._user = user
self._password = password
self._server = server
self._port = port
self._unread_count = 0
self.connection = self._login()
self.update()
def _login(self):
"""Login and return an IMAP connection."""
import imaplib
try:
connection = imaplib.IMAP4_SSL(self._server, self._port)
connection.login(self._user, self._password)
return connection
except imaplib.IMAP4.error:
_LOGGER.error("Failed to login to %s.", self._server)
return False
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the number of unread emails."""
return self._unread_count
def update(self):
"""Check the number of unread emails."""
import imaplib
try:
self.connection.select()
self._unread_count = len(self.connection.search(
None, 'UnSeen UnDeleted')[1][0].split())
except imaplib.IMAP4.error:
_LOGGER.info("Connection to %s lost, attempting to reconnect",
self._server)
try:
self.connection = self._login()
except imaplib.IMAP4.error:
_LOGGER.error("Failed to reconnect.")
@property
def icon(self):
"""Return the icon to use in the frontend."""
return ICON
|
from typing import Optional
from ..interfaces.data_manager import DataManager
from ..mixins.pandas_storage_manager import PandasStorageManager
from ..mixins.configured_resource_manager import ConfiguredResourceManager
from ..mixins.logger import Logger
from ..mixins.string_path_resolver import StringPathResolver
class PandasConfiguredDataManager(StringPathResolver, PandasStorageManager, ConfiguredResourceManager, DataManager, Logger):
@property
def _config_root(self) -> Optional[str]:
""""
The config root, if set the subkey to use as the base config for the resources.
This can be useful if you want to have multiple resource types within the
same config tree.
"""
return "data.local"
@property
def id(self) -> str:
return "pandas-configured" |
import wx
class StartPanel(wx.Panel):
def __init__(self,parent,id):
wx.Panel.__init__(self,parent,id)
self.parent = parent
self.txt = wx.StaticText(self, label="Drag A Pak File To Here")
self.txt.SetFont(wx.Font(15, wx.DEFAULT, wx.SLANT, wx.BOLD))
Vsizer = wx.BoxSizer(wx.VERTICAL)
Vsizer.Add(self.txt, 1, flag=wx.ALIGN_CENTER)
Hsizer = wx.BoxSizer(wx.HORIZONTAL)
Hsizer.Add(Vsizer, 1, flag=wx.ALIGN_CENTER)
self.SetSizer(Hsizer)
|
import pickle
from os.path import join as pjoin
import click
import pandas
from vocabirt.embed_nn.loader.common import CVSplitEhara
from vocabirt.embed_nn.utils import get_sv12k_word_list
from .vocabirt2pl_svl12k import estimate_irt
def split_modewise(split_mode, df, words):
total_resp_splits = 3 if split_mode in ("respondent", "both") else 1
total_vocab_splits = 3 if split_mode in ("word", "both") else 1
for resp_split_idx in range(total_resp_splits):
for vocab_split_idx in range(total_vocab_splits):
splitter = CVSplitEhara(
words,
resp_split_idx=resp_split_idx,
vocab_split_idx=vocab_split_idx,
total_resp_splits=total_resp_splits,
total_vocab_splits=total_vocab_splits,
)
yield resp_split_idx, vocab_split_idx, splitter.split_all(
df
), f"resp{resp_split_idx}_vocab{vocab_split_idx}"
split_mode_opt = click.option(
"--split-mode", type=click.Choice(["none", "respondent", "word", "both"])
)
@click.command()
@click.argument("inf")
@click.argument("outdir")
@click.option("--difficulties")
@split_mode_opt
def main(inf, outdir, difficulties, split_mode):
df = pandas.read_parquet(inf)
words = get_sv12k_word_list(inf)
for resp_split_idx, vocab_split_idx, split_tpl, basename in split_modewise(
split_mode, df, words
):
path = pjoin(outdir, f"{basename}.pkl")
with open(path, "wb") as pickle_out:
pickle.dump(
estimate_irt(split_tpl.train_df, ordinal=True), pickle_out,
)
if __name__ == "__main__":
main()
|
import requests
import os
import config
from templates.text import TextTemplate
OPENWEATHER_API = os.environ.get('OPENWEATHER_API', config.OPENWEATHER_API)
def process(input, entities, sender):
output = {}
try:
location = entities['location'][0]['value']
r = requests.get('http://api.openweathermap.org/data/2.5/weather?q=' + location + '&appid=' + OPENWEATHER_API + '&units=imperial')
data = r.json()
description = data['weather'][0]['description']
temp = data['main']['temp']
        wind = data['wind']['speed']
wind_direction = data['wind']['deg']
humidity = data['main']['humidity']
name = data['name']
msg = "Oh %s! Right now it's %s.\nTemperature: %sF\nHumidity: %s%%\nWind: %s mph" % (name, description, temp, humidity, wind)
output['input'] = input
output['output'] = TextTemplate(msg).get_message()
output['success'] = True
except:
output['success'] = False
return output
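
# Hypothetical usage sketch (not part of the original module): the entities
# structure this handler reads ('location'[0]['value']); values are
# illustrative only.
#
# entities = {'location': [{'value': 'Seattle'}]}
# result = process('weather in Seattle', entities, sender=None)
# print(result['output'])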
|
"""Simple test for monochromatic character LCD"""
import time
import board
import digitalio
import adafruit_character_lcd.character_lcd as characterlcd
# Modify this if you have a different sized character LCD
lcd_columns = 16
lcd_rows = 2
# Metro M0/M4 Pin Config:
lcd_rs = digitalio.DigitalInOut(board.D7)
lcd_en = digitalio.DigitalInOut(board.D8)
lcd_d7 = digitalio.DigitalInOut(board.D12)
lcd_d6 = digitalio.DigitalInOut(board.D11)
lcd_d5 = digitalio.DigitalInOut(board.D10)
lcd_d4 = digitalio.DigitalInOut(board.D9)
lcd_backlight = digitalio.DigitalInOut(board.D13)
# Initialise the LCD class
lcd = characterlcd.Character_LCD_Mono(
lcd_rs, lcd_en, lcd_d4, lcd_d5, lcd_d6, lcd_d7, lcd_columns, lcd_rows, lcd_backlight
)
# Turn backlight on
lcd.backlight = True
# Print a two line message
lcd.message = "Hello\nCircuitPython"
# Wait 5s
time.sleep(5)
lcd.clear()
# Print two line message right to left
lcd.text_direction = lcd.RIGHT_TO_LEFT
lcd.message = "Hello\nCircuitPython"
# Wait 5s
time.sleep(5)
# Return text direction to left to right
lcd.text_direction = lcd.LEFT_TO_RIGHT
# Display cursor
lcd.clear()
lcd.cursor = True
lcd.message = "Cursor! "
# Wait 5s
time.sleep(5)
# Display blinking cursor
lcd.clear()
lcd.blink = True
lcd.message = "Blinky Cursor!"
# Wait 5s
time.sleep(5)
lcd.blink = False
lcd.clear()
# Create message to scroll
scroll_msg = "<-- Scroll"
lcd.message = scroll_msg
# Scroll message to the left
for i in range(len(scroll_msg)):
time.sleep(0.5)
lcd.move_left()
lcd.clear()
lcd.message = "Going to sleep\nCya later!"
time.sleep(3)
# Turn backlight off
lcd.backlight = False
time.sleep(2)
|
from http.server import HTTPServer
from .handlers.http_request_handler import HttpRequestHandler
from .router.router import Router
class Application(Router):
def __init__(self):
super().__init__()
self.instance = None
self.asset_directory = {}
pass
def define_asset(self, name: str, directory: str):
self.asset_directory = {
"name": name,
"directory": directory
}
def listen(self, port: int, callback):
        if not port or not isinstance(port, int):
            callback(ValueError("No port found."))
            return
try:
callback(None)
self.instance = HTTPServer(('localhost', port), self._get_handler)
self.instance.serve_forever()
except:
callback(ConnectionError("Impossible connection"))
def _get_handler(self, *args):
HttpRequestHandler(self.get_routes(), self.asset_directory, *args) |
from aws_cdk import core
from aws_cdk import (aws_dynamodb,
                     aws_apigateway,
                     aws_lambda,
                     aws_s3,
                     aws_sns,
                     aws_sns_subscriptions,
                     aws_ecs,
                     aws_ecs_patterns,
                     aws_iam,
                     aws_ec2
                     )
from aws_cdk.aws_lambda_event_sources import S3EventSource
class AudiobookStack(core.Stack):
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
# ******* Database table
audiobooksDB = aws_dynamodb.Table(
self, "audiobooksDB",
partition_key=aws_dynamodb.Attribute(
name="id",
type=aws_dynamodb.AttributeType.STRING
),
read_capacity=2,
write_capacity=2,
billing_mode=aws_dynamodb.BillingMode.PROVISIONED
)
# ******* Lambda functions
book_upload_lambda_function = aws_lambda.Function(self, "HandleBookUploadLambda",
handler='app.lambda_handler',
runtime=aws_lambda.Runtime.PYTHON_3_8,
code=aws_lambda.Code.from_asset(
'../Functions/handlers/handle_book_upload'))
polly_audio_lambda_function = aws_lambda.Function(self, "HandlePollyAudioLambda",
handler='app.lambda_handler',
runtime=aws_lambda.Runtime.PYTHON_3_8,
code=aws_lambda.Code.from_asset(
'../Functions/handlers/handle_polly_audio'),
timeout=core.Duration.seconds(120))
# ******* S3 upload buckets
BookUploadBucket = aws_s3.Bucket(self, "BookUploadBucket")
AudioUploadBucket = aws_s3.Bucket(self, "AudioUploadBucket")
VideoUploadBucket = aws_s3.Bucket(self, "VideoUploadBucket")
ImageUploadBucket = aws_s3.Bucket(self, "ImageUploadBucket")
# ******* Create S3 event source
book_upload_lambda_function.add_event_source(S3EventSource(BookUploadBucket,
events=[
aws_s3.EventType.OBJECT_CREATED],
filters=[
{"suffix": '.txt'}]
))
# ******* Create SNS topic
PollySNSTopic = aws_sns.Topic(self, "PollySNSTopic")
PollySNSTopic.add_subscription(
aws_sns_subscriptions.LambdaSubscription(polly_audio_lambda_function))
# ******* Book function environment variables
book_upload_lambda_function.add_environment(
"TABLE_NAME", audiobooksDB.table_name)
book_upload_lambda_function.add_environment(
"AUDIO_S3_BUCKET", AudioUploadBucket.bucket_name)
book_upload_lambda_function.add_environment(
"SNS_TOPIC", PollySNSTopic.topic_arn)
# ******* Book function permissions
audiobooksDB.grant_write_data(book_upload_lambda_function)
BookUploadBucket.grant_read(book_upload_lambda_function)
AudioUploadBucket.grant_write(book_upload_lambda_function)
PollySNSTopic.grant_publish(book_upload_lambda_function)
book_upload_lambda_function.add_to_role_policy(aws_iam.PolicyStatement(actions=["polly:*"], resources=["*"]))
# ******* Fargate container permissions
role = aws_iam.Role(self, "FargateContainerRole", assumed_by=aws_iam.ServicePrincipal("ecs-tasks.amazonaws.com"))
role.add_to_policy(aws_iam.PolicyStatement(actions=["s3:PutObject"], resources=[VideoUploadBucket.bucket_arn+"/*"]))
role.add_to_policy(aws_iam.PolicyStatement(actions=["s3:GetObject"], resources=[AudioUploadBucket.bucket_arn+"/*"]))
role.add_to_policy(aws_iam.PolicyStatement(actions=["s3:GetObject"], resources=[ImageUploadBucket.bucket_arn+"/*"]))
# ******* Fargate container
vpc = aws_ec2.Vpc(self, "CdkFargateVpc", max_azs=2)
cluster = aws_ecs.Cluster(self, 'FargateCluster', vpc=vpc)
image = aws_ecs.ContainerImage.from_asset("../Functions/ECSContainerFiles")
task_definition = aws_ecs.FargateTaskDefinition(
self, "FargateContainerTaskDefinition", execution_role=role, task_role=role, cpu=1024, memory_limit_mib=3072
)
port_mapping = aws_ecs.PortMapping(container_port=80, host_port=80)
container = task_definition.add_container(
"Container", image=image,
logging=aws_ecs.AwsLogDriver(stream_prefix="videoProcessingContainer")
)
container.add_port_mappings(port_mapping)
# ******* Audio function environment variables
polly_audio_lambda_function.add_environment("VIDEO_S3_BUCKET", VideoUploadBucket.bucket_name)
polly_audio_lambda_function.add_environment("TASK_DEFINITION_ARN", task_definition.task_definition_arn)
polly_audio_lambda_function.add_environment("CLUSTER_ARN", cluster.cluster_arn)
polly_audio_lambda_function.add_environment("TABLE_NAME", audiobooksDB.table_name)
polly_audio_lambda_function.add_environment("CONTAINER_NAME", container.container_name)
polly_audio_lambda_function.add_environment("VPC_ID", str(vpc.vpc_id))
# ******* Audio function permissions
audiobooksDB.grant_read_write_data(polly_audio_lambda_function)
polly_audio_lambda_function.add_to_role_policy(aws_iam.PolicyStatement(actions=["ecs:RunTask"], resources=["*"]))
polly_audio_lambda_function.add_to_role_policy(aws_iam.PolicyStatement(actions=["iam:PassRole"], resources=["*"]))
polly_audio_lambda_function.add_to_role_policy(aws_iam.PolicyStatement(actions=["ec2:DescribeSubnets"], resources=["*"])) |
# -*- encoding: latin-1 -*-
import sys
MKL_THREADS_VAR = str(sys.argv[1])
import os
os.environ["MKL_NUM_THREADS"] = MKL_THREADS_VAR
os.environ["NUMEXPR_NUM_THREADS"] = MKL_THREADS_VAR
os.environ["OMP_NUM_THREADS"] = "1"
from numpy import *
from ir_load import ir_load
from parameters import parameters
from hamiltonian import hamiltonian
from gfunction import gfunction_calc
from gfunction import gfunction_load
from eliashberg import eliashberg
from kpath_extract import kpath_extract
import matplotlib
import matplotlib.pyplot as plt
import datetime
import time
##### Please input in order:
# MKL_NUM_THREADS | T | T_load | u0 | JU_ratio | JU_ratio_load | round_it
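# Hypothetical example invocation (script name and numeric values are
# illustrative only):
#   python run_flex.py 4 0.01 0.02 8.0 0.10 0.10 0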
n_fill = 1.785
T = float(sys.argv[2])
T_load = float(sys.argv[3])
u0 = float(sys.argv[4])
JU_ratio = float(sys.argv[5])
JU_ratio_load = float(sys.argv[6])
round_it = int(sys.argv[7])
print(n_fill, T, T_load, u0, JU_ratio, JU_ratio_load, round_it)
### Initiate parameters -------------------------------------------------------
start = time.process_time()
p = parameters(round(T, 5), round(n_fill ,5), round(JU_ratio,5), round(u0,5), round_it,\
T_load = round(T_load, 5), JU_ratio_load = round(JU_ratio_load, 5))
print("##################################################"\
, file=open(p.Logstr,'a'))
print(datetime.datetime.now().strftime('%d. %B %Y %I:%M%p')\
, file=open(p.Logstr,'a'))
print("Parameter set: n = {}, T = {}, U = {}, J/U = {}\n".format\
(p.n_fill, p.T, p.u0 ,p.JU_ratio), file=open(p.Logstr,'a'))
print("Elapsed time - parameter init: " + str(time.process_time() - start)\
, file=open(p.Logstr,'a'))
### Load hamiltonian ----------------------------------------------------------
t_hset = time.process_time()
h = hamiltonian(p)
print("Elapsed time - hamiltonian set (tot | module): " \
+ str(time.process_time() - start) + " | " \
+ str(time.process_time() - t_hset), file=open(p.Logstr,'a'))
### Load irbasis --------------------------------------------------------------
t_bload = time.process_time()
b = ir_load(p.Lambda, p.beta, p.IR_tol)
print("Elapsed time - basis load (tot | module): " \
+ str(time.process_time() - start) + " | " \
+ str(time.process_time() - t_bload), file=open(p.Logstr,'a'))
### Calculate full Greens function --------------------------------------------
t_gcal = time.process_time()
g = gfunction_calc(p,h,b)
#g = gfunction_load(p,b)
print("Elapsed time - g_scf_calc load (tot | module): " \
+ str(time.process_time() - start) + " | " \
+ str(time.process_time() - t_gcal), file=open(p.Logstr,'a'))
### Security convergence check of greens function
if g.tag == 'calc' and p.mode == 'FLEX':
# U convergence
if p.u0 != g.u0_pval:
print("Not calculating eliashberg equation.\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"\
, file=open(p.Logstr,'a'))
print(" " * len(p.err_str_begin) + "=> eliashberg skipped."\
, file=open(p.Logerrstr,'a'))
print("##################################################\n"\
, file=open(p.Logstr,'a'))
#continue
# Sigma convergence
if sum(abs(g.sigma_old-g.sigma))/sum(abs(g.sigma)) > p.g_sfc_tol:
print("Not calculating eliashberg equation.\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"\
, file=open(p.Logstr,'a'))
print(" " * len(p.err_str_begin) + "=> eliashberg skipped."\
, file=open(p.Logerrstr,'a'))
print("##################################################\n"\
, file=open(p.Logstr,'a'))
#continue
# ### Calculate SC parameter --------------------------------------------------
t_eliashberg = time.process_time()
el = eliashberg(g, p, b, h)
print("Elapsed time - Eliashberg calc (tot | module): " \
+ str(time.process_time() - start) + " | " \
+ str(time.process_time() - t_eliashberg), file=open(p.Logstr,'a'))
print("Done: n = {} | T/t = {} ({} K) | J/U = {} | abs(lambda) = {}".format\
(p.n_fill, p.T, round(p.T*(1.5/8.374)*1.16*10**4,2), p.JU_ratio,\
abs(el.result)), file=open(p.Logstr,'a'))
#1.5/8.374 is factor for NaCoO2 model
### Save resulting lambda value -----------------------------------------------
with open(p.SC_EV_path, 'a') as file:
file.write("{} {} {}\n".format(p.T, real(el.result), imag(el.result)))
### Extract quantities along HS path in BZ ------------------------------------
print("Now extract kpath of GF, X_s...", file=open(p.Logstr, 'a'))
kpath_extract(p,h,b,g)
print("Done.", file=open(p.Logstr,'a'))
print("##################################################\n"\
, file=open(p.Logstr,'a'))
|
matrix = [[1, 4, 7, 3], [12, 63, 43, 65], [12, 55, 22, 77]]
# Print the matrix column by column
for i in range(4):
    for row in matrix:
        print(row[i])
# Build the transpose with a nested list comprehension
transposed = [[row[i] for row in matrix] for i in range(4)]
print(transposed)
|
import root_pandas
import pandas as pd
import numpy as np
from nose.tools import raises
def test_simple():
filename = 'tests/samples/simple.root'
reference_df = pd.DataFrame()
reference_df['one'] = np.array([1, 2, 3, 4], dtype=np.int32)
reference_df['two'] = np.array([1.1, 2.2, 3.3, 4.4], dtype=np.float32)
reference_df['three'] = [b'uno', b'dos', b'tres', b'quatro']
df = root_pandas.read_root(filename, key='tree')
assert df.equals(reference_df)
df = root_pandas.read_root(filename)
assert df.equals(reference_df)
@raises(TypeError)
def test_small_evnt_tree_fullsplit():
""" FIXME """
filename = 'tests/samples/small-evnt-tree-fullsplit.root'
root_pandas.read_root(filename, key='tree')
def test_small_flat_tree():
filename = 'tests/samples/small-flat-tree.root'
expected_columns = [
'Int32', 'Int64', 'UInt32', 'UInt64', 'Float32', 'Float64', 'Str',
'ArrayInt32', 'ArrayInt64', 'ArrayUInt32', 'ArrayUInt64',
'ArrayFloat32', 'ArrayFloat64', 'N', 'SliceInt32', 'SliceInt64',
'SliceUInt32', 'SliceUInt64', 'SliceFloat32', 'SliceFloat64'
]
df = root_pandas.read_root(filename, key='tree')
assert set(df.columns) == set(expected_columns), df.columns
|
#!/usr/bin/env python
import argparse
from scapy.all import load_layer
from scapy.sendrecv import AsyncSniffer
from meter.flow_session import generate_session_class
def create_sniffer(input_file, input_interface, output_mode, output_file):
assert (input_file is None) ^ (input_interface is None)
NewFlowSession = generate_session_class(output_mode, output_file)
if input_file is not None:
return AsyncSniffer(offline=input_file, filter='tcp port 443', prn=None, session=NewFlowSession, store=False)
else:
return AsyncSniffer(iface=input_interface, filter='tcp port 443', prn=None,
session=NewFlowSession, store=False)
def main():
parser = argparse.ArgumentParser()
input_group = parser.add_mutually_exclusive_group(required=True)
input_group.add_argument('-n', '--online', '--interface', action='store', dest='input_interface',
help='capture online data from INPUT_INTERFACE')
input_group.add_argument('-f', '--offline', '--file', action='store', dest='input_file',
help='capture offline data from INPUT_FILE')
output_group = parser.add_mutually_exclusive_group(required=True)
output_group.add_argument('-c', '--csv', '--flow', action='store_const', const='flow', dest='output_mode',
help='output flows as csv')
output_group.add_argument('-s', '--json', '--sequence', action='store_const', const='sequence', dest='output_mode',
help='output flow segments as json')
parser.add_argument('output', help='output file name (in flow mode) or directory (in sequence mode)')
args = parser.parse_args()
load_layer('tls')
sniffer = create_sniffer(args.input_file, args.input_interface, args.output_mode, args.output)
sniffer.start()
try:
sniffer.join()
except KeyboardInterrupt:
sniffer.stop()
finally:
sniffer.join()
if __name__ == '__main__':
main()
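
# Hypothetical example invocations (option names taken from the parser above;
# the script name is an assumption):
#   python sniffer.py --offline capture.pcap --csv flows.csv
#   python sniffer.py --online eth0 --json ./sequences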
|
import jsonpatch
import re
import subprocess
import sys
import typer
import yaml
from pathlib import Path
from loguru import logger
app = typer.Typer(add_completion=False)
def _setup_logging(debug):
"""
Setup the log formatter for this script
"""
log_level = "INFO"
if debug:
log_level = "DEBUG"
logger.remove()
logger.add(
sys.stdout,
colorize=True,
format="<level>{message}</level>",
level=log_level,
)
def process_node(hostname: str, domain: str, template: str, config_patches: list) -> dict:
logger.info(f"Processing node {hostname}")
with template.open('r') as fp:
node_template = yaml.safe_load(fp)
node_configpatches = [
{'op': 'add', 'path': '/machine/network/hostname', 'value': f"{hostname}.{domain}"}
]
if config_patches:
node_configpatches = node_configpatches + config_patches
node_configpatches = jsonpatch.JsonPatch(node_configpatches)
result = node_configpatches.apply(node_template)
return result
def _load_variables_from_file(path: Path) -> dict:
file_parts = path.name.split('.')
if file_parts[-2] == 'sops':
logger.info("Detected encrypted variables file, trying to decrypt.")
sops_result = subprocess.run(
[
"sops", "-d", str(path)
],
capture_output=True,
encoding="utf8"
)
        if sops_result.returncode != 0:
            logger.error("Could not decrypt variables file.")
            raise typer.Exit(1)
        data = sops_result.stdout
    else:
        with open(path, 'r') as fh:
            data = fh.read()
    output = yaml.safe_load(data)
    return output
def parse_variables(input: dict, variable_pattern: str, variables: dict) -> dict:
if isinstance(input, dict):
return {k: parse_variables(v, variable_pattern, variables) for k, v in input.items()}
elif isinstance(input, list):
return [parse_variables(v, variable_pattern, variables) for v in input]
elif isinstance(input, str):
return re.sub(variable_pattern, lambda line: _replace_variable(line, variables), input)
return input
def _replace_variable(variable: re.Match, variables: dict) -> str:
from functools import reduce
variable_path = variable.groups()[0]
try:
env_var_value = reduce(
lambda a, b: a[b], variable_path.split("."), variables
)
except KeyError:
env_var_value = ""
return env_var_value
@app.command()
def main(
cluster_config_file: Path = typer.Argument(
..., help="The YAML file containing the cluster configuration."),
output_folder: Path = typer.Option(
None, help="Folder where the output should be written."),
variables_file: Path = typer.Option(
None, help="File containing variables to load."),
debug: bool = False,
):
_setup_logging(debug)
if not cluster_config_file.is_file():
logger.error(f"Could not find file {str(cluster_config_file)}")
raise typer.Exit()
base_folder = cluster_config_file.parent
if output_folder is None:
output_folder = Path.joinpath(base_folder, "machineConfigs")
if not output_folder.is_dir():
create_output_folder = typer.confirm(
f"Folder '{str(output_folder)}' does not exist. Create it?")
if not create_output_folder:
raise typer.Abort()
output_folder.mkdir()
with cluster_config_file.open('r') as fp:
cluster_config = yaml.safe_load(fp)
logger.info(
f"Generating talos configuration files for cluster '{cluster_config['name']}'")
template_controlplane = Path.joinpath(base_folder, 'controlplane.yaml')
template_worker = Path.joinpath(base_folder, 'worker.yaml')
template_taloscfg = Path.joinpath(base_folder, 'talosconfig')
if not (template_controlplane.exists() and template_worker.exists()):
logger.info(
f"No existing configuration templates found in {str(base_folder)}, generating new ones.")
subprocess.run(
[
"talosctl", "gen", "config",
cluster_config['name'],
cluster_config['controlplane']['endpoint'],
"--output-dir", str(base_folder)
],
stdout=subprocess.DEVNULL
)
template_taloscfg.unlink()
variables = None
if variables_file and variables_file.exists():
variables = _load_variables_from_file(variables_file)
    # Render the configuration for every node (control plane and workers)
for node in cluster_config['nodes']:
hostname = node['hostname']
domain = node['domain']
if node.get('controlplane') and node['controlplane']:
template = template_controlplane
config_patches = cluster_config['controlplane'].get(
'configPatches') or []
else:
template = template_worker
config_patches = cluster_config['workers'].get(
'configPatches') or []
config_patches = config_patches + (node.get('configPatches') or [])
result = process_node(hostname, domain, template, config_patches)
if variables:
result = parse_variables(result, r"\$\{(.*)\}", variables)
        with Path.joinpath(output_folder, f"{hostname}.yaml").open('w') as fp:
            yaml.safe_dump(result, fp)
    logger.info("Done")
if __name__ == "__main__":
app()
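
# Hypothetical shape of the cluster configuration file read above (keys taken
# from the accesses in main(); values are illustrative only):
#
# name: my-cluster
# controlplane:
#   endpoint: https://192.168.1.10:6443
#   configPatches: []
# workers:
#   configPatches: []
# nodes:
#   - hostname: cp-01
#     domain: example.lan
#     controlplane: true
#   - hostname: worker-01
#     domain: example.lan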
|
"""
Copyright: Wenyi Tang 2017-2018
Author: Wenyi Tang
Email: wenyi.tang@intel.com
Created Date: Oct 15th 2018
Extend the pre-Environment module, provide different and extensible
training methodology for SISR, VSR or other image tasks.
"""
# Copyright (c): Wenyi Tang 2017-2019.
# Author: Wenyi Tang
# Email: wenyi.tang@intel.com
# Update Date: 2019/4/3 8:28 PM
import csv
import time
from pathlib import Path
import numpy as np
import tensorflow as tf
import tqdm
from ..Util.Config import Config
from ..Util.Utility import to_list
def _make_ckpt_name(name, scale, step):
return '{}-sc{}-ep{:04d}.ckpt'.format(name, scale, step)
def _parse_ckpt_name(name):
# sample name: {model}-sc{scale}-ep{epoch}.ckpt(.index)
if not name:
return 0
model_name, scale, epochs = Path(name).stem.split('.')[0].split('-')
return int(epochs[2:])
def _ensemble_expand(feature):
r0 = feature
r1 = np.rot90(feature, 1, axes=[-3, -2])
r2 = np.rot90(feature, 2, axes=[-3, -2])
r3 = np.rot90(feature, 3, axes=[-3, -2])
r4 = np.flip(feature, axis=-2)
r5 = np.rot90(r4, 1, axes=[-3, -2])
r6 = np.rot90(r4, 2, axes=[-3, -2])
r7 = np.rot90(r4, 3, axes=[-3, -2])
return r0, r1, r2, r3, r4, r5, r6, r7
def _ensemble_reduce_mean(outputs):
results = []
for i in outputs:
outputs_ensemble = [
i[0],
np.rot90(i[1], 3, axes=[-3, -2]),
np.rot90(i[2], 2, axes=[-3, -2]),
np.rot90(i[3], 1, axes=[-3, -2]),
np.flip(i[4], axis=-2),
np.flip(np.rot90(i[5], 3, axes=[-3, -2]), axis=-2),
np.flip(np.rot90(i[6], 2, axes=[-3, -2]), axis=-2),
np.flip(np.rot90(i[7], 1, axes=[-3, -2]), axis=-2),
]
results.append(np.concatenate(outputs_ensemble).mean(axis=0, keepdims=True))
return results
class Trainer:
"""A pure interface trainer.
A trainer provides following APIs:
>>> Trainer.fit
>>> Trainer.infer
>>> Trainer.benchmark
>>> Trainer.export
Args:
model: the SR model object. @see SuperResolution
work_dir: the dir to save training checkpoints and logs
verbose: tf logging level
"""
def __init__(self, model, work_dir, verbose=tf.logging.INFO):
self._m = model
self._saved = Path(work_dir) / 'save'
self._logd = Path(work_dir) / 'log'
self._verb = verbose
self._restored = False
self._csv = verbose <= tf.logging.INFO
def _startup(self):
tf.logging.set_verbosity(self._verb)
self._saved.mkdir(parents=True, exist_ok=True)
self._logd.mkdir(parents=True, exist_ok=True)
if self._csv:
self._csv_file = open(Path(self._logd / 'train_metrics.csv'), 'a')
self._csv_writer = csv.writer(self._csv_file)
if self.model.compiled:
self.graph = tf.get_default_graph()
else:
with tf.Graph().as_default() as g:
self.model.compile()
self.graph = g
def __enter__(self):
"""Create session of tensorflow and build model graph"""
self._startup()
conf = tf.ConfigProto(
allow_soft_placement=True,
gpu_options=tf.GPUOptions(allow_growth=True))
sess = tf.Session(graph=self.graph, config=conf)
sess.__enter__()
self.savers = self.model.savers
sess.run(tf.global_variables_initializer())
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""Close session"""
sess = tf.get_default_session()
sess.__exit__(exc_type, exc_val, exc_tb)
def _find_last_ckpt(self):
# restore the latest checkpoint in save dir
ckpt = tf.train.get_checkpoint_state(self._saved)
if ckpt and ckpt.model_checkpoint_path:
return tf.train.latest_checkpoint(self._saved)
# try another way
ckpt = to_list(self._saved.glob('*.ckpt.index'))
# sort as modification time
ckpt = sorted(ckpt, key=lambda x: x.stat().st_mtime_ns)
return self._saved / ckpt[-1].stem if ckpt else None
def _restore_model(self, sess):
last_checkpoint_step = 0
for name in self.savers:
saver = self.savers.get(name)
ckpt = to_list(self._saved.glob('{}*.index'.format(name)))
if ckpt:
ckpt = sorted(ckpt, key=lambda x: x.stat().st_mtime_ns)
ckpt = self._saved / ckpt[-1].stem
try:
saver.restore(sess, str(ckpt))
except tf.errors.NotFoundError:
tf.logging.warning(
'{} of model {} could not be restored'.format(
name, self.model.name))
last_checkpoint_step = _parse_ckpt_name(ckpt)
return last_checkpoint_step
def _save_model(self, sess, step):
for name in self.savers:
saver = self.savers.get(name)
file = self._saved / _make_ckpt_name(name, self.model.scale[0], step)
saver.save(sess, str(file))
def _restore(self):
# restore graph
sess = tf.get_default_session()
if sess is None:
raise RuntimeError('No session initialized')
if self._restored:
return sess
self.last_epoch = self._restore_model(sess)
self._restored = True
return sess
def export(self, export_dir='.', freeze_model=False):
"""Export model as protobuf
Args:
export_dir: directory to save the exported model
freeze_model: freeze all trainable variables
"""
self._restore()
if freeze_model:
self.model.export_freeze_model(export_dir)
else:
self.model.export_saved_model(export_dir)
def set_seed(self, seed):
np.random.seed(seed)
tf.set_random_seed(seed)
def fit(self, *args, **kwargs):
raise NotImplementedError
def infer(self, *args, **kwargs):
raise NotImplementedError
def benchmark(self, *args, **kwargs):
raise NotImplementedError
@property
def model(self):
return self._m
class VSR(Trainer):
"""Default trainer for task SISR or VSR"""
v = Config() # local variables
"""=======================================
components, sub-functions, helpers
=======================================
"""
def query_config(self, config, **kwargs) -> Config:
assert isinstance(config, Config)
config.update(kwargs) # override parameters
self.v.epoch = config.epoch # current epoch
self.v.epochs = config.epochs # total epochs
self.v.lr = config.lr # learning rate
self.v.lr_schedule = config.lr_schedule
self.v.memory_limit = config.memory_limit
self.v.feature_callbacks = config.feature_callbacks or []
self.v.label_callbacks = config.label_callbacks or []
self.v.output_callbacks = config.output_callbacks or []
self.v.validate_every_n_epoch = config.validate_every_n_epoch or 1
self.v.subdir = config.subdir
self.v.random_val = config.random_val
self.v.ensemble = config.ensemble
return self.v
def fit_init(self) -> bool:
v = self.v
v.sess = self._restore()
if self.last_epoch >= v.epochs:
return False
tf.logging.info('Fitting: {}'.format(self.model.name.upper()))
self.model.display()
v.summary_writer = tf.summary.FileWriter(
str(self._logd), graph=tf.get_default_graph())
v.global_step = self.model.global_steps.eval()
return True
def fit_close(self):
# flush all pending summaries to disk
if self.v.summary_writer:
self.v.summary_writer.close()
if self._csv:
self._csv_file.close()
def fn_train_each_epoch(self):
v = self.v
mem = v.memory_limit
train_iter = v.train_loader.make_one_shot_iterator(mem, shuffle=True)
if hasattr(v.train_loader, 'prefetch'):
v.train_loader.prefetch(v.memory_limit)
date = time.strftime('%Y-%m-%d %T', time.localtime())
v.avg_meas = {}
if v.lr_schedule and callable(v.lr_schedule):
v.lr = v.lr_schedule(steps=v.global_step)
print('| {} | Epoch: {}/{} | LR: {:.2g} |'.format(
date, v.epoch, v.epochs, v.lr))
with tqdm.tqdm(train_iter, unit='batch', ascii=True) as r:
for items in r:
label, feature, name, post = items[:4]
self.fn_train_each_step(label, feature, name, post)
r.set_postfix(v.loss)
for _k, _v in v.avg_meas.items():
print('| Epoch average {} = {:.6f} |'.format(_k, np.mean(_v)))
if self._csv:
if self._csv_file.tell() == 0:
self._csv_writer.writerow(v.avg_meas.keys())
self._csv_writer.writerow([np.mean(s) for s in v.avg_meas.values()])
self._csv_file.flush()
if v.epoch % v.validate_every_n_epoch == 0:
self.benchmark(v.val_loader, v, epoch=v.epoch, memory_limit='1GB')
v.summary_writer.add_summary(self.model.summary(), v.global_step)
self._save_model(v.sess, v.epoch)
def fn_train_each_step(self, label=None, feature=None, name=None, post=None):
v = self.v
for fn in v.feature_callbacks:
feature = fn(feature, name=name)
for fn in v.label_callbacks:
label = fn(label, name=name)
loss = self.model.train_batch(feature, label, learning_rate=v.lr,
epochs=v.epoch)
v.global_step = self.model.global_steps.eval()
for _k, _v in loss.items():
v.avg_meas[_k] = \
v.avg_meas[_k] + [_v] if v.avg_meas.get(_k) else [_v]
loss[_k] = '{:08.5f}'.format(_v)
v.loss = loss
def fn_infer_each_step(self, label=None, feature=None, name=None, post=None):
v = self.v
origin_feat = feature
for fn in v.feature_callbacks:
feature = fn(feature, name=name)
if v.ensemble:
# add self-ensemble boosting metric score
feature_ensemble = _ensemble_expand(feature)
outputs_ensemble = []
for f in feature_ensemble:
y, _ = self.model.test_batch(f, None)
outputs_ensemble.append(y)
outputs = []
for i in range(len(outputs_ensemble[0])):
outputs.append([j[i] for j in outputs_ensemble])
outputs = _ensemble_reduce_mean(outputs)
else:
outputs, _ = self.model.test_batch(feature, None)
for fn in v.output_callbacks:
outputs = fn(outputs, input=origin_feat, name=name,
subdir=v.subdir, mode=v.color_format)
def fn_benchmark_each_step(self, label=None, feature=None, name=None,
post=None):
v = self.v
origin_feat = feature
for fn in v.feature_callbacks:
feature = fn(feature, name=name)
for fn in v.label_callbacks:
label = fn(label, name=name)
outputs, metrics = self.model.test_batch(feature, label, epochs=v.epoch)
for _k, _v in metrics.items():
if _k not in v.mean_metrics:
v.mean_metrics[_k] = []
v.mean_metrics[_k] += [_v]
for fn in v.output_callbacks:
outputs = fn(outputs, input=origin_feat, label=label, name=name,
mode=v.color_format, subdir=v.subdir)
def fn_benchmark_body(self):
v = self.v
it = v.loader.make_one_shot_iterator(v.memory_limit, shuffle=v.random_val)
for items in tqdm.tqdm(it, 'Test', ascii=True):
label, feature, name, post = items[:4]
self.fn_benchmark_each_step(label, feature, name, post)
"""=======================================
Interface: fit, benchmark, infer
=======================================
"""
def fit(self, loaders, config, **kwargs):
"""Fit the model.
Args:
loaders: a tuple of 2 loaders, the 1st one is used for training,
and the 2nd one is used for validating.
config: fitting configuration, an instance of `Util.Config.Config`
kwargs: additional arguments to override the same ones in config.
"""
v = self.query_config(config, **kwargs)
v.train_loader, v.val_loader = loaders
if not self.fit_init():
return
for epoch in range(self.last_epoch + 1, v.epochs + 1):
v.epoch = epoch
self.fn_train_each_epoch()
self.fit_close()
def infer(self, loader, config, **kwargs):
"""Infer SR images.
Args:
loader: a loader for enumerating LR images
config: inferring configuration, an instance of `Util.Config.Config`
kwargs: additional arguments to override the same ones in config.
"""
v = self.query_config(config, **kwargs)
v.color_format = loader.color_format
self._restore()
it = loader.make_one_shot_iterator()
if len(it):
tf.logging.info('Inferring {} at epoch {}'.format(
self.model.name, self.last_epoch))
else:
return
# use original images in inferring
for items in tqdm.tqdm(it, 'Infer', ascii=True):
feature = items[0]
name = items[2]
self.fn_infer_each_step(None, feature, name)
def benchmark(self, loader, config, **kwargs):
"""Benchmark/validate the model.
Args:
loader: a loader for enumerating LR images
config: benchmark configuration, an instance of `Util.Config.Config`
kwargs: additional arguments to override the same ones in config.
"""
v = self.query_config(config, **kwargs)
v.color_format = loader.color_format
self._restore()
v.mean_metrics = {}
v.loader = loader
self.fn_benchmark_body()
for _k, _v in v.mean_metrics.items():
print('{}: {:.6f}'.format(_k, np.mean(_v)), end=', ')
print('')
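
# Hypothetical usage sketch (not part of the original module): driving the VSR
# trainer; `model`, the loaders and `config` are assumed to be constructed
# elsewhere.
#
# with VSR(model, work_dir='/tmp/vsr_run') as trainer:
#     trainer.fit((train_loader, val_loader), config)
#     trainer.benchmark(val_loader, config)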
|
from flask import render_template, request, redirect, url_for,abort
from app.main import main
from flask_login import login_required, current_user
from app.main.forms import UpdateProfile, PitchForm, CommentForm
from .. import db,photos
from app.models import User, Pitch, Comment, ProfilePhoto
@main.route('/')
def index():
pitches = Pitch.query.all()
return render_template('index.html', pitches=pitches)
@main.route('/pitches/businesspitches')
def business():
pitches = Pitch.get_pitches('business')
return render_template("business.html", pitches=pitches)
@main.route('/pitches/interviewpitches')
def interview():
pitches = Pitch.get_pitches('interview')
return render_template("interview.html", pitches=pitches)
@main.route('/pitches/productpitches')
def product():
pitches = Pitch.get_pitches('product')
return render_template("product.html", pitches=pitches)
@main.route('/user/<uname>')
def profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
return render_template("profile/profile.html", user = user)
@main.route('/user/<uname>/update',methods = ['GET','POST'])
@login_required
def update_profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
form = UpdateProfile()
if form.validate_on_submit():
user.bio = form.bio.data
db.session.add(user)
db.session.commit()
return redirect(url_for('.profile',uname=user.username))
return render_template('profile/update.html',form =form)
@main.route('/user/<uname>/update/pic',methods= ['POST'])
@login_required
def update_pic(uname):
user = User.query.filter_by(username = uname).first()
if 'photo' in request.files:
filename = photos.save(request.files['photo'])
path = f'photos/{filename}'
user.profile_pic_path = path
db.session.commit()
return redirect(url_for('main.profile',uname=uname))
@main.route('/pitch/new', methods=['GET', 'POST'])
@login_required
def pitch():
"""
View pitch function that returns the pitch page and data
"""
pitch_form = PitchForm()
if pitch_form.validate_on_submit():
title = pitch_form.title.data
description = pitch_form.description.data
category = pitch_form.category.data
user_id = current_user._get_current_object().id
new_pitch = Pitch(title=title, description=description, category=category, user_id=user_id)
new_pitch.save_pitch()
return redirect(url_for('main.index'))
title = 'New Pitch'
return render_template('pitch.html', title=title, pitch_form = pitch_form)
@main.route('/comment/<int:id>', methods=['GET', 'POST'])
@login_required
def comment(id):
form = CommentForm()
pitch = Pitch.query.get(id)
user = User.query.all()
comments = Comment.query.filter_by(pitches_id=id).all()
if form.validate_on_submit():
comment = form.comment.data
user_id = current_user._get_current_object().id
new_comment = Comment(
comment=comment,
user_id=user_id,
pitches_id=pitch.id
)
new_comment.save_comment()
new_comments = [new_comment]
print(new_comments)
return redirect(url_for('.index'))
return render_template('comment.html', form=form, comments=comments, user=user)
@main.route('/like/<int:post_id>/<action>')
@login_required
def like_action(post_id, action):
post = Pitch.query.filter_by(id=post_id).first_or_404()
if action == 'like':
current_user.like_post(post)
db.session.commit()
if action == 'unlike':
current_user.unlike_post(post)
db.session.commit()
return redirect(request.referrer)
|
# -*- coding: utf-8 -*-
"""
test_utils.py
Copyright 2017 CodeRatchet
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
"""
from scrapytest.utils import find_first, merge_dict
def test_find_first_returns_none_on_condition_not_found():
assert find_first({'foo': 'bar', 'baz': 'spam'}, lambda x, y: False) is None
def test_merge_dict_sees_correct_values():
a = {'first': {'all_rows': {'pass': 'dog', 'number': '1'}}}
b = {'first': {'all_rows': {'fail': 'cat', 'number': '5'}}}
assert merge_dict(b, a) == {'first': {'all_rows': {'pass': 'dog', 'fail': 'cat', 'number': '5'}}}
|
from ._celery import app as celery_app
__all__ = ["celery_app"]
|
import sys, socket, os, re, time
from index import Index
THREADS = 32 # num of threads processing the queue
def process_file_name_and_text(path):
with open(path) as f:
return path.replace("/","\\"), f.read()
def load_directories(path):
docs = [path]
while docs:
top = docs.pop()
if os.path.isdir(top):
for i in os.listdir(top):
abs_path = os.path.join(top, i)
docs.append(abs_path)
elif top.endswith('.txt'):
try:
yield process_file_name_and_text(top) # generators
except:
pass
def create_dictionary_index(path):
index = Index()
docs_from_load = load_directories(path)
start_time = time.perf_counter()
index.bulk_index(docs_from_load, threads=THREADS)
print("%s lasts to process 2000 files with %s threads" % (time.perf_counter() - start_time, THREADS))
return index
def main():
print(sys.argv)
if len(sys.argv) == 2:
path = sys.argv[1]
        print('Inverted index will be created from data in folder %s' % path)
index = create_dictionary_index(path)
# local - console
# print('Enter word to get top 10 documents that contains it')
# while True:
# try:
# token = input('\nInput: ')
# print(index.get_documents_containing_word(token, count=10, text_=True))
# except KeyboardInterrupt:
# break
# socket solution - https://habr.com/ru/post/149077/
        print('Send a request from the client to get the top documents containing a word')
while True:
sock = socket.socket()
sock.bind(('', 9090)) # on port 9090
sock.listen(10)
conn, addr = sock.accept()
while True:
try:
data = conn.recv(1024)
except: # connection closed
break
if not data:
break
client_res_list = data.decode().replace("'","").strip(')(').split(', ')
word = client_res_list[0]
count = int(client_res_list[1])
if client_res_list[2].lower() == "y":
send_text = True
else:
send_text = False
index_res = index.get_documents_containing_word(word, count, text_=send_text)
print(str(len(index_res)) + " Docs was sent to " + str(addr))
conn.send(str(index_res).encode())
conn.close()
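# A minimal client sketch (an assumption, not part of the original script): the server loop
# above parses the received bytes as str((word, count, 'y'/'n')), so the request is encoded
# the same way here; 'localhost' and port 9090 mirror the bind() call in main().
def example_client(word='python', count=10, with_text='n', host='localhost', port=9090):
    client = socket.socket()
    client.connect((host, port))
    client.send(str((word, count, with_text)).encode())  # e.g. "('python', 10, 'n')"
    response = client.recv(1024 * 1024).decode()
    client.close()
    return response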
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 3/28/2018 2:27 PM
# @Author : sunyonghai
# @File : simple_argparse.py.py
# @Software: ZJ_AI
# =========================================================
import argparse
ap = argparse.ArgumentParser()
ap.add_argument("-n", "--name", required=True, help="name of the user")
args = vars(ap.parse_args())
# display a friendly message to the user
print("Hi there {}, it's nice to meet you!".format(args["name"])) |
from collections import deque
from typing import List
class Solution:
def canFinish(self, numCourses: int, prerequisites: List[List[int]]) -> bool:
# build graph
graph, indeg = {i:[] for i in range(numCourses)}, {i:0 for i in range(numCourses)}
for end, start in prerequisites:
graph[start].append(end)
indeg[end] += 1
# output topological ordering
queue = deque([node for node, deg in indeg.items() if deg == 0])
visited = set() # or use set(queue)
while queue:
node = queue.popleft()
visited.add(node)
for neighbor in graph[node]:
                if neighbor in visited:  # then a cycle is found
return False
indeg[neighbor] -= 1
if indeg[neighbor] == 0:
queue.append(neighbor)
return len(visited) == numCourses |
import datetime
import unittest
from pikax import params, ClientException, APIUserError
from pikax import settings
from pikax.api.androidclient import AndroidAPIClient
class AndroidClientTest(unittest.TestCase):
def setUp(self) -> None:
self.client = AndroidAPIClient(settings.username, settings.password)
self.user = self.client.visits(user_id=38088)
def test_following(self):
ids = self.client.followings(user_id=18526689, limit=10)
self.assertEqual(len(ids), 10)
def test_search(self):
ids = self.client.search(keyword='arknights', limit=7)
self.assertEqual(len(ids), 7)
def test_bookmark(self):
ids = self.client.bookmarks(limit=15)
self.assertEqual(len(ids), 15)
def test_manga(self):
# TODO: add some mangas into the account
ids = self.client.mangas(limit=0)
self.assertEqual(len(ids), 0)
def test_search2(self):
ids = self.client.search(keyword='arknights', limit=23, sort=params.Sort.DATE_DESC,
search_type=params.SearchType.ILLUST_OR_MANGA,
match=params.Match.PARTIAL,
search_range=params.Range.A_YEAR)
self.assertEqual(len(ids), 23)
def test_rank_rookie(self):
ids = self.client.rank(rank_type=params.RankType.ROOKIE, date=datetime.date.today(),
content=params.Content.MANGA, limit=19)
self.assertEqual(len(ids), 19)
def test_user_illust(self):
ids = self.user.illusts(limit=45)
self.assertEqual(len(ids), 45)
def test_user_manga(self):
ids = self.user.mangas(limit=2)
self.assertEqual(len(ids), 2)
def test_following_invalid_id(self):
with self.assertRaises(ClientException):
self.client.followings(user_id=0, limit=123)
def test_visits_invalid_id(self):
with self.assertRaises(APIUserError):
self.client.visits(user_id=0)
def test_account(self):
# default account's account name
self.assertEqual(self.client.account, 'crawl_user')
def test_name(self):
# default account name is crawler
self.assertEqual(self.client.name, 'crawler')
def test_id(self):
# default account's id
self.assertEqual(self.client.id, '41689219')
if __name__ == '__main__':
unittest.main()
|
import numpy as np
from common.constant import *
from utils.data_load import normalize
class ClusteringAlgorithm():
""" 聚类算法 """
def __init__(self, X : np.ndarray, cluster_num : int):
self.X = X
self.c = cluster_num
self.n = X.shape[MatrixShapeIndex.column]
self.feature_num = X.shape[MatrixShapeIndex.row]
def __get_initial_U__(self) -> np.ndarray:
U = np.random.rand(self.c, self.n)
U = normalize(U)
return U
def __Update_U__(self, V : np.ndarray, U : np.ndarray) -> np.ndarray:
# U for data input
pass
def __Update_V__(self, U : np.ndarray) -> np.ndarray:
pass
def iteration(self, iter_num = 200, quit_epsilon = epsilon):
import time
        start = time.perf_counter()
random_choice = np.random.randint(0, self.n, self.c)
initial_V = self.X[:, random_choice]
V = initial_V.copy()
U = self.__get_initial_U__()
for t in range(iter_num):
U_save = U.copy()
U = self.__Update_U__(V, U)
U_f = Frobenius(U, U_save)
if U_f < quit_epsilon:
break
V_save = V.copy()
V = self.__Update_V__(U)
V_f = Frobenius(V, V_save)
if V_f < quit_epsilon:
break
        elapsed = (time.perf_counter() - start)
return {
'U' : U,
'V' : V,
't' : t,
'U_f' : U_f,
'V_f' : V_f,
'use_time' : elapsed
}
class Semi_SupervisedClusteringAlgorithm(ClusteringAlgorithm):
""" 半监督聚类算法 """
def inputSemi_SupervisedInformaintion(self):
pass
class Semi_SupervisedWithPrioriKnowledgeClusteringAlgorithm(Semi_SupervisedClusteringAlgorithm):
""" 以先验知识半监督的聚类算法 """
def __init__(self, X : np.ndarray, cluster_num : int):
super(Semi_SupervisedWithPrioriKnowledgeClusteringAlgorithm, self).__init__(X, cluster_num)
self.gave_tilde_U = False
self.tilde_U = np.zeros((self.c, self.n))
self.initial_V = np.random.rand(self.feature_num, self.c)
def inputSemi_SupervisedInformaintion(self, tilde_U : np.ndarray, initial_V : np.ndarray):
        assert tilde_U.shape[MatrixShapeIndex.row] == self.c and \
               tilde_U.shape[MatrixShapeIndex.column] == self.n, 'Prior membership matrix shape does not match!'
        assert initial_V.shape[MatrixShapeIndex.column] == self.c and \
               initial_V.shape[MatrixShapeIndex.row] == self.feature_num, 'Initial cluster center matrix shape does not match!'
self.tilde_U = tilde_U.copy()
self.initial_V = initial_V
self.gave_tilde_U = True
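# Illustrative subclass (an assumption, not part of the original project): hard c-means style
# updates, assuming X is (feature_num, n), U is (c, n) and V is (feature_num, c) as implied by
# the base class above. It only sketches how __Update_U__ / __Update_V__ could be filled in.
class ExampleHardCMeans(ClusteringAlgorithm):
    """ Minimal hard c-means style example built on ClusteringAlgorithm. """
    def __Update_U__(self, V : np.ndarray, U : np.ndarray) -> np.ndarray:
        # squared distance from every sample (column of X) to every center (column of V)
        dist = ((self.X[:, None, :] - V[:, :, None]) ** 2).sum(axis=0)  # shape (c, n)
        U_new = np.zeros_like(U)
        U_new[dist.argmin(axis=0), np.arange(self.n)] = 1.0  # one-hot assignment per sample
        return U_new
    def __Update_V__(self, U : np.ndarray) -> np.ndarray:
        # each center becomes the mean of the samples assigned to it
        counts = U.sum(axis=1)
        counts[counts == 0] = 1.0  # guard against empty clusters
        return (self.X @ U.T) / counts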
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from sedona.core.geom.envelope import Envelope
from tests.test_base import TestBase
class TestGeomPrimitives(TestBase):
def test_jvm_envelope(self):
envelope = Envelope(0.0, 5.0, 0.0, 5.0)
jvm_instance = envelope.create_jvm_instance(self.spark.sparkContext._jvm)
envelope_area = jvm_instance.getArea()
assert envelope_area == 25.0, f"Expected area to be equal 25 but {envelope_area} was found"
|
import re
import unicodedata
from django.contrib.auth.models import PermissionsMixin
from django.db import models
from django.core import validators
from django.utils.deconstruct import deconstructible
from django.core import exceptions
from django.utils.translation import gettext_lazy as _
from taggit.managers import TaggableManager
from taggit.models import TagBase, CommonGenericTaggedItemBase
from .enums import *
@deconstructible
class ASCIIUsernameValidator(validators.RegexValidator):
regex = r'^[\w.][\w.-]*[\w-]\Z'
    message = _(
        'Enter a valid username. This value must be at least 2 characters long and may contain only English '
        'letters, numbers, and ./-/_ characters but cannot start with a "-" or end with a ".".'
    )
flags = re.ASCII
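    # Illustrative behaviour of the pattern above (assumed examples):
    #   "ab", "a.b-c", "user_1"  -> accepted
    #   "a", "-ab", "ab."        -> rejected (too short, leading "-", trailing ".")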
def validate_username_for_gitlab(value: str):
if value.endswith('.git') or value.endswith('.atom'):
raise exceptions.ValidationError(_('Enter a valid username. This value cannot end in ".git" or ".atom".'))
class AccountTag(TagBase):
"""
Used for tags like "test" or "beta tester" for now.
"""
class Meta:
verbose_name = _("Account Tag")
verbose_name_plural = _("Account Tags")
class TaggedAccount(CommonGenericTaggedItemBase):
object_id = models.CharField(max_length=50, verbose_name=_("object ID"), db_index=True)
tag = models.ForeignKey(AccountTag, models.CASCADE, related_name="%(app_label)s_%(class)s_items")
class User(PermissionsMixin):
fusion_user_id = models.CharField(max_length=50, primary_key=True)
gitlab_user_id = models.PositiveIntegerField(unique=True, null=True, blank=True)
discourse_user_id = models.PositiveIntegerField(unique=True, null=True, blank=True)
username = models.CharField(
_('username'),
max_length=55,
unique=True,
help_text=_('Required. 2 to 55 characters. Letters, digits and ./-/_ only.'),
validators=[ASCIIUsernameValidator(), validate_username_for_gitlab],
error_messages={
'unique': _("A user with that username already exists!")
}
)
email = models.EmailField(_('email'), unique=True)
full_name = models.CharField(_('full name'), max_length=100)
tags = TaggableManager(through=TaggedAccount, blank=True)
date_joined = models.DateTimeField(_('date joined'), auto_now_add=True, editable=False)
last_login = models.DateTimeField(_('last login'), blank=True, null=True, editable=False)
is_staff = models.BooleanField(
_('staff status'),
default=False,
help_text=_('Designates whether the user can log into the admin site.')
)
is_active = models.BooleanField(
_('active'),
default=False,
help_text=_(
'Designates whether this user should be treated as active. '
'Unselect this instead of deleting accounts.'
)
)
is_verified = models.BooleanField(default=False, help_text=_("Designates whether this user is a verified person."))
REQUIRED_FIELDS = []
EMAIL_FIELD = 'email'
USERNAME_FIELD = 'username'
class Meta:
verbose_name = _('user')
verbose_name_plural = _('users')
def get_short_name(self):
"""Return the short name for the user."""
return self.full_name
def get_full_name(self):
"""Return the first_name."""
return self.get_short_name()
def get_username(self):
"""Return the username for this User."""
return getattr(self, self.USERNAME_FIELD)
def clean(self):
setattr(self, self.USERNAME_FIELD, self.normalize_username(self.get_username()))
def natural_key(self):
return self.get_username(),
@property
def is_anonymous(self):
"""
Always return False. This is a way of comparing User objects to
anonymous users.
"""
return False
@property
def is_authenticated(self):
"""
Always return True. This is a way to tell if the user has been
authenticated in templates.
"""
return True
@classmethod
def get_email_field_name(cls):
try:
return cls.EMAIL_FIELD
except AttributeError:
return 'email'
@classmethod
def normalize_username(cls, username):
return unicodedata.normalize('NFKC', username) if isinstance(username, str) else username
def check_password(self, raw_password):
return False
class Team(models.Model):
creator = models.ForeignKey(User, models.CASCADE, related_name='teams_created')
name = models.CharField(max_length=100, blank=True)
individual = models.BooleanField(default=True)
members = models.ManyToManyField(User, through='Member', related_name='teams')
date_created = models.DateTimeField(_('date created'), auto_now_add=True, editable=False)
class Member(models.Model):
user = models.ForeignKey(User, models.CASCADE)
team = models.ForeignKey(Team, models.CASCADE)
access_level = models.PositiveSmallIntegerField(choices=MemberAccessLevel.choices, default=MemberAccessLevel.MEMBER)
status = models.PositiveSmallIntegerField(choices=MembershipStatus.choices, default=MembershipStatus.PENDING)
date_joined = models.DateTimeField(auto_now_add=True)
|
import unittest
from datetime import timedelta
import simplekv.memory
from flask import Flask
from flask_jwt_extended.config import get_access_expires, get_refresh_expires, \
get_algorithm, get_blacklist_enabled, get_blacklist_store, \
get_blacklist_checks, get_auth_header
from flask_jwt_extended import JWTManager
class TestEndpoints(unittest.TestCase):
def setUp(self):
self.app = Flask(__name__)
self.app.secret_key = 'super=secret'
JWTManager(self.app)
self.client = self.app.test_client()
def test_default_configs(self):
with self.app.test_request_context():
self.assertEqual(get_access_expires(), timedelta(minutes=15))
self.assertEqual(get_refresh_expires(), timedelta(days=30))
self.assertEqual(get_algorithm(), 'HS256')
self.assertEqual(get_blacklist_enabled(), False)
self.assertEqual(get_blacklist_store(), None)
self.assertEqual(get_blacklist_checks(), 'refresh')
self.assertEqual(get_auth_header(), 'Bearer')
def test_override_configs(self):
self.app.config['JWT_ACCESS_TOKEN_EXPIRES'] = timedelta(minutes=5)
self.app.config['JWT_REFRESH_TOKEN_EXPIRES'] = timedelta(days=7)
self.app.config['JWT_ALGORITHM'] = 'HS512'
self.app.config['JWT_BLACKLIST_ENABLED'] = True
self.app.config['JWT_BLACKLIST_STORE'] = simplekv.memory.DictStore()
self.app.config['JWT_BLACKLIST_TOKEN_CHECKS'] = 'all'
self.app.config['JWT_AUTH_HEADER'] = 'JWT'
with self.app.test_request_context():
self.assertEqual(get_access_expires(), timedelta(minutes=5))
self.assertEqual(get_refresh_expires(), timedelta(days=7))
self.assertEqual(get_algorithm(), 'HS512')
self.assertEqual(get_blacklist_enabled(), True)
self.assertIsInstance(get_blacklist_store(), simplekv.memory.DictStore)
self.assertEqual(get_blacklist_checks(), 'all')
self.assertEqual(get_auth_header(), 'JWT')
|
#!/usr/bin/env python
# coding=utf-8
"""Unit tests for pyiosxr, a module to interact with Cisco devices running IOS-XR."""
import os
import sys
import time
import unittest
from lxml import etree as ET
from six import binary_type
# ~~~ import pyIOSXR modules ~~~
from napalm.pyIOSXR import IOSXR
# exceptions
from napalm.pyIOSXR.exceptions import LockError
from napalm.pyIOSXR.exceptions import UnlockError
from napalm.pyIOSXR.exceptions import XMLCLIError
from napalm.pyIOSXR.exceptions import CommitError
from napalm.pyIOSXR.exceptions import TimeoutError
from napalm.pyIOSXR.exceptions import IteratorIDError
from napalm.pyIOSXR.exceptions import InvalidInputError
from napalm.pyIOSXR.exceptions import InvalidXMLResponse
class _MockedNetMikoDevice(object):
"""
    Defines the minimum attributes necessary to mock an SSH connection using netmiko.
"""
def __init__(self):
class _MockedParamikoTransport(object):
def close(self):
pass
self.remote_conn = _MockedParamikoTransport()
@staticmethod
def get_mock_file(command, format="xml"):
filename = (
command.replace(
'<?xml version="1.0" encoding="UTF-8"?><Request MajorVersion="1" MinorVersion="0">',
"",
)
.replace("</Request>", "")
.replace("<", "")
.replace(">", "_")
.replace("/", "")
.replace("\n", "")
.replace(".", "_")
.replace(" ", "_")
.replace('"', "_")
.replace("=", "_")
.replace("$", "")
.replace(":", "")
.replace("!", "")[:150]
)
curr_dir = os.path.dirname(os.path.abspath(__file__))
filename = "{filename}.{fmt}".format(filename=filename, fmt=format)
fullpath = os.path.join(curr_dir, "mock", filename)
with open(fullpath) as file_data:
return file_data.read()
def find_prompt(self):
return self.get_mock_file("\n", format="txt")
def send_command(
self,
command_string,
expect_string="",
read_timeout=10,
strip_prompt=True,
strip_command=True,
):
return self.get_mock_file(command_string)
def send_command_timing(self, command_string, **kvargs):
return self.get_mock_file(command_string)
def receive_data_generator(self):
return ["", ""] # to have an iteration inside private method _netmiko_recv
def send_command_expect(
self,
command_string,
expect_string=None,
delay_factor=0.2,
max_loops=500,
auto_find_prompt=True,
strip_prompt=True,
strip_command=True,
):
# for the moment returns the output from send_command only
# this may change in time
return self.send_command(command_string)
class _MockedIOSXRDevice(IOSXR):
"""
    Overrides only the very basic methods of the main device driver that cannot be mocked.
"""
def open(self):
self.device = _MockedNetMikoDevice()
self._cli_prompt = self.device.find_prompt()
self._enter_xml_mode()
def is_alive(self):
return True
class TestIOSXRDevice(unittest.TestCase):
"""
Tests IOS-XR basic functions.
"""
HOSTNAME = "localhost"
USERNAME = "vagrant"
PASSWORD = "vagrant"
PORT = 12205
    TIMEOUT = 0.1  # for tests, smaller values are preferred
LOCK = False
LOG = sys.stdout
MOCK = True
def __repr__(self):
return (
"Connected as {user}@{host}:{port}, timeout is {tout}".format(
user=self.USERNAME,
host=self.HOSTNAME,
port=self.PORT,
tout=self.TIMEOUT,
)
if not self.MOCK
else "Simulates device behaviour using mocked data."
)
__str__ = __repr__
@classmethod
def setUpClass(cls):
"""
Opens the connection with the IOS-XR device.
"""
if cls.MOCK:
__cls = _MockedIOSXRDevice
else:
__cls = IOSXR
cls.device = __cls(
cls.HOSTNAME,
cls.USERNAME,
cls.PASSWORD,
port=cls.PORT,
lock=cls.LOCK,
logfile=cls.LOG,
timeout=cls.TIMEOUT,
)
cls.device.open()
@classmethod
def tearDownClass(cls):
"""
Closes the connection with the device.
"""
cls.device.close()
def test_mock_lock_connection_open(self):
if self.MOCK:
self.device.lock_on_connect = True
# because there's one single mock file
# and it is already used for the lock test
            # will test if it raises LockError on connect
            self.assertRaises(LockError, self.device.lock)
            self.device.lock_on_connect = False
            # enough to see that it will try to lock during connect
def test_mock_close(self):
"""Testing if unlocking when connection is closed"""
if self.MOCK:
self.device.locked = True
self.device.close()
self.assertFalse(self.device.locked, msg="Cannot unlock the DB.")
def test_execute_rpc_method(self):
"""Testing private method _execute_rpc"""
self.assertIsInstance(
self.device._execute_rpc(
"<Get><Configuration><NTP></NTP></Configuration></Get>"
),
ET._Element,
msg="Privat emethod _execute_rpc did not return a valid XML object.",
)
    def test__getattr__show_(self):
        """Testing special attribute __getattr__ against a valid show command"""
        self.assertIsInstance(
            self.device.show_ntp_ass(),
            str,
            "Special attribute __getattr__ did not return a valid string.",
        )
    def test__getattr__show_args(self):
        """Testing special attribute __getattr__ against a valid show command with arguments"""
        self.assertIsInstance(self.device.show_ntp("ass"), str)
def test_acquire_xml_agent(self):
"""Testing if able to acquire the XML agent."""
self.device._lock_xml_agent(time.time())
self.assertTrue(self.device._xml_agent_locker.locked())
self.device._unlock_xml_agent()
def test_acquire_locked_agent_raises_timeout_error(self):
"""Testing if trying to acquire the XML agent while locked raises TimeoutError."""
self.device._lock_xml_agent(time.time()) # acquiring
self.assertRaises(
TimeoutError,
self.device._lock_xml_agent, # trying to acquire again
time.time(),
)
self.device._unlock_xml_agent() # releasing back
def test_release_xml_agent(self):
"""Testing releasing of XML agent."""
self.device._lock_xml_agent(time.time())
self.assertTrue(self.device._xml_agent_locker.locked())
self.device._unlock_xml_agent()
self.assertFalse(self.device._xml_agent_locker.locked())
def test_in_cli_mode(self):
"""Testing the private method _in_cli_mode."""
self.assertTrue(self.device._in_cli_mode())
def test__getattr_show_config(self):
"""Testing special attribute __getattr___ against valid show config command"""
self.assertIsInstance(self.device.show_run_ntp(config=True), str)
def test__getattr__no_show(self):
"""Test special attribute __getattr__ agains a no-show command"""
raised = False
try:
self.device.configure_exclusive()
except AttributeError:
raised = True
self.assertTrue(raised)
def test_make_rpc_call_returns_XML(self):
"""Test if public method make_rpc_call returns str"""
self.assertIsInstance(
self.device.make_rpc_call(
"<Get><Configuration><NTP></NTP></Configuration></Get>"
),
binary_type,
)
def test_acquired_xml_agent(self):
"""
        Testing if TimeoutError is raised when the XML agent is already acquired, and that it is
        released when the exception is thrown
"""
self.device._lock_xml_agent(time.time()) # acquiring the XML agent
self.assertRaises(
TimeoutError,
self.device.make_rpc_call,
"<Get><Operational><SystemTime/><PlatformInventory/></Operational></Get>",
)
self.assertFalse(
self.device._xml_agent_locker.locked()
) # Exception raised => xml agent released
def test_try_to_read_till_timeout(self):
"""Testing if will try to read from the device till time out"""
if self.MOCK:
# hard to reproduce without mock data
# as this event is not deterministic
self.assertRaises(
TimeoutError, self.device.make_rpc_call, "<This/><Does/><Not/><Exist/>"
)
def test_multiple_read_attempts_till_timeout(self):
"""Testing if will try to read non-empty replies from the device till time out"""
if self.MOCK:
# hard to reproduce without mock data
# as this event is not deterministic
self.assertRaises(
TimeoutError, self.device.make_rpc_call, "<Empty/><Reply/>"
)
def test_iterator_id_raises_IteratorIDError(self):
"""Testing if reply containing the IteratorID attribute raises IteratorIDError"""
self.device.load_candidate_config(config="xml agent tty iteration on size 1")
# minimum iteration size
self.device.commit_config(comment="pyIOSXR-test_xml-agent-iteration-on")
# turning on iteration
# and a very small value
# requesting something that we know for sure will be a big output
self.assertRaises(
IteratorIDError,
self.device.make_rpc_call,
"<Get><Operational><IPV4Network></IPV4Network></Operational></Get>",
)
self.device.rollback()
# going to prev state
def test_channel_acquired_enter_xml_mode(self):
"""Test if not raises ConnectError when the channel is busy with other requests"""
self.device._lock_xml_agent()
self.assertIsNone(self.device._enter_xml_mode())
def test_truncated_response_raises_InvalidXMLResponse(self):
"""Testing if truncated XML reply raises InvalidXMLResponse"""
if self.MOCK:
# hard to reproduce without mock data
# as this event is not deterministic
self.assertRaises(
InvalidXMLResponse,
self.device._execute_rpc,
"<Get><Configuration><Fake/></Configuration></Get>",
)
def test_iosxr_bug_0x44318c06(self):
"""Tests if IOS-XR bug returns error 0x44318c06 and raise XMLCLIError"""
if self.MOCK:
# hard to reproduce this without mock data
# as this event is not deterministic
self.assertRaises(
XMLCLIError,
self.device._execute_config_show,
"show commit changes diff",
)
def test_empty_reply_raises_TimeoutError(self):
"""Testing if empty reply raises TimeoutError"""
if self.MOCK:
# hard to reproduce this without mock data
# as this event is not deterministic
self.assertRaises(TimeoutError, self.device._execute_rpc, "<Empty/>")
def test_multiple_requests_raise_0xa3679e00(self):
"""Testing if simultaneuous requests trigger XMLCLIError"""
if self.MOCK:
self.assertRaises(
XMLCLIError,
self.device._execute_rpc,
"<Get><Operational><ARP></ARP></Operational></Get>",
)
else:
            # would need multiple threads sending a couple of simultaneous requests to the device
pass
def test_execute_show(self):
"""Testing private method _execute_show"""
self.assertIsInstance(self.device._execute_show("show ntp ass"), str)
def test_execute_invalid_show_raises_InvalidInputError(self):
"""Testing if invalid show command raises InvalidInputError"""
self.assertRaises(InvalidInputError, self.device._execute_show, "sh fake")
def test_execute_config_show(self):
"""Testing private method _execute_config_show"""
self.assertIsInstance(self.device._execute_config_show("show run ntp"), str)
def test_execute_invalid_config_show_raises_InvalidInputError(self):
"""Testing if invalid config show command raises InvalidInputError"""
self.assertRaises(
InvalidInputError, self.device._execute_config_show, "sh run fake"
)
def test_lock_raises_LockError(self):
"""Tests if DB already locked raises LockError"""
if self.MOCK:
self.assertRaises(LockError, self.device.lock)
self.assertFalse(self.device.locked)
else:
self.device.unlock() # make sure the config is not locked
same_device = IOSXR(
self.HOSTNAME,
self.USERNAME,
self.PASSWORD,
port=self.PORT,
lock=self.LOCK,
logfile=self.LOG,
timeout=self.TIMEOUT,
)
same_device.open()
same_device.lock()
# the other instance locks the config DB
try:
# trying to acquire the config DB
self.device.lock()
except LockError:
self.assertFalse(self.device.locked)
else:
self.assertTrue(self.device.locked)
same_device.close()
def test_unlock(self):
"""Testing unlock feature"""
if self.MOCK:
self.device.lock = True # make sure it is locked
self.device.unlock()
self.assertFalse(self.device.locked)
else:
# make sure this process acquires the config DB
self.device.lock()
try:
self.device.unlock()
except UnlockError:
# still locked
self.assertTrue(self.device.locked)
else:
# not locked anymore
self.assertFalse(self.device.locked)
def _load_dummy_config(self):
"""Helper that loads some dummy data before committing."""
config = """
ntp peer 172.17.17.1
"""
return self.device.load_candidate_config(config=config)
def test_load_invalid_config_raises_InvalidInputError(self):
"""Testing if loading config with mistakes raises InvalidInputError"""
self.assertRaises(
InvalidInputError,
self.device.load_candidate_config,
config="ntp beer 256.257.258.259",
)
self.device.discard_config()
def test_load_candidate_config_file(self):
"""Testing loading candidate config from file"""
self.assertIsNone(
self.device.load_candidate_config(
filename=os.path.join(
os.path.dirname(os.path.abspath(__file__)), "mock", "good.cfg"
)
)
)
def test_load_invalid_candidate_config_file_raises_InvalidInputError(self):
"""Testing if loading invalid config from a file raises InvalidInputError"""
self.assertRaises(
InvalidInputError,
self.device.load_candidate_config,
filename=os.path.join(
os.path.dirname(os.path.abspath(__file__)), "mock", "bad.cfg"
),
)
def test_load_config(self):
"""Testing if able to load candidate config, then check commit diff and discard changes"""
self._load_dummy_config()
self.assertIsInstance(
self.device.get_candidate_config(),
str,
msg="Unable to retrieve the candidate config",
)
self.assertIsInstance(
self.device.get_candidate_config(merge=True),
str,
msg="Unable to retrieve merge candidate config",
)
self.assertIsInstance(
self.device.get_candidate_config(formal=True),
str,
msg="Unable to retrieve formal candidate config",
)
compare_result = self.device.compare_config()
self.assertIsInstance(
compare_result, str, msg="Unable to compare running and candidate config"
)
# test if the result is string
self.assertGreater(len(compare_result), 0, msg="No config changes applied.")
# test if len > 0
# discarding config
self.device.discard_config()
if not self.MOCK:
# will get the same mock file as above
self.assertEqual(
len(self.device.compare_config()), 0, msg="Unable to discard changes"
)
def test_commit_config(self):
"""Testing commit config"""
self._load_dummy_config()
self.assertIsNone(self.device.commit_config())
self.device.rollback()
def test_commit_config_message(self):
"""Testing commit config with comment message"""
self._load_dummy_config()
self.assertIsNone(self.device.commit_config(comment="good"))
self.device.rollback()
def test_commit_config_label(self):
"""Testing commit config with label"""
self._load_dummy_config()
self.assertIsNone(self.device.commit_config(label="test"))
self.device.rollback()
def test_commit_config_confirmed(self):
"""Testing commit confirmed"""
self._load_dummy_config()
self.assertIsNone(self.device.commit_config(confirmed=60))
self.device.rollback()
def test_commit_config_confirmed_raise_InvalidInputError(self):
"""Testing if incorrect value for confirm time raises InvalidInputError"""
self.assertRaises(InvalidInputError, self.device.commit_config, confirmed=1)
def test_commit_empty_buffer_raises(self):
"""Testing if trying to commit empty changes raises CommitError"""
self.assertRaises(CommitError, self.device.commit_config, comment="empty")
def test_commit_after_other_session_commit(self):
"""Testing if trying to commit after another process commited does not raise CommitError"""
if self.MOCK:
# mock data contains the error message we are looking for
self.assertIsNone(self.device.commit_config(comment="parallel"))
else:
            # to test this we will need to apply changes to the same device
# through a different SSH session
same_device = IOSXR(
self.HOSTNAME,
self.USERNAME,
self.PASSWORD,
port=self.PORT,
lock=self.LOCK,
logfile=self.LOG,
timeout=self.TIMEOUT,
)
same_device.open()
# loading something
same_device.load_candidate_config(
config="interface MgmtEth0/RP0/CPU0/0 description testing parallel commits"
)
# committing
same_device.commit_config(comment="pyIOSXR-test_parallel_commits")
# trying to load something from the test instance
self.device.load_candidate_config(
config="interface MgmtEth0/RP0/CPU0/0 description this wont work"
)
# and will fail because of the commit above
self.assertIsNone(self.device.commit_config(comment="parallel"))
# let's rollback the committed changes
same_device.rollback()
# and close the auxiliary connection
same_device.close()
# because this error was raised
self.device.close()
self.device.open()
def _prefetch_running_config_and_append(self):
"""Helper method to be used in the config-replace tests below"""
running_config = "".join(self.device.show_run().splitlines(1)[3:])
self.device.load_candidate_config(config=running_config)
self.device.load_candidate_config(config="ntp server 8.8.8.8")
def test_compare_replace_config(self):
"""Testing compare replace config"""
self._prefetch_running_config_and_append()
self.assertIsInstance(self.device.compare_replace_config(), str)
def test_commit_replace_config(self):
"""Testing commit replace config"""
self._prefetch_running_config_and_append()
self.assertIsNone(self.device.commit_replace_config())
def test_commit_replace_config_message(self):
"""Testing commit replace config with comment message"""
self._prefetch_running_config_and_append()
self.assertIsNone(self.device.commit_replace_config(comment="good"))
def test_commit_replace_config_label(self):
"""Testing commit replace config with label"""
self._prefetch_running_config_and_append()
self.assertIsNone(self.device.commit_replace_config(label="test"))
def test_commit_replace_config_confirmed(self):
"""Testing commit replace confirmed"""
self._prefetch_running_config_and_append()
self.assertIsNone(self.device.commit_replace_config(confirmed=60))
def test_commit_replace_config_confirmed_raise_InvalidInputError(self):
"""Testing if incorrect value for confirmed replace commit time raises InvalidInputError"""
self.assertRaises(
InvalidInputError, self.device.commit_replace_config, confirmed=500
)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import os
import sys
# Parse MAINTAINERS file
maintainers = {}
module_template = {'maintainers': [],
'status': '',
'files': [],
}
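# Illustrative MAINTAINERS layout this parser assumes (hypothetical entry, not taken from the
# real file): a "== module ==" header followed by M:/S:/F: lines.
#
#   == policy ==
#   M: Jane Doe <jane@example.org>
#   S: Maintained
#   F: policy/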
with open('MAINTAINERS', 'r') as maintainers_file:
for line in maintainers_file:
if line.startswith('=='):
module_name = line[3:-4]
maintainers[module_name] = copy.deepcopy(module_template)
elif line.startswith('M:'):
            maintainer_name = line[3:].strip()
            maintainers[module_name]['maintainers'].append(maintainer_name)
elif line.startswith('S:'):
status = line[3:]
maintainers[module_name]['status'] = status
elif line.startswith('F:'):
filename = line[3:-1]
maintainers[module_name]['files'].append(filename)
# Check that all files in the tree are covered in MAINTAINERS
return_value = 0
def find_directory(directory):
for module, values in maintainers.items():
if (directory + '/') in values['files']:
return
print('Directory %s not found in MAINTAINERS' % directory)
global return_value
return_value = 1
def find_file(filename):
for module, values in maintainers.items():
if filename in values['files']:
return
print('File %s not found in MAINTAINERS' % filename)
global return_value
return_value = 1
def check_directory(path):
skipped_entries = ['__init__.py', 'deprecated', '__pycache__']
for i in os.listdir(path):
if i.endswith('.pyc') or i in skipped_entries:
continue
if os.path.isdir(os.path.join(path, i)):
find_directory(i)
elif os.path.isfile(os.path.join(path, i)):
find_file(i)
check_directory('openstack/common')
sys.exit(return_value)
|
from typing import Dict, List
import json
def assert_properties_unset(metadata: Dict, must_not_be_set: Dict):
for (key, must_not_value) in must_not_be_set.items():
if key in metadata:
value = metadata[key]
if isinstance(must_not_value, dict):
assert_properties_unset(value, must_not_value)
elif isinstance(must_not_value, list):
raise Exception("Not configured to compare lists.")
else:
# It's a primitive. Shouldn't be set!
raise Exception(f"Found key '{key}' with value '{value}'.")
def assert_properties_set(metadata: Dict, must_be_set: Dict):
for (key, must_value) in must_be_set.items():
if key not in metadata:
raise Exception(f"Key '{key}' could not be found.")
value = metadata[key]
if isinstance(must_value, dict):
assert_properties_set(value, must_value)
elif isinstance(must_value, list):
_assert_all_items_set_in_list(value, must_value, key)
elif value != must_value:
raise Exception(f"Key '{key}' value is '{value}' but must be '{must_value}'.")
def _dict_item_set_in_list(must_item: Dict, values_list: List[Dict]) -> bool:
for item in values_list:
# noinspection PyBroadException
try:
assert_properties_set(item, must_item)
return True
except:
pass
return False
def _assert_all_items_set_in_list(values_list: List, must_values_list: List, key):
for must_item in must_values_list:
if isinstance(must_item, dict):
if _dict_item_set_in_list(must_item, values_list):
continue
else:
raise Exception(
f"Could not find matching item in Key '{key}' for expected item '{json.dumps(must_item)}'")
elif must_item in values_list:
continue
else:
raise Exception(
f"Could not find matching item in Key '{key}' for expected item '{must_item}'")
|
from stack_and_queue import __version__
import pytest
from stack_and_queue.stack_and_queue import Stack , Queue
def test_version():
assert __version__ == '0.1.0'
def test_push_onto_a_stack():
node = Stack()
node.push(1)
excepted =1
actual = node.top.data
assert excepted == actual
def test_push_multiple_values_onto_a_stack():
node = Stack()
node.push(1)
node.push(2)
excepted =2
actual = node.top.data
assert excepted == actual
def test_pop_off_the_stack():
node = Stack()
node.push(1)
node.push(2)
node.pop()
excepted =1
actual = node.top.data
assert excepted == actual
def test_empty_a_stack_after_multiple_pops():
node = Stack()
node.push(1)
node.push(2)
node.pop()
node.pop()
excepted =True
actual = node.is_empty()
assert excepted == actual
def test_peek_the_next_item_on_the_stack():
node = Stack()
node.push(1)
node.push(2)
excepted =2
actual = node.peek()
assert excepted == actual
def test_instantiate_an_empty_stack():
node = Stack()
assert node.is_empty()
def test_Calling_pop_or_peek_on_empty_stack_raises_exception():
node = Stack()
try:
node.pop()
except Exception as e:
assert str(e) == "empty stack"
def test_enqueue_into_a_queue():
node = Queue()
node.enqueue(1)
excepted =1 , 1
actual = node.rear.data , node.front.data
assert excepted == actual
def test_enqueue_multiple_values_into_a_queue():
node = Queue()
node.enqueue(1)
node.enqueue(2)
excepted =2 , 1
actual = node.rear.data , node.front.data
assert excepted == actual
def test_dequeue_out_of_a_queue_the_expected_value():
node = Queue()
node.enqueue(1)
node.enqueue(2)
excepted =1
actual = node.dequeue()
assert excepted == actual
def test_peek_into_a_queue_seeing_the_expected_value():
node = Queue()
node.enqueue(2)
excepted =2
actual = node.peek()
assert excepted == actual
def test_empty_a_queue_after_multiple_dequeues():
node = Queue()
node.enqueue(1)
node.enqueue(2)
node.dequeue()
node.dequeue()
excepted = True
actual = node.is_empty()
assert excepted == actual
def test_instantiate_an_empty_queue():
node = Queue()
assert node.is_empty()
def test_Calling_dequeue_or_peek_on_empty_queue_raises_exception():
node = Queue()
try:
node.peek()
except Exception as e:
assert str(e) == 'empty queue'
|
"""SCons.Sig.MD5
The MD5 signature package for the SCons software construction
utility.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import imp
import string
# Force Python to load the builtin "md5" module. If we do this with a
# normal import statement, then case-insensitive systems (Windows) get
# confused and thinks there's a case mismatch with *this* MD5.py module.
file, name, desc = imp.find_module('md5')
try:
md5 = imp.load_module('md5', file, name, desc)
finally:
if file:
file.close()
def current(new, old):
"""Return whether a new signature is up-to-date with
respect to an old signature.
"""
return new == old
try:
md5.new('').hexdigest
except AttributeError:
# The md5 objects created by the module have no native hexdigest()
# method (*cough* 1.5.2 *cough*) so provide an equivalent.
class new_md5:
def __init__(self, s):
self.m = md5.new(str(s))
#def copy(self):
# return self.m.copy()
def digest(self):
return self.m.digest()
def hexdigest(self):
h = string.hexdigits
r = ''
for c in self.m.digest():
i = ord(c)
r = r + h[(i >> 4) & 0xF] + h[i & 0xF]
return r
def update(self, s):
return self.m.update(s)
else:
new_md5 = lambda s: md5.new(str(s))
def collect(signatures):
"""
Collect a list of signatures into an aggregate signature.
signatures - a list of signatures
returns - the aggregate signature
"""
if len(signatures) == 1:
return signatures[0]
else:
return new_md5(string.join(signatures, ', ')).hexdigest()
def signature(obj):
"""Generate a signature for an object
"""
try:
gc = obj.get_contents
except AttributeError:
raise AttributeError, "unable to fetch contents of '%s'" % str(obj)
return new_md5(gc()).hexdigest()
def to_string(signature):
"""Convert a signature to a string"""
return signature
def from_string(string):
"""Convert a string to a signature"""
return string
|
# Generated by Django 2.0.5 on 2018-09-29 13:42
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('repository', '0006_auto_20180927_2213'),
]
operations = [
migrations.AlterField(
model_name='article',
name='article_classification',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='classification', to='repository.Classification', verbose_name='个人博客文章分类'),
),
migrations.AlterField(
model_name='article',
name='blog',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='article', to='repository.Blog', verbose_name='所属博客'),
),
migrations.AlterField(
model_name='article_like_dislike',
name='article',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='likes', to='repository.Article'),
),
migrations.AlterField(
model_name='article_like_dislike',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='likes', to='repository.UserInfo'),
),
migrations.AlterField(
model_name='classification',
name='blog',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='classification', to='repository.Blog', verbose_name='所属博客'),
),
migrations.AlterField(
model_name='comment',
name='article',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comment', to='repository.Article', verbose_name='评论文章'),
),
migrations.AlterField(
model_name='comment',
name='parent_id',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='repository.Comment'),
),
migrations.AlterField(
model_name='userinfo',
name='nickname',
field=models.CharField(max_length=32, unique=True, verbose_name='昵称'),
),
]
|
from google_trans_new import google_translator
from progressbar import progressbar
from random import shuffle, randint
LANGUAGES = ['af', 'sq', 'am', 'ar', 'hy', 'az', 'eu', 'be', 'bn', 'bs', 'bg', 'ca', 'ceb', 'ny', 'zh-cn', 'zh-tw', 'co', 'hr', 'cs', 'da', 'nl', 'en', 'eo', 'et', 'tl', 'fi', 'fr', 'fy', 'gl', 'ka', 'de', 'el', 'gu', 'ht', 'ha', 'haw', 'iw', 'he', 'hi', 'hmn', 'hu', 'is', 'ig', 'id', 'ga', 'it', 'ja', 'jw', 'kn', 'kk', 'km', 'ko', 'ku', 'ky', 'lo', 'la', 'lv', 'lt', 'lb', 'mk', 'mg', 'ms', 'ml', 'mt', 'mi', 'mr', 'mn', 'my', 'ne', 'no', 'or', 'ps', 'fa', 'pl', 'pt', 'pa', 'ro', 'ru', 'sm', 'gd', 'sr', 'st', 'sn', 'sd', 'si', 'sk', 'sl', 'so', 'es', 'su', 'sw', 'sv', 'tg', 'ta', 'te', 'th', 'tr', 'uk', 'ur', 'ug', 'uz', 'vi', 'cy', 'xh', 'yi', 'yo', 'zu']
with open('in.txt') as f:
txt = f.read()
lang_num = int(input(f'Enter the number of languages you would like to translate your text to before returning to your main language (there are up to {len(LANGUAGES)} languages): '))
translator = google_translator()
src_lang = translator.detect(txt)[0]
is_right_src = input(f'Detected source language is "{src_lang}". Is it true? [Y/n] ').lower()
if is_right_src != 'y' and is_right_src != '':
src_lang = input('Enter input lang in short format("en" for english): ').lower()
langs = LANGUAGES.copy()
shuffle(langs)
langs = [src_lang, *langs[:lang_num], src_lang]
lang_to_txt, src_txt = {}, txt
for i in progressbar(range(len(langs) - 1)):
txt = translator.translate(txt, lang_src=langs[i], lang_tgt=langs[i + 1])
lang_to_txt[langs[i + 1]] = txt
print('The result is: ', end='\n\n')
print(txt, end='\n\n')
list_langs = lambda: print('Your text has made it through these languages: \n' + ' -> '.join(langs), end='\n\n')
list_langs()
lang = src_lang
while True:
    print('Enter the language from the list above that you\'d like to view the text in.')
info_lang = input('(\'src\' to view the meaning in source language): ')
if info_lang == 'src':
txt = translator.translate(txt, lang_src=lang, lang_tgt=src_lang)
print(f'\n{txt}', end='\n\n')
elif (txt := lang_to_txt.get(info_lang)) is not None:
print(f'\n{txt}', end='\n\n')
else:
        print('\n!!! Oops, there is no such language, try again !!!', end='\n\n')
list_langs()
lang = info_lang
|
from setuptools import setup
setup(
name="Flask-WTF",
install_requires=["Flask", "WTForms", "itsdangerous"],
extras_require={"email": ["email-validator"]},
)
|
#!/usr/bin/python3
import math
import subprocess
import pandas as pd
import os
import sys
from optparse import OptionParser
from bio_processing import bio_processing
#option_parser
optParser = OptionParser()
print("check file path: ")
print(os.path.dirname(os.path.abspath('__file__')))
#testsubprocess
project_name="TEST"
input_dir=os.path.dirname(os.path.abspath('__file__'))
print(os.listdir(input_dir))
workdir_name=project_name + "_workdir"
print(f"Checking the input directory:{input_dir}")
subprocess.run(["cd", input_dir])
subprocess.run(["mkdir", workdir_name])
subprocess.run(["ls", "-l"])
#write_nameList
input_dir=os.path.dirname(os.path.abspath('__file__'))
print(input_dir)
print(os.listdir(input_dir))
with open(input_dir + "/test.list", "w") as f:
for i in (os.listdir(input_dir)):
name = str(i)
f.write(name + "\n")
a=bio_processing("TEST",12, '/Users/gaoyang/Documents/COMMENbT/python_learningNote', 3)
a.process_test()
a.fileRead()
#run_PathoFact
# def run_pathofact(self)
# environment_PathoFact=
# subprocess.run(["conda", "activate", environment_PathoFact])
# subprocess.run(["snakemake", "-s", "Snakefile", "--cores", "20", "--reason" "-p" "--use-conda"]) |
data['2012'].mean().plot(kind='bar') |
# -*- coding: utf-8 -*-
'''
Thanks to Korakot Chaovavanich for the word_frequency code from https://www.facebook.com/photo.php?fbid=363640477387469&set=gm.434330506948445&type=3&permPage=1
'''
from __future__ import absolute_import,division,unicode_literals,print_function
import re
import requests
import os
import codecs
def get_word_frequency_all():
    '''
    get_word_frequency_all()
    Fetches the TTC word-frequency data for use,
    returning it as [(word, frequency), ...]
    Data from https://raw.githubusercontent.com/korakot/thainlp/master/ttc_freq.txt
    '''
url="https://raw.githubusercontent.com/korakot/thainlp/master/ttc_freq.txt"
    path = os.path.join(os.path.expanduser("~"), 'pythainlp-data')
if not os.path.exists(path):
os.makedirs(path)
path = os.path.join(path, 'ttc_freq.txt')
if not os.path.exists(path):
response = requests.get(url)
with open(path, 'wb') as f:
f.write(response.content)
f.close()
with codecs.open(path, 'r',encoding='utf8') as f:
lines = f.read().splitlines()
f.close()
listword=[]
for x in lines:
listindata=x.split(" ")
listword.append((listindata[0],listindata[1]))
return listword |
# ---------------------------------------------------------------------
# Vendor: GWD (GW Delight Technology Co., Ltd) http://www.gwdelight.com
# OS: GFA
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# NOC modules
from noc.core.profile.base import BaseProfile
class Profile(BaseProfile):
name = "GWD.GFA"
pattern_more = [(rb"Press any key to continue", b" ")]
pattern_syntax_error = rb"% Unknown command.|% Command incomplete."
pattern_unprivileged_prompt = rb"^\S+?>"
pattern_prompt = rb"^\S+\(config\)?#"
command_super = b"enable"
command_disable_pager = "screen lines 0"
def convert_interface_name(self, interface):
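        # Illustrative mapping (assumed from the prefix check below): "eth0/1" -> "0/1", "pon0/2" -> "0/2"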
if interface.startswith("eth") or interface.startswith("pon"):
interface = interface[3:]
return interface
|
# Generated by Django 2.1.5 on 2019-03-27 18:47
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('gsndb', '0003_auto_20190326_2144'),
]
operations = [
migrations.RenameField(
model_name='student',
old_name='birth_date',
new_name='birthdate',
),
migrations.RenameField(
model_name='student',
old_name='program',
new_name='current_program',
),
]
|
from setuptools import setup, find_packages
from glob import glob
from distutils.extension import Extension
# from Cython.Distutils import build_ext
from os.path import pathsep
import numpy as np
try:
from Cython.Build import cythonize
except ImportError:
cythonize = False
# Cython extensions
ext = '.pyx' if cythonize else '.c'
ext_cpp = '.pyx' if cythonize else '.cpp'
extensions = [
Extension('dstrf.opt', [f'dstrf/opt{ext}']),
Extension('dstrf.dsyevh3C.dsyevh3py', [f'dstrf/dsyevh3C/dsyevh3py{ext_cpp}'], include_dirs=['dsyevh3C']),
]
if cythonize:
extensions = cythonize(extensions)
setup(
name="dstrf",
description="MEG/EEG source localization tool",
    long_description='add-on module to eelbrain for neural TRF estimation. '
                     'GitHub: https://github.com/proloyd/DstRF',
version="0.3dev",
python_requires='>=3.6',
install_requires=[
'eelbrain',
],
# metadata for upload to PyPI
author="Proloy DAS",
author_email="proloy@umd.com",
license="apache 2.0",
# cmdclass={'build_ext': build_ext},
include_dirs=[np.get_include()],
packages=find_packages(),
ext_modules=extensions,
url='https://github.com/proloyd/DstRF',
project_urls={
"Source Code": "https://github.com/proloyd/DstRF/archive/0.2.tar.gz",
}
)
|
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.anovaglm import H2OANOVAGLMEstimator
# Simple test to check correct NA handling skip.
def testFrameTransform():
train = h2o.import_file(path=pyunit_utils.locate("smalldata/prostate/prostate_complete.csv.zip"))
y = 'CAPSULE'
x = ['AGE','VOL','DCAPS']
train[10,2] = None
train[20,7] = None
train[y] = train[y].asfactor()
# build model choosing skip
model1 = H2OANOVAGLMEstimator(family='binomial', lambda_=0, missing_values_handling="skip")
model1.train(x=x, y=y, training_frame=train)
# build model deleting the two rows with missing values
    train = train.drop([10, 20], axis=0)
model2 = H2OANOVAGLMEstimator(family='binomial', lambda_=0, missing_values_handling="skip")
model2.train(x=x, y=y, training_frame=train)
# the two models should be the same, compare the model summaries
summary1 = model1._model_json['output']['model_summary']
summary2 = model2._model_json['output']['model_summary']
pyunit_utils.assert_H2OTwoDimTable_equal_upto(summary1, summary2, summary1.col_header)
if __name__ == "__main__":
pyunit_utils.standalone_test(testFrameTransform)
else:
testFrameTransform()
|
from django.urls import path
from .views import ProjectListView, ProjectDetailView
app_name = 'upload'
urlpatterns = [
path('', ProjectListView.as_view(), name='project-list'),
path('projects/<slug:slug>/', ProjectDetailView.as_view(), name='project-detail'),
] |
import maya.cmds as cmds
from functools import partial
# Creates a connection between the 2 selected objects with a reverse node to drive to values in opposite directions of each other based on one attr.
# This is mostly used for having a single attr drive the 2 weights on a constraint with 2 sources in opposite of each other.
# CRA FEATURE: going forward, add the ability to have 3 or even more inputs driven by the single output.
# CRA FEATURE: Add check boxes and filters for filtering the attr lists better.
# To use, select the driver, then the driven, then run the tool.
textScrollList = None
menuDriven0 = None
menuDriven1 = None
def ReverseAttrHookupUI(*args):
print "UI"
selection = cmds.ls(sl=True)
if len(selection) != 2:
cmds.confirmDialog(icon = "warning!!", title = "Reverse Attr Hookup Tool", message = "You must select only 2 objects. Driver then Driven.")
return
else:
driver = selection[0]
driven = selection[1]
attrsListDriver = cmds.listAttr(driver, o=True, c=True, u=True, s=True, k=False)
attrsListDriven = cmds.listAttr(driven, o=True, c=True, u=True, s=True, k=True)
if cmds.window("ReverseAttrHookupWin", exists = True):
cmds.deleteUI("ReverseAttrHookupWin")
window = cmds.window("ReverseAttrHookupWin", w=4300, h=500, title="Reverse Attr Hookup Tool", mxb=True, mnb=True, sizeable=False, rtf=True)
mainLayout = cmds.columnLayout(w = 300, h = 400, rs = 5, co = ["both", 5])
cmds.text(l=driver)
textScrollList = cmds.textScrollList("artProjCharacterList", w = 290, h = 300, parent = mainLayout, ra=True, ann="This is the attribute that will drive the Driven attributes listed below.")
cmds.textScrollList(textScrollList, e=True, a=attrsListDriver)
menuDriven0 = cmds.optionMenuGrp(l=driven+" 0", cw=[1, 175], ann="This attribute will be driven as a direct connection from the driver. When the driver is 1, this input will be 1. When the driver is 0, this input will be 0.")
for attr in attrsListDriven:
cmds.menuItem(l=attr)
menuDriven1 = cmds.optionMenuGrp(l=driven+" 1", cw=[1, 175], ann="This attribute will be driven by the driver through a reverse node. When the driver is 1, this input will be 0. When the driver is 0, this input will be 1.")
for attr in attrsListDriven:
cmds.menuItem(l=attr)
buttonUpdate = cmds.button(w = 290, h = 40, label = "Update", c=ReverseAttrHookupUI, ann = "Refresh the UI.", parent = mainLayout)
buttonConnect = cmds.button(w = 290, h = 40, label = "Connect", c=partial(ReverseAttrHookup, driver, driven, textScrollList, menuDriven0, menuDriven1), ann = "Connect the Selected Attrs.", parent = mainLayout)
cmds.showWindow(window)
# This script hooks up the attrs
def ReverseAttrHookup(driver, driven, textScrollList, menuDriven0, menuDriven1, *args):
print "SCRIPT"
driverAttr = cmds.textScrollList(textScrollList, q=True, si=True)[0]
drivenAttr0 = cmds.optionMenuGrp(menuDriven0, q=True, v=True)
drivenAttr1 = cmds.optionMenuGrp(menuDriven1, q=True, v=True)
print driverAttr
print drivenAttr0
print drivenAttr1
reverseNode = cmds.shadingNode("reverse", asUtility=True, name=driver+"_reverse")
cmds.connectAttr(driver+"."+driverAttr, driven+"."+drivenAttr0, f=True)
cmds.connectAttr(driver+"."+driverAttr, reverseNode+".inputX", f=True)
cmds.connectAttr(reverseNode+".outputX", driven+"."+drivenAttr1, f=True)
|
import unittest
import pytest
import numpy as np
from pathlib import Path
from rearrangement.datasets_cfg import SYNTHETIC_DATASETS, REAL_DATASETS, DS_DIR
from rearrangement.dataset.lmdb import LMDBDataset
from rearrangement.dataset.statedataset import StateDataset
from rearrangement.dataset.real_scene_dataset import RealSceneDataset
ROOT = Path('rearrangement/test')
SAMPLE_ROOT = Path('rearrangement/test/datasets_samples/')
SAMPLE_ROOT.mkdir(exist_ok=True)
class TestDataset(unittest.TestCase):
@pytest.mark.train
def test_synthetic_datasets(self):
for ds in SYNTHETIC_DATASETS:
p = Path(DS_DIR) / ds
scene_ds = LMDBDataset(p)
ds = StateDataset(scene_ds)
for _ in range(5):
ds[next(iter(ds.make_sampler()))]
@pytest.mark.eval
def test_real_datasets(self):
for ds in REAL_DATASETS[:1]:
p = Path(DS_DIR) / ds
scene_ds = RealSceneDataset(p)
ds = StateDataset(scene_ds, epoch_size=len(scene_ds))
for _ in range(5):
ds[np.random.randint(len(ds))]
|
import json
def tojsonsingle(result, file_path="./data.json"):
data = {}
for i in range(len(result['text'])):
data[i] = result['text'][i]['predicted_labels']
with open(file_path, 'w', encoding='utf-8') as outfile:
json.dump(data, outfile, ensure_ascii=False)
def tojsondual(result1, result2, file_path="./data.json"):
data = {}
for i in range(len(result1['text'])):
if result1['text'][i]['confidence_score'] >= result2['text'][i]['confidence_score']:
data[i] = result1['text'][i]['predicted_labels']
else:
data[i] = result2['text'][i]['predicted_labels']
with open(file_path, 'w', encoding='utf-8') as outfile:
json.dump(data, outfile, ensure_ascii=False)
def tojsontrio(result1, result2, result3, file_path="./data.json"):
data = {}
for i in range(len(result1['text'])):
if result1['text'][i]['confidence_score'] >= result2['text'][i]['confidence_score']:
maxresult = result1
else:
maxresult = result2
if maxresult['text'][i]['confidence_score'] >= result3['text'][i]['confidence_score']:
data[i] = maxresult['text'][i]['predicted_labels']
else:
data[i] = result3['text'][i]['predicted_labels']
with open(file_path, 'w', encoding='utf-8') as outfile:
json.dump(data, outfile, ensure_ascii=False)
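# Illustrative input shape these helpers assume (hypothetical values; the keys mirror the
# indexing above, i.e. result['text'][i]['predicted_labels'] and ['confidence_score']):
#
#   result = {'text': [{'predicted_labels': ['B-LOC', 'O'], 'confidence_score': 0.93}]}
#   tojsonsingle(result, file_path='./data.json')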
|
import twitter_mention_frequency, twitter_get_user_timeline
import NLP_stats_repackaged, Doc_to_Vec_Refactored
import network_analysis, network_crawler
import Cluster_Analysis_NLP_with_Word_Clouds
import twitter_make_geojson, twitter_make_map
from nltk import FreqDist
import sys
from subprocess import call
from os import path
import time
#Assuming you're starting with a profile that actually exists...
first_handle = sys.argv[1] #Entering the Twitter handle of person you want to start with.
network_neighbors = int(sys.argv[2]) #The number of people to compare with per node at each level
levels = int(sys.argv[3]) #The degrees of separation of how far one would want to get
def downloading_json(handle): #This function checks if the .jsonl file has been downloaded to the directory.
file_name = 'user_timeline_' + handle + '.jsonl' #Converts the handle to the filename of the existing json file.
for i in range(2):
        if not path.exists(file_name):
            time.sleep(6) # A sleep is needed here: if the program isn't given enough time to download a given json file, downloads keep restarting until the end of the loop, which takes much longer.
twitter_get_user_timeline.getting_timeline(handle) #Downloads the json file if it doesn't exist in the directory
continue
else:
break
    return file_name #Returns the json filename for this handle.
def getting_file_names(most_mentioned_list):
    f_name_list = [] #List of json files for the closest neighbors...
for i in range(network_neighbors): #The function checks for the most mentioned neighbors specified by network_neighbors parameter.
try:
f_name_list.append('user_timeline_' + most_mentioned_list[i][0] + '.jsonl')
downloading_json(most_mentioned_list[i][0]) #Wait for program to verify that the requisite .jsonl files associated with the user has been downloaded first before proceeding
        except Exception as e: #Catch any download failure so a single missing file does not crash the program.
print(e)
continue
return f_name_list
if __name__ == '__main__':
json_filenames = network_crawler.network_crawler(first_handle, levels)
json_filenames = json_filenames[len(json_filenames)-1]
doc_model = Doc_to_Vec_Refactored.doc_similarity(json_filenames)
cluster_list, cluster_coordinates, graph_figure, ax = network_analysis.network_building(json_filenames, doc_model, first_handle)
geo_list = Cluster_Analysis_NLP_with_Word_Clouds.frequency_analysis(cluster_list, cluster_coordinates, graph_figure, ax, first_handle)
larger_coordinates_list, larger_text_list, larger_name_list, geo_list = twitter_make_geojson.main_geo_creator(geo_list)
twitter_make_map.make_map(larger_coordinates_list, larger_text_list, larger_name_list, geo_list, first_handle)
print(cluster_list)
|
import sys
if len(sys.argv) != 4:
print("must enter 3 values")
exit(1)
cost = float(sys.argv[1])
tip = float(sys.argv[2])
tax = float(sys.argv[3])
tipPercent = (cost * tip) / 100
taxPercent = (cost * tax) / 100
totalCost = cost + tipPercent + taxPercent
# Round the total to the nearest dollar: keep the whole-dollar part and add 1 if the cents are 50 or more.
l = int(totalCost)
m = (totalCost * 100) % 100
if m >= 50:
    l = l + 1
print(f'The total meal cost is {l} dollars.')
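# Worked example (hypothetical input): cost=12.00, tip=20, tax=8
#   tipPercent = 12.00 * 20 / 100 = 2.40
#   taxPercent = 12.00 * 8 / 100  = 0.96
#   totalCost  = 12.00 + 2.40 + 0.96 = 15.36 -> the cents part is 36, so the total rounds down to 15.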
|
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.postgres.search import SearchVector
from django.shortcuts import redirect
from django.urls import reverse_lazy
from django.utils import timezone
from django.utils.translation import ugettext as _
from django.views.generic import View
from django.views.generic.detail import (DetailView, SingleObjectMixin,
SingleObjectTemplateResponseMixin)
from django.views.generic.edit import BaseUpdateView, CreateView, DeleteView
from django.views.generic.list import ListView
from timetracker.activities.forms import ActivityFilterForm, ActivityForm
from timetracker.activities.models import Activity
class ActivityQuerySetMixin:
"""
    Constrain the queryset to return only activities created by the
    current user.
"""
model = Activity
def get_queryset(self):
return super().get_queryset().filter(user=self.request.user)
class ActivitySingleObjectMixin(ActivityQuerySetMixin, SingleObjectMixin):
"""
Get SingleObjectMixin with constraints of ActivityQuerysetMixin.
"""
pass
class ActivityCreateView(LoginRequiredMixin, CreateView):
"""
Create a new activity.
"""
form_class = ActivityForm
template_name = 'activities/activity_create.html'
success_url = reverse_lazy('activities:list')
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs.update({'user': self.request.user})
return kwargs
def form_valid(self, form):
response = super().form_valid(form)
        messages.success(self.request, _('Successfully created an activity.'))
return response
class ActivityListView(LoginRequiredMixin, ActivityQuerySetMixin, ListView):
"""
    List and search the activities.
"""
def get_queryset(self):
start_of_range = self.filter_form.cleaned_data.get('start_date')
end_of_range = self.filter_form.cleaned_data.get('end_date')
qs = super().get_queryset()
if start_of_range and end_of_range:
if start_of_range > end_of_range:
start_of_range, end_of_range = end_of_range, start_of_range
start_of_range = timezone.datetime.combine(
start_of_range, timezone.datetime.min.time())
end_of_range = timezone.datetime.combine(
end_of_range, timezone.datetime.max.time())
qs &= super().get_queryset().filter(
start_datetime__gte=start_of_range,
start_datetime__lte=end_of_range)
search_query = self.filter_form.cleaned_data['search_query']
if search_query:
# Search query using Postgres full-text search.
qs &= super().get_queryset().annotate(
search=SearchVector(
'activity', 'project',
'description'), ).filter(search=search_query)
return qs
def get_filter_form(self):
"""
Get a form object of the filters.
"""
filter_form_data = self.request.GET.copy()
filter_form_data.setdefault(
'start_date',
timezone.now().date() - timezone.timedelta(days=7))
filter_form_data.setdefault('end_date', timezone.now().date())
return ActivityFilterForm(filter_form_data)
def get(self, request, *args, **kwargs):
# Initialise filter form on the get request.
self.filter_form = self.get_filter_form()
self.filter_form.is_valid()
return super().get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
# Add filter form to the template context so it can be
# rendered on the index template.
context['filter_form'] = self.filter_form
return context
class ActivityDetailView(LoginRequiredMixin, ActivitySingleObjectMixin,
DetailView):
"""
Display information about singular activity.
"""
pass
class ActivityUpdateView(LoginRequiredMixin, ActivitySingleObjectMixin,
SingleObjectTemplateResponseMixin, BaseUpdateView):
"""
Update an existing activity.
"""
form_class = ActivityForm
template_name = 'activities/activity_update.html'
def form_valid(self, form):
response = super().form_valid(form)
messages.success(self.request, _('Successfully updated an activity.'))
return response
class ActivityStopView(ActivitySingleObjectMixin, View):
"""
Stop an active activity.
"""
def post(self, *args, **kwargs):
obj = self.get_object()
obj.stop()
messages.success(self.request, _('Successfully stopped an activity.'))
return redirect('activities:list')
class ActivityDeleteView(ActivitySingleObjectMixin, DeleteView):
"""
Delete an activity object.
"""
success_url = reverse_lazy('activities:list')
def delete(self, request, *args, **kwargs):
response = super().delete(request, *args, **kwargs)
messages.success(self.request, _('Successfully deleted an activity'))
return response
|
from pytest import fixture
@fixture(scope="module")
def add_end_user_advisory(context, api_test_client):
api_test_client.parties.add_eua_query()
context.end_user_advisory_id = api_test_client.context["end_user_advisory_id"]
context.end_user_advisory_name = api_test_client.context["end_user_advisory_name"]
|
import numpy as np
import os
from random import Random
import glob
import sys
import imageio
import torch
from torch.utils.data import Dataset
from tqdm import tqdm
"""
This code is inspired by
HW2 of https://cs330.stanford.edu/
"""
def get_images(paths, labels, random, nb_samples=None, shuffle=True):
"""
Takes a set of character folders and labels and returns paths to image files
paired with labels.
    Args:
        paths: A list of character folders
        labels: List or numpy array of same length as paths
        random: random.Random instance used for sampling and shuffling
        nb_samples: Number of images to retrieve per character
        shuffle: Whether to shuffle the returned (label, image_path) pairs
    Returns:
        List of (label, image_path) tuples
"""
if nb_samples is not None:
sampler = lambda x: random.sample(x, nb_samples)
else:
sampler = lambda x: x
images_labels = [
(i, os.path.join(path, image))
for i, path in zip(labels, paths)
for image in sampler(os.listdir(path))
]
if shuffle:
random.shuffle(images_labels)
return images_labels
def image_file_to_array(filename, dim_input):
"""
Takes an image path and returns numpy array
Args:
filename: Image filename
dim_input: Flattened shape of image
Returns:
1 channel image
"""
image = imageio.imread(filename)
image = image.reshape([dim_input])
image = image.astype(np.float32) / 255.0
image = 1.0 - image
return image
class Datagenerator(Dataset):
def __init__(
self, num_classes, num_samples_per_class, data_folder, img_size, dataset_type
):
"""
Args:
            num_classes: Number of classes for classification
            num_samples_per_class: Number of samples per class
            data_folder: Data folder
            img_size: Image size
            dataset_type: One of "train", "val" or "test"
"""
self.num_classes = num_classes
# Multiplied by 2 to get outer and inner inputs
self.num_samples_per_class = 2 * num_samples_per_class
self.dim_input = np.prod(img_size)
self.dim_output = self.num_classes
self.dataset_type = dataset_type
self.random = Random(1)
character_folders = sorted([
os.path.join(data_folder, family, character)
for family in os.listdir(data_folder)
if os.path.isdir(os.path.join(data_folder, family))
for character in os.listdir(os.path.join(data_folder, family))
if os.path.isdir(os.path.join(data_folder, family, character))
])
np.random.seed(111)
self.random.shuffle(character_folders)
num_val = 100
num_train = 1100
if dataset_type == "train":
self.character_folders = character_folders[:num_train]
elif dataset_type == "val":
self.character_folders = character_folders[num_train : num_train + num_val]
elif dataset_type == "test":
self.character_folders = character_folders[num_train + num_val :]
else:
raise ("Wrong dataset type: valid types are train, test and val")
self.image_cache = self.load_images(self.character_folders, self.dim_input)
def __getitem__(self, index):
sampled_character_folders = self.random.sample(
self.character_folders, self.num_classes
)
labels_and_images = get_images(
sampled_character_folders,
range(self.num_classes),
random=self.random,
nb_samples=self.num_samples_per_class,
shuffle=False,
)
labels = [li[0] for li in labels_and_images]
images = [self.image_cache[li[1]] for li in labels_and_images]
ims = np.stack(images)
labels = np.reshape(labels, (self.num_classes, self.num_samples_per_class))
# labels shape = (num_classes, num_samples_per_class)
ims = np.reshape(ims, (self.num_classes, self.num_samples_per_class, -1))
# ims shape = (num_classes, num_samples_per_class, dim_input)
inner_inputs, outer_inputs = (
ims[:, 0 : self.num_samples_per_class // 2, :],
ims[:, self.num_samples_per_class // 2 :, :],
)
inner_labels, outer_labels = (
labels[:, 0 : self.num_samples_per_class // 2],
labels[:, self.num_samples_per_class // 2 :],
)
# Shuffle the order of classes in both inner and outer inputs, so that the model does not memorize the order
perm_inner = np.random.permutation(self.num_classes)
perm_outer = np.random.permutation(self.num_classes)
inner_inputs = inner_inputs[perm_inner, :]
inner_labels = inner_labels[perm_inner, :]
outer_inputs = outer_inputs[perm_outer, :]
outer_labels = outer_labels[perm_outer, :]
return {
"inner_inputs": torch.FloatTensor(inner_inputs),
"inner_labels": torch.LongTensor(inner_labels),
"outer_inputs": torch.FloatTensor(outer_inputs),
"outer_labels": torch.LongTensor(outer_labels),
}
def __len__(self):
return int(1e6)
def load_images(self, folders, dim_input):
images = dict()
for folder in tqdm(folders):
files = glob.glob(folder + "/**/*.png", recursive=True)
for f in files:
images[f] = image_file_to_array(f, dim_input)
return images
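# Usage sketch (hypothetical data folder and sizes): each __getitem__ call samples a fresh
# N-way, K-shot episode from the cached images, e.g.
#   ds = Datagenerator(num_classes=5, num_samples_per_class=1,
#                      data_folder="omniglot_resized", img_size=(28, 28), dataset_type="train")
#   batch = ds[0]
#   batch["inner_inputs"].shape  # torch.Size([5, 1, 784])
#   batch["outer_labels"].shape  # torch.Size([5, 1])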
|
import pytest
from weaverbird.backends.sql_translator.metadata import ColumnMetadata, SqlQueryMetadataManager
from weaverbird.backends.sql_translator.steps import translate_dateextract
from weaverbird.backends.sql_translator.steps.utils.query_transformation import (
get_query_for_date_extract,
)
from weaverbird.backends.sql_translator.types import SQLQuery
from weaverbird.pipeline.steps import DateExtractStep
@pytest.fixture
def query_date():
return SQLQuery(
query_name='SELECT_STEP_0',
transformed_query='WITH SELECT_STEP_0 AS (SELECT * FROM products)',
selection_query='SELECT TOTO, RAICHU, FLORIZARRE FROM SELECT_STEP_0',
metadata_manager=SqlQueryMetadataManager(
tables_metadata={
'TABLE1': {'TOTO': 'text', 'RAICHU': 'int', 'FLORIZARRE': 'text', 'DATE': 'date'}
},
),
)
def test_translate_simple_date_extract(query_date):
step = DateExtractStep(name='dateextract', column='DATE', date_info=['year'])
query = translate_dateextract(
step,
query_date,
index=1,
)
expected_transformed_query = (
"WITH SELECT_STEP_0 AS (SELECT * FROM products), DATEEXTRACT_STEP_1 AS (SELECT TOTO, RAICHU, FLORIZARRE, "
"DATE, EXTRACT(year from to_timestamp(DATE)) AS DATE_YEAR FROM SELECT_STEP_0)"
)
assert query.transformed_query == expected_transformed_query
assert (
query.selection_query
== 'SELECT TOTO, RAICHU, FLORIZARRE, DATE, DATE_YEAR FROM DATEEXTRACT_STEP_1'
)
assert query.query_name == 'DATEEXTRACT_STEP_1'
    # assert on metadata
assert query.metadata_manager.retrieve_query_metadata_columns() == {
'DATE': ColumnMetadata(
name='DATE',
original_name='DATE',
type='DATE',
original_type='date',
alias=None,
delete=False,
),
'DATE_YEAR': ColumnMetadata(
name='DATE_YEAR',
original_name='DATE_YEAR',
type='DATE',
original_type='date',
alias=None,
delete=False,
),
'FLORIZARRE': ColumnMetadata(
name='FLORIZARRE',
original_name='FLORIZARRE',
type='TEXT',
original_type='text',
alias=None,
delete=False,
),
'RAICHU': ColumnMetadata(
name='RAICHU',
original_name='RAICHU',
type='INT',
original_type='int',
alias=None,
delete=False,
),
'TOTO': ColumnMetadata(
name='TOTO',
original_name='TOTO',
type='TEXT',
original_type='text',
alias=None,
delete=False,
),
}
def test_translate_complex_date_extract(query_date):
step = DateExtractStep(
name='dateextract',
column='DATE',
date_info=[
'year',
'previousDay',
'firstDayOfPreviousYear',
'firstDayOfPreviousMonth',
'firstDayOfPreviousWeek',
'dayOfYear',
'isoYear',
'isoWeek',
'isoDayOfWeek',
'day',
'week',
],
)
query = translate_dateextract(
step,
query_date,
index=1,
)
expected_transformed_query = (
"WITH SELECT_STEP_0 AS (SELECT * FROM products), "
"DATEEXTRACT_STEP_1 AS (SELECT TOTO, RAICHU, FLORIZARRE, DATE, EXTRACT(year from to_timestamp(DATE)) AS DATE_YEAR, "
"(DATE_TRUNC(day, to_timestamp(DATE) - interval '1 day')) AS DATE_PREVIOUSDAY, "
"(TO_TIMESTAMP_NTZ(DATE_TRUNC(year, to_timestamp(DATE))) - interval '1 year') AS DATE_FIRSTDAYOFPREVIOUSYEAR, "
"(TO_TIMESTAMP_NTZ(DATE_TRUNC(month, to_timestamp(DATE))) - interval '1 month') AS DATE_FIRSTDAYOFPREVIOUSMONTH, "
"(DATE_TRUNC(day, DATEADD(day, -(DAYOFWEEKISO(to_timestamp(DATE)) % 7 + 1)+1, to_timestamp(DATE))) - interval '1 week') AS DATE_FIRSTDAYOFPREVIOUSWEEK, "
"EXTRACT(dayofyear from to_timestamp(DATE)) AS DATE_DAYOFYEAR, "
"(YEAROFWEEKISO(to_timestamp(DATE))) AS DATE_ISOYEAR, "
"(WEEKISO(to_timestamp(DATE))) AS DATE_ISOWEEK, "
"(DAYOFWEEKISO(to_timestamp(DATE))) AS DATE_ISODAYOFWEEK, "
"EXTRACT(day from to_timestamp(DATE)) AS DATE_DAY, "
"EXTRACT(week from to_timestamp(DATE)) AS DATE_WEEK FROM SELECT_STEP_0)"
)
assert query.transformed_query == expected_transformed_query
assert (
query.selection_query
== "SELECT TOTO, RAICHU, FLORIZARRE, DATE, DATE_YEAR, DATE_PREVIOUSDAY, DATE_FIRSTDAYOFPREVIOUSYEAR, "
"DATE_FIRSTDAYOFPREVIOUSMONTH, DATE_FIRSTDAYOFPREVIOUSWEEK, DATE_DAYOFYEAR, DATE_ISOYEAR, DATE_ISOWEEK, "
"DATE_ISODAYOFWEEK, DATE_DAY, DATE_WEEK FROM DATEEXTRACT_STEP_1"
)
assert query.query_name == 'DATEEXTRACT_STEP_1'
    # assert on metadata
assert query.metadata_manager.retrieve_query_metadata_columns() == {
'DATE': ColumnMetadata(
name='DATE',
original_name='DATE',
type='DATE',
original_type='date',
alias=None,
delete=False,
),
'DATE_DAY': ColumnMetadata(
name='DATE_DAY',
original_name='DATE_DAY',
type='DATE',
original_type='date',
alias=None,
delete=False,
),
'DATE_DAYOFYEAR': ColumnMetadata(
name='DATE_DAYOFYEAR',
original_name='DATE_DAYOFYEAR',
type='DATE',
original_type='date',
alias=None,
delete=False,
),
'DATE_FIRSTDAYOFPREVIOUSMONTH': ColumnMetadata(
name='DATE_FIRSTDAYOFPREVIOUSMONTH',
original_name='DATE_FIRSTDAYOFPREVIOUSMONTH',
type='DATE',
original_type='date',
alias=None,
delete=False,
),
'DATE_FIRSTDAYOFPREVIOUSWEEK': ColumnMetadata(
name='DATE_FIRSTDAYOFPREVIOUSWEEK',
original_name='DATE_FIRSTDAYOFPREVIOUSWEEK',
type='DATE',
original_type='date',
alias=None,
delete=False,
),
'DATE_FIRSTDAYOFPREVIOUSYEAR': ColumnMetadata(
name='DATE_FIRSTDAYOFPREVIOUSYEAR',
original_name='DATE_FIRSTDAYOFPREVIOUSYEAR',
type='DATE',
original_type='date',
alias=None,
delete=False,
),
'DATE_ISODAYOFWEEK': ColumnMetadata(
name='DATE_ISODAYOFWEEK',
original_name='DATE_ISODAYOFWEEK',
type='DATE',
original_type='date',
alias=None,
delete=False,
),
'DATE_ISOWEEK': ColumnMetadata(
name='DATE_ISOWEEK',
original_name='DATE_ISOWEEK',
type='DATE',
original_type='date',
alias=None,
delete=False,
),
'DATE_ISOYEAR': ColumnMetadata(
name='DATE_ISOYEAR',
original_name='DATE_ISOYEAR',
type='DATE',
original_type='date',
alias=None,
delete=False,
),
'DATE_PREVIOUSDAY': ColumnMetadata(
name='DATE_PREVIOUSDAY',
original_name='DATE_PREVIOUSDAY',
type='DATE',
original_type='date',
alias=None,
delete=False,
),
'DATE_WEEK': ColumnMetadata(
name='DATE_WEEK',
original_name='DATE_WEEK',
type='DATE',
original_type='date',
alias=None,
delete=False,
),
'DATE_YEAR': ColumnMetadata(
name='DATE_YEAR',
original_name='DATE_YEAR',
type='DATE',
original_type='date',
alias=None,
delete=False,
),
'FLORIZARRE': ColumnMetadata(
name='FLORIZARRE',
original_name='FLORIZARRE',
type='TEXT',
original_type='text',
alias=None,
delete=False,
),
'RAICHU': ColumnMetadata(
name='RAICHU',
original_name='RAICHU',
type='INT',
original_type='int',
alias=None,
delete=False,
),
'TOTO': ColumnMetadata(
name='TOTO',
original_name='TOTO',
type='TEXT',
original_type='text',
alias=None,
delete=False,
),
}
def test_translate_with_new_columns_date_extract(query_date):
step = DateExtractStep(
name='dateextract', column='DATE', date_info=['year'], new_columns=['ZOZOR']
)
query = translate_dateextract(
step,
query_date,
index=1,
)
expected_transformed_query = (
"WITH SELECT_STEP_0 AS (SELECT * FROM products), DATEEXTRACT_STEP_1 AS (SELECT TOTO, RAICHU, FLORIZARRE, "
"DATE, EXTRACT(year from to_timestamp(DATE)) AS ZOZOR FROM SELECT_STEP_0)"
)
assert query.transformed_query == expected_transformed_query
assert (
query.selection_query
== 'SELECT TOTO, RAICHU, FLORIZARRE, DATE, ZOZOR FROM DATEEXTRACT_STEP_1'
)
assert query.query_name == 'DATEEXTRACT_STEP_1'
    # assert on metadata
assert query.metadata_manager.retrieve_query_metadata_columns() == {
'DATE': ColumnMetadata(
name='DATE',
original_name='DATE',
type='DATE',
original_type='date',
alias=None,
delete=False,
),
'FLORIZARRE': ColumnMetadata(
name='FLORIZARRE',
original_name='FLORIZARRE',
type='TEXT',
original_type='text',
alias=None,
delete=False,
),
'RAICHU': ColumnMetadata(
name='RAICHU',
original_name='RAICHU',
type='INT',
original_type='int',
alias=None,
delete=False,
),
'TOTO': ColumnMetadata(
name='TOTO',
original_name='TOTO',
type='TEXT',
original_type='text',
alias=None,
delete=False,
),
'ZOZOR': ColumnMetadata(
name='ZOZOR',
original_name='ZOZOR',
type='DATE',
original_type='date',
alias=None,
delete=False,
),
}
def test_utils_query_for_date_extract():
date_infos__expected_sql = {
'year': "EXTRACT(year from to_timestamp(DATE)) AS NEW_COLUMN",
'month': "EXTRACT(month from to_timestamp(DATE)) AS NEW_COLUMN",
'day': "EXTRACT(day from to_timestamp(DATE)) AS NEW_COLUMN",
'week': "EXTRACT(week from to_timestamp(DATE)) AS NEW_COLUMN",
'quarter': "EXTRACT(quarter from to_timestamp(DATE)) AS NEW_COLUMN",
'dayOfWeek': "(DAYOFWEEKISO(to_timestamp(DATE)) % 7 + 1) AS NEW_COLUMN",
'dayOfYear': "EXTRACT(dayofyear from to_timestamp(DATE)) AS NEW_COLUMN",
'isoYear': "(YEAROFWEEKISO(to_timestamp(DATE))) AS NEW_COLUMN",
'isoWeek': "(WEEKISO(to_timestamp(DATE))) AS NEW_COLUMN",
'isoDayOfWeek': "(DAYOFWEEKISO(to_timestamp(DATE))) AS NEW_COLUMN",
'hour': "EXTRACT(hour from to_timestamp(DATE)) AS NEW_COLUMN",
'minutes': "EXTRACT(minute from to_timestamp(DATE)) AS NEW_COLUMN",
'seconds': "EXTRACT(second from to_timestamp(DATE)) AS NEW_COLUMN",
'milliseconds': "(ROUND(EXTRACT(nanosecond FROM to_timestamp(DATE))/1000000)) AS NEW_COLUMN",
'firstDayOfYear': "(TO_TIMESTAMP_NTZ(DATE_TRUNC(year, to_timestamp(DATE)))) AS NEW_COLUMN",
'firstDayOfMonth': "(TO_TIMESTAMP_NTZ(DATE_TRUNC(month, to_timestamp(DATE)))) AS NEW_COLUMN",
'firstDayOfWeek': "(DATE_TRUNC(day, DATEADD(day, -(DAYOFWEEKISO(to_timestamp(DATE)) % 7 + 1)+1, to_timestamp(DATE)))) AS NEW_COLUMN",
'firstDayOfIsoWeek': "(DATE_TRUNC(day, DATEADD(day, -DAYOFWEEKISO(to_timestamp(DATE))+1, to_timestamp(DATE)))) AS NEW_COLUMN",
'firstDayOfQuarter': "(TO_TIMESTAMP_NTZ(DATE_TRUNC(quarter, to_timestamp(DATE)))) AS NEW_COLUMN",
'previousDay': "(DATE_TRUNC(day, to_timestamp(DATE) - interval '1 day')) AS NEW_COLUMN",
'firstDayOfPreviousYear': "(TO_TIMESTAMP_NTZ(DATE_TRUNC(year, to_timestamp(DATE))) - interval '1 year') AS NEW_COLUMN",
'firstDayOfPreviousMonth': "(TO_TIMESTAMP_NTZ(DATE_TRUNC(month, to_timestamp(DATE))) - interval '1 month') AS NEW_COLUMN",
'firstDayOfPreviousWeek': "(DATE_TRUNC(day, DATEADD(day, -(DAYOFWEEKISO(to_timestamp(DATE)) % 7 + 1)+1, to_timestamp(DATE))) - interval '1 week') AS NEW_COLUMN",
'firstDayOfPreviousQuarter': "(TO_TIMESTAMP_NTZ(DATE_TRUNC(quarter, to_timestamp(DATE))) - interval '1 quarter') AS NEW_COLUMN",
'firstDayOfPreviousIsoWeek': "(DATE_TRUNC(day, DATEADD(day, -DAYOFWEEKISO(to_timestamp(DATE))+1, to_timestamp(DATE))) - interval '1 week') AS NEW_COLUMN",
'previousYear': "(YEAR(to_timestamp(DATE) - interval '1 year')) AS NEW_COLUMN",
'previousMonth': "(MONTH(to_timestamp(DATE) - interval '1 month')) AS NEW_COLUMN",
'previousWeek': "(WEEK(to_timestamp(DATE) - interval '1 week')) AS NEW_COLUMN",
'previousQuarter': "(QUARTER(to_timestamp(DATE) - interval '1 quarter')) AS NEW_COLUMN",
'previousIsoWeek': "(WEEKISO(to_timestamp(DATE) - interval '1 week')) AS NEW_COLUMN",
}
# a loop to evaluate all date-info and the sql output
for dd in date_infos__expected_sql:
assert (
get_query_for_date_extract(
dd,
"DATE",
"NEW_COLUMN",
)
== date_infos__expected_sql[dd]
)
|
# encoding:utf-8
from app import app, db
from flask import g, abort
from app.submission.models import Submission
from app.common.models import JudgementStatus
from app.common.user import User
from utils import success, get_day_zero_time
from datetime import datetime, timedelta
from sqlalchemy import and_
@app.route('/rank')
def get_rank():
user_list = User.query.all()
user_dict = {}
user_ac_count = {}
for user in user_list:
user_dict[user.id] = user.to_public_dict()
submissions = Submission.query.filter(and_(
Submission.user_id == user.id, Submission.result == JudgementStatus.ACCEPTED
)).all()
user_ac_count[user.id] = len(submissions)
items = sorted(user_ac_count.items(), key=lambda item: item[1], reverse=True)
resp = []
for item in items:
submissions = Submission.query.filter_by(user_id=item[0]).all()
resp.append({
'user': user_dict[item[0]],
'ac_count': item[1],
'submissions_count': len(submissions)
})
return success(resp)
@app.route('/rank/week')
def get_week_rank():
now = datetime.now()
zero_time = get_day_zero_time(now)
    start = zero_time - timedelta(days=zero_time.weekday())  # start of the current week
start_str = '{0:%Y-%m-%d %H:%M:%S}'.format(start)
now_str = '{0:%Y-%m-%d %H:%M:%S}'.format(now)
sql = """
select u.id, u.username, u.avatar_url, count(*) from
submissions s left join users u on u.id = s.user_id
where s.timestamp > '{}' and s.timestamp < '{}' and s.result = 0 group by s.user_id;
""".format(start_str, now_str)
cursor = db.session.execute(sql)
result_set = cursor.fetchall()
result = []
for row in result_set:
result.append({
'id': row[0],
'username': row[1],
'avatar_url': row[2],
'count': row[3]
})
if len(result) < 3:
for i in range(3 - len(result)):
result.append({
'id': None,
                'username': '暂无人选',  # placeholder meaning "no one yet"
'avatar_url': None,
'count': 0
})
return success(result)
|
from ... import config
if config.get('qapp', False):
from .consolepanel import MainThreadConsole, SubThreadConsole, ChildProcessConsole, ChildThreadConsole
from .consoleproxy import ConsoleGuiProxy |
"""
pip-review lets you smoothly manage all available PyPI updates.
"""
from setuptools import setup
setup(
name='pip-review',
version='1.1.0',
url='https://github.com/jgonggrijp/pip-review',
license='BSD',
author='Julian Gonggrijp, Vincent Driessen',
author_email='j.gonggrijp@gmail.com',
description=__doc__.strip('\n'),
long_description=open('README.rst').read(),
long_description_content_type='text/x-rst',
packages=[
'pip_review',
],
entry_points={
'console_scripts': [
'pip-review = pip_review.__main__:main',
],
},
#include_package_data=True,
zip_safe=False,
platforms='any',
install_requires=[
'packaging',
'pip',
],
python_requires='>=2.7, !=3.0, !=3.1, !=3.2',
classifiers=[
# As from https://pypi.python.org/pypi?%3Aaction=list_classifiers
#'Development Status :: 1 - Planning',
#'Development Status :: 2 - Pre-Alpha',
#'Development Status :: 3 - Alpha',
#'Development Status :: 4 - Beta',
'Development Status :: 5 - Production/Stable',
#'Development Status :: 6 - Mature',
#'Development Status :: 7 - Inactive',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
#'Programming Language :: Python :: 2.3',
#'Programming Language :: Python :: 2.4',
#'Programming Language :: Python :: 2.5',
#'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
#'Programming Language :: Python :: 3.0',
#'Programming Language :: Python :: 3.1',
#'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Topic :: System :: Systems Administration',
]
)
|
# Generated by Django 3.1.3 on 2020-12-16 21:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('solver', '0003_parent_solve_attempt'),
]
operations = [
migrations.AddField(
model_name='parent',
name='initialized',
field=models.BooleanField(default=False),
),
]
|
#!/usr/bin/env python3
"""
Load necessary values from the coursera sql extract into a sqlite3 database.
"""
import json
import os
import sys
from csv import reader, field_size_limit
from datetime import datetime
from sqlite3 import connect, Error
from bs4 import BeautifulSoup
from gensim.parsing.preprocessing import preprocess_string
DIR_PATH = os.path.dirname(os.path.realpath(__file__))
DB_NAME = "dump_coursera_partial.sqlite3"
DB_FILE = os.path.join(DIR_PATH, DB_NAME)
DATA_PATH = os.path.join(DIR_PATH, "data")
COURSES = [
"agile_planning_for_software_products",
"client_needs_and_software_requirements",
"design_patterns",
"introduction_to_software_product_management",
"object_oriented_design",
"reviews_and_metrics_for_software_improvements",
"service_oriented_architecture",
"software_architecture",
"software_processes_and_agile_practices",
"software_product_management_capstone"
]
CSV_KWARGS = {
"delimiter": ",",
"quotechar": "\"",
"escapechar": "\\"
}
def create_database(conn):
"""create necessary database tables"""
sql_create_courses = """
CREATE TABLE IF NOT EXISTS courses (
course_id VARCHAR(50),
course_slug VARCHAR(2000),
course_name VARCHAR(2000),
course_launch_ts DATETIME,
course_update_ts DATETIME,
course_deleted BOOLEAN,
course_graded BOOLEAN,
course_desc VARCHAR(10000),
course_restricted BOOLEAN,
course_verification_enabled_at_ts DATETIME,
primary_translation_equivalent_course_id VARCHAR(50),
course_preenrollment_ts DATETIME,
course_workload VARCHAR(100),
course_session_enabled_ts DATETIME,
course_promo_photo_s3_bucket VARCHAR(255),
course_promo_photo_s3_key VARCHAR(10000),
course_level VARCHAR(50),
course_planned_launch_date_text VARCHAR(255),
course_header_image_s3_bucket VARCHAR(255),
course_header_image_s3_key VARCHAR(10000),
PRIMARY KEY (course_id)
)"""
sql_create_course_branches = """
CREATE TABLE IF NOT EXISTS course_branches (
course_id VARCHAR(50),
course_branch_id VARCHAR(50),
course_branch_changes_description VARCHAR(65535),
authoring_course_branch_name VARCHAR(255),
authoring_course_branch_created_ts DATETIME,
PRIMARY KEY (course_id, course_branch_id)
)"""
sql_create_course_branch_modules = """
CREATE TABLE IF NOT EXISTS course_branch_modules (
course_branch_id VARCHAR(50),
course_module_id VARCHAR(50),
course_branch_module_order INT8,
course_branch_module_name VARCHAR(2000),
course_branch_module_desc VARCHAR(10000)
)"""
sql_create_course_branch_lessons = """
CREATE TABLE IF NOT EXISTS course_branch_lessons (
course_branch_id VARCHAR(50),
course_lesson_id VARCHAR(50),
course_module_id VARCHAR(50),
course_branch_lesson_order INT8,
course_branch_lesson_name VARCHAR(10000)
);
"""
sql_create_course_branch_items = """
CREATE TABLE IF NOT EXISTS course_branch_items (
course_branch_id VARCHAR(255),
course_item_id VARCHAR(255),
course_lesson_id VARCHAR(255),
course_branch_item_order INT8,
course_item_type_id INT8,
course_branch_item_name VARCHAR(255),
course_branch_item_optional BOOLEAN,
atom_id VARCHAR(255),
atom_version_id INT8,
course_branch_atom_is_frozen BOOLEAN,
PRIMARY KEY (course_branch_id, course_item_id)
)"""
sql_create_course_item_types = """
CREATE TABLE IF NOT EXISTS course_item_types (
course_item_type_id INT8,
course_item_type_desc VARCHAR(255),
course_item_type_category VARCHAR(255),
course_item_type_graded BOOLEAN,
atom_content_type_id INT8,
PRIMARY KEY (course_item_type_id)
)"""
sql_create_discussion_course_forums = """
CREATE TABLE IF NOT EXISTS discussion_course_forums (
discussion_forum_id VARCHAR(50),
course_branch_id VARCHAR(50),
discussion_course_forum_title VARCHAR(20000),
discussion_course_forum_description VARCHAR(20000),
discussion_course_forum_order INT8
)"""
sql_create_discussion_questions = """
CREATE TABLE IF NOT EXISTS discussion_questions (
discussion_question_id VARCHAR(50),
ualberta_user_id VARCHAR(50) NOT NULL,
discussion_question_title VARCHAR(20000),
discussion_question_details VARCHAR(20000),
discussion_question_context_type VARCHAR(50),
course_id VARCHAR(50),
course_module_id VARCHAR(50),
course_item_id VARCHAR(50),
discussion_forum_id VARCHAR(50),
country_cd VARCHAR(2),
group_id VARCHAR(50),
discussion_question_created_ts DATETIME,
discussion_question_updated_ts DATETIME
)"""
sql_create_discussion_answers = """
CREATE TABLE IF NOT EXISTS discussion_answers (
discussion_answer_id VARCHAR(50),
ualberta_user_id VARCHAR(50) NOT NULL,
course_id VARCHAR(50),
discussion_answer_content VARCHAR(20000),
discussion_question_id VARCHAR(50),
discussion_answer_parent_discussion_answer_id VARCHAR(50),
discussion_answer_created_ts DATETIME,
discussion_answer_updated_ts DATETIME
)"""
c = conn.cursor()
c.execute(sql_create_courses)
c.execute(sql_create_course_branches)
c.execute(sql_create_course_branch_modules)
c.execute(sql_create_course_branch_lessons)
c.execute(sql_create_course_item_types)
c.execute(sql_create_course_branch_items)
c.execute(sql_create_discussion_course_forums)
c.execute(sql_create_discussion_questions)
c.execute(sql_create_discussion_answers)
conn.commit()
def load_data_from_csv(csv_path, conn, tbl_name):
c = conn.cursor()
with open(csv_path) as csvfile:
csv_reader = reader(csvfile, **CSV_KWARGS)
headers = next(csv_reader)
for line in csv_reader:
q_s = ",".join(["?", ] * len(line))
c.execute(
f"INSERT OR REPLACE INTO {tbl_name} VALUES ({q_s})", line)
conn.commit()
def load_course_data(course_data_path, conn):
for course_file in sorted(os.listdir(course_data_path)):
csv_path = os.path.join(course_data_path, course_file)
if course_file == "courses.csv":
load_data_from_csv(csv_path, conn, "courses")
elif course_file == "course_branches.csv":
load_data_from_csv(csv_path, conn, "course_branches")
elif course_file == "course_branch_modules.csv":
load_data_from_csv(csv_path, conn, "course_branch_modules")
elif course_file == "course_branch_lessons.csv":
load_data_from_csv(csv_path, conn, "course_branch_lessons")
elif course_file == "course_branch_items.csv":
load_data_from_csv(csv_path, conn, "course_branch_items")
elif course_file == "course_item_types.csv":
load_data_from_csv(csv_path, conn, "course_item_types")
elif course_file == "discussion_course_forums.csv":
load_data_from_csv(csv_path, conn, "discussion_course_forums")
elif course_file == "discussion_questions.csv":
load_data_from_csv(csv_path, conn, "discussion_questions")
elif course_file == "discussion_answers.csv":
load_data_from_csv(csv_path, conn, "discussion_answers")
def parse_and_load_course_branch_item(course_data_path, conn, course_zip_name):
"""take all of the course branch item content and create vocabulary
"""
content_path = os.path.join(course_data_path, "course_branch_item_content")
course_slug = course_zip_name.replace("_", "-")
sql_select_course_id = (
"SELECT DISTINCT course_branch_items.course_branch_id, " +
"course_item_id, course_branch_module_name, " +
"course_branch_lesson_name, course_branch_item_name FROM " +
"course_branch_modules, course_branch_lessons, course_branch_items, " +
"course_branches, courses WHERE course_slug = (?) " +
"AND courses.course_id == course_branches.course_id " +
"AND course_branches.course_branch_id == course_branch_items.course_branch_id " +
"AND course_branch_items.course_lesson_id == course_branch_lessons.course_lesson_id " +
"AND course_branch_lessons.course_module_id == course_branch_modules.course_module_id"
)
c = conn.cursor()
c.execute(sql_select_course_id, (course_slug,))
# module name > lesson name > item name > to processed vocabulary (list of words)
course_vocabulary = {}
rows = c.fetchmany()
while rows:
for row in rows:
(course_branch_id, course_item_id, course_branch_module_name,
course_branch_lesson_name, course_branch_item_name,) = row
# load the raw json file for branch item
course_branch_item_path = os.path.join(
content_path, "{}-{}.json".format(course_branch_id, course_item_id))
with open(course_branch_item_path, "r") as cbif:
# attempt to load the json file, otherwise continue
try:
raw_cbi = json.load(cbif)
except Exception as e:
print(e)
continue
try:
if raw_cbi["message"] == "" and raw_cbi["statusCode"] == 204 and raw_cbi["reason"] == "ignore assesments":
continue
except KeyError:
pass
try:
if raw_cbi["message"] == "" and raw_cbi["statusCode"] == 404:
continue
except KeyError:
pass
try:
if raw_cbi["message"] == None and raw_cbi["errorCode"] == "Not Authorized":
continue
except KeyError:
pass
try:
if raw_cbi["message"].startswith("No item ItemId(") and raw_cbi["errorCode"] == None:
continue
except KeyError:
pass
normalized_processed_text = None
try:
# try to get the definition value of the item
definition_raw_html = raw_cbi["linked"]["openCourseAssets.v1"][0]["definition"]["value"]
definition_text = " ".join(BeautifulSoup(
definition_raw_html, "html.parser").stripped_strings)
normalized_processed_text = preprocess_string(
definition_text)
update_course_vocabulary(
course_vocabulary, course_branch_module_name,
course_branch_lesson_name, course_branch_item_name,
normalized_processed_text)
continue
except KeyError:
pass
try:
# check if the branch item is a video with subtitles, get subtitles
subtitles_lookup = raw_cbi["linked"]["onDemandVideos.v1"][0]["subtitlesTxt"]
if not subtitles_lookup.keys():
continue # no subtitles for the video
subtitle_filepath = course_branch_item_path + ".subtitles.txt"
with open(subtitle_filepath, "r") as subfp:
subtitle_raw_text = "".join(subfp.readlines())
normalized_processed_text = preprocess_string(
subtitle_raw_text)
update_course_vocabulary(
course_vocabulary, course_branch_module_name,
course_branch_lesson_name, course_branch_item_name,
normalized_processed_text)
continue
except KeyError:
pass
raise Error("unhandled cbi")
rows = c.fetchmany()
# save the course_vocabulary to disk
vocab_filepath = os.path.join(
course_data_path, "..", "vocabulary.{}.json".format(course_slug))
with open(vocab_filepath, "w") as vocab_file:
json.dump(course_vocabulary, vocab_file)
def update_course_vocabulary(course_vocabulary, course_branch_module_name, course_branch_lesson_name, course_branch_item_name, normalized_processed_text):
course_branch_module = course_vocabulary.get(course_branch_module_name, {})
course_branch_lesson = course_branch_module.get(
course_branch_lesson_name, {})
course_branch_item = course_branch_lesson.get(course_branch_item_name, [])
course_branch_item.extend(normalized_processed_text)
course_branch_lesson[course_branch_item_name] = course_branch_item
course_branch_module[course_branch_lesson_name] = course_branch_lesson
course_vocabulary[course_branch_module_name] = course_branch_module
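# Sketch of the nested structure this builds (hypothetical names):
#   course_vocabulary = {
#       "Module 1": {
#           "Lesson 1.1": {
#               "Welcome video": ["agil", "plan", "softwar", ...],  # preprocessed tokens
#           },
#       },
#   }
# Tokens for repeated (module, lesson, item) triples are appended to the same list.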
def parse_and_load_discussion_questions(course_data_path, conn, course_zip_name):
"""load, parse, process discussion questions
"""
course_slug = course_zip_name.replace("_", "-")
sql_select_discussion_question = (
"SELECT discussion_question_id, discussion_question_title, " +
"discussion_question_details " +
"FROM discussion_questions, courses WHERE " +
"discussion_questions.course_id == courses.course_id AND " +
"courses.course_slug == (?)"
)
c = conn.cursor()
c.execute(sql_select_discussion_question, (course_slug,))
course_questions = {}
rows = c.fetchmany()
while rows:
for row in rows:
question_id, question_title, question_details = row
course_questions[question_id] = (
preprocess_string(question_title) +
preprocess_string(question_details)
)
rows = c.fetchmany()
# save the course_questions to disk
questions_filepath = os.path.join(
course_data_path, "..", "questions.{}.json".format(course_slug))
with open(questions_filepath, "w") as questions_file:
json.dump(course_questions, questions_file)
def parse_and_load_discussion_answers(course_data_path, conn, course_zip_name):
"""load, parse, process discussion answers
"""
course_slug = course_zip_name.replace("_", "-")
sql_select_discussion_answer = (
"SELECT discussion_answer_id, discussion_answer_content " +
"FROM discussion_answers, courses WHERE " +
"discussion_answers.course_id == courses.course_id AND " +
"courses.course_slug == (?)"
)
c = conn.cursor()
c.execute(sql_select_discussion_answer, (course_slug,))
course_answers = {}
rows = c.fetchmany()
while rows:
for row in rows:
answer_id, answer_content = row
course_answers[answer_id] = preprocess_string(answer_content)
rows = c.fetchmany()
# save the course_answers to disk
answers_filepath = os.path.join(
course_data_path, "..", "answers.{}.json".format(course_slug))
with open(answers_filepath, "w") as answers_file:
json.dump(course_answers, answers_file)
def main():
conn = None
try:
field_size_limit(sys.maxsize) # GHMatches csv threw error
conn = connect(DB_FILE)
sc_start = datetime.now()
print(f"Started {sc_start.now()}")
create_database(conn)
for course in COURSES:
print(course)
course_data_path = os.path.join(DATA_PATH, course)
load_course_data(course_data_path, conn)
parse_and_load_course_branch_item(course_data_path, conn, course)
parse_and_load_discussion_questions(course_data_path, conn, course)
parse_and_load_discussion_answers(course_data_path, conn, course)
conn.commit()
sc_end = datetime.now()
print(f"Ended {sc_end}")
print(f"Elapsed: {sc_end - sc_start}")
except Error as e:
print(e)
finally:
if conn:
conn.close()
if __name__ == "__main__":
main()
|
"""Tests cac.models.dimensionality_reduction.DimensionalityReductionModel"""
import os
from os.path import dirname, join, exists
from copy import deepcopy
import torch
import wandb
import unittest
from tqdm import tqdm
from torch.nn import Conv2d, BatchNorm2d, LeakyReLU
from cac.config import Config
from cac.utils.logger import set_logger, color
from cac.models.dimensionality_reduction import DimensionalityReductionModel
from cac.models.utils import get_saved_checkpoint_path
class DimensionalityReductionModelTestCase(unittest.TestCase):
"""Class to check the creation of DimensionalityReductionModel"""
@classmethod
def setUpClass(cls):
version = 'defaults/unsupervised.yml'
cls.cfg = Config(version)
cls.cfg.data['dataset']['params'] = {
'all': {
'fraction': 0.01
}
}
cls.cfg.num_workers = 10
def test_pca_model_fitting(self):
"""Test model.fit()"""
set_logger(join(self.cfg.log_dir, 'unsupervised.log'))
tester_cfg = deepcopy(self.cfg)
reduction_model = DimensionalityReductionModel(tester_cfg)
data = reduction_model.fit(return_predictions=True)
X, Z, Y = data['input'], data['latent'], data['labels']
self.assertEqual(Z.shape[-1], 2)
self.assertEqual(X.shape[0], Z.shape[0])
self.assertEqual(Z.shape[0], len(Y))
self.assertTrue(Z.shape[-1] <= X.shape[-1])
def test_tsne_model_fitting(self):
"""Test model.fit()"""
set_logger(join(self.cfg.log_dir, 'unsupervised.log'))
tester_cfg = deepcopy(self.cfg)
tester_cfg.__dict__['model']['method']['name'] = 'TSNE'
reduction_model = DimensionalityReductionModel(tester_cfg)
data = reduction_model.fit(return_predictions=True)
X, Z, Y = data['input'], data['latent'], data['labels']
self.assertEqual(Z.shape[-1], 2)
self.assertEqual(X.shape[0], Z.shape[0])
self.assertEqual(Z.shape[0], len(Y))
self.assertTrue(Z.shape[-1] <= X.shape[-1])
if __name__ == "__main__":
unittest.main()
|
import os
from . import config
class MiscConfigError(Exception):
pass
class Misc(object):
"""Misc config object."""
__KEYS = ['testlib', 'desc', 'out']
def __init__(self, data):
        self.testlib = None
self.desc = None
self.out = None
self.update(data)
def update(self, values):
# Check that all provided values are known keys
for unknown in set(values) - set(Misc.__KEYS):
raise MiscConfigError('Unknown key "%s" in misc config.' % unknown)
for (key, value) in values.items():
# check type
if key == 'testlib':
if not isinstance(value, str):
raise MiscConfigError('testlib path must be a string but is %s' % type(value))
elif key == 'desc':
if not isinstance(value, str):
raise MiscConfigError('desc extension must be a string but is %s' % type(value))
elif key == 'out':
if not isinstance(value, str):
raise MiscConfigError('output extension path must be a string but is %s' % type(value))
self.__dict__[key] = value
self.__check()
@staticmethod
def get_resource_path(res):
if res is None:
return []
return [
os.path.join(os.path.split(os.path.realpath(__file__))[0], 'res', res),
os.path.join(os.getcwd(), res)
]
def __check(self):
testlib, self.testlib = self.testlib, None
for res in Misc.get_resource_path(testlib):
if os.path.isfile(res):
self.testlib = res
        if self.testlib is None:
            raise MiscConfigError('testlib was not found.')
        if self.desc is None:
            raise MiscConfigError('desc extension was not found.')
        if self.out is None:
            raise MiscConfigError('output extension was not found.')
def load_misc_config():
"""Load misc configuration.
Returns: Misc object for misc config.
"""
return Misc(config.load_config('misc.yaml'))
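# Sketch of a matching misc.yaml (hypothetical values):
#   testlib: testlib.h   # resolved against the package res/ directory or the current working directory
#   desc: .desc
#   out: .out
# load_misc_config() returns a Misc object and raises MiscConfigError if a key is missing
# or the testlib file cannot be resolved to an existing path.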
|
import Quandl
import pandas as pd
import pickle
import matplotlib.pyplot as plt
from matplotlib import style
style.use('fivethirtyeight')
api_key = open('quandlapikey.txt', 'r').read()
def mortgage_30y():
df = Quandl.get('FMAC/MORTG', trim_start="1975-01-01", authtoken=api_key)
df['Value'] = (df['Value'] - df['Value'][0]) / df['Value'][0] * 100.0
df.columns = ['M30']
df = df.resample('D').mean()
df = df.resample('M').mean()
return df
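# The normalisation above rescales the series to percent change from its first value:
# with hypothetical numbers, a first rate of 9.0 maps to 0.0 and a later rate of 4.5
# maps to (4.5 - 9.0) / 9.0 * 100 = -50.0.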
def HPI_Benchmark():
df = Quandl.get('FMAC/HPI_USA', authtoken=api_key)
# df['United States'] = (df['United States'] - df['United States'][0]) / df['United States'][0] * 100.0
df['Value'] = (df['Value'] - df['Value'][0]) / df['Value'][0] * 100.0
return df
m30 = mortgage_30y()
HPI_data = pd.read_pickle('fiddy_states3.pickle')
HPI_bench = HPI_Benchmark()
state_HPI_M30 = HPI_data.join(m30)
# print(state_HPI_M30.corr())
print(state_HPI_M30.corr()['M30'].describe())
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from django.core.management.base import BaseCommand
from django.db import transaction
from django.utils import timezone
from djanban.apps.recurrent_cards.models import WeeklyRecurrentCard
# Create cards based on recurrent cards
class Command(BaseCommand):
help = u'Create real cards from the recurrent cards'
def __init__(self, stdout=None, stderr=None, no_color=False):
super(Command, self).__init__(stdout, stderr, no_color)
# Handle de command action
def handle(self, *args, **options):
today = timezone.now().today()
weekday = today.isoweekday()
recurrent_cards_filter = {
"is_active": True,
"board__is_archived": False
}
if weekday == 1:
recurrent_cards_filter["create_on_mondays"] = True
elif weekday == 2:
recurrent_cards_filter["create_on_tuesdays"] = True
elif weekday == 3:
recurrent_cards_filter["create_on_wednesdays"] = True
elif weekday == 4:
recurrent_cards_filter["create_on_thursdays"] = True
elif weekday == 5:
recurrent_cards_filter["create_on_fridays"] = True
elif weekday == 6:
recurrent_cards_filter["create_on_saturdays"] = True
elif weekday == 7:
recurrent_cards_filter["create_on_sundays"] = True
recurrent_cards = WeeklyRecurrentCard.objects.filter(**recurrent_cards_filter)
num_created_cards = 0
with transaction.atomic():
for recurrent_card in recurrent_cards:
# Check if has already created a card today
has_created_a_card_today = recurrent_card.has_created_a_card_today
# In case a card has not been already created today for this recurrent card,
# create it (also in its backend)
if not has_created_a_card_today:
card = recurrent_card.create_card()
num_created_cards += 1
self.stdout.write(
self.style.SUCCESS(
u"{0} successfully created".format(card.name))
)
# In case a card has been already created for this recurrent card, show a warning
else:
self.stdout.write(
self.style.WARNING(
u"card {0} already created today".format(recurrent_card.name))
)
# If there has been at least one creation of card, show a message
if num_created_cards > 0:
self.stdout.write(
self.style.SUCCESS(
u"Creation of {0} card(s) from recurrent cards completed successfully".format(num_created_cards)
)
)
# Otherwise, show another "less happy" message
else:
self.stdout.write(
self.style.SUCCESS(
u"No recurrent cards for this day, hence, no cards were created"
)
) |
#!/usr/bin/python
#
# Copyright 2018-2022 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Dict
from polyaxon.client.init import get_client_or_raise
from traceml.artifacts import V1ArtifactKind
def create_dockerfile_lineage(dockerfile_path: str, summary: Dict):
if not dockerfile_path:
return
filename = os.path.basename(dockerfile_path)
run_client = get_client_or_raise()
if not run_client:
return
run_client.log_artifact_ref(
path=dockerfile_path,
kind=V1ArtifactKind.DOCKERFILE,
name=filename,
summary=summary,
is_input=True,
)
|
from .pointer_network import PointerNetwork |
import pytest
from django.urls import reverse
from {{ cookiecutter.project_slug }}.users.models import User
pytestmark = pytest.mark.django_db
class TestUserAdmin:
def test_changelist(self, admin_client):
url = reverse("admin:users_user_changelist")
response = admin_client.get(url)
assert response.status_code == 200
def test_search(self, admin_client):
url = reverse("admin:users_user_changelist")
response = admin_client.get(url, data={"q": "test"})
assert response.status_code == 200
def test_add(self, admin_client):
url = reverse("admin:users_user_add")
response = admin_client.get(url)
assert response.status_code == 200
data = {
{% if cookiecutter.user.username_field == "username" -%}
"username": "test",
{% else -%}
"email": "email@demo.com",
{% endif -%}
"password1": "My_R@ndom-P@ssw0rd",
"password2": "My_R@ndom-P@ssw0rd",
}
response = admin_client.post(
url,
data=data,
)
assert response.status_code == 302
assert User.objects.filter({{cookiecutter.user.username_field}}=data["{{cookiecutter.user.username_field}}"]).exists()
def test_view_user(self, admin_client, user: User):
url = reverse("admin:users_user_change", kwargs={"object_id": user.pk})
response = admin_client.get(url)
assert response.status_code == 200
|
import sys
import tempfile
import re
import subprocess
def compile_krass_conditional(krass_conditional):
"""
Compile Krass conditional statements to Python conditional statements.
"""
# change true to True, && to and etc.
changes = [
("true", "True"),
("false", "False"),
("&&", " and "),
("||", " or "), # keep an eye on this one, for regex or non
("!", " not ")
]
for change in changes:
krass_conditional = krass_conditional.replace(change[0], change[1])
return krass_conditional
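# Minimal illustration (hypothetical Krass input): with the naive replacements above,
#   compile_krass_conditional("x > 3 && !done")  ->  "x > 3  and   not done"
# (the extra whitespace is harmless to Python). Note that, as written, "!=" would also be
# rewritten because "!" is replaced unconditionally.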
def compile_krass(krass_file_contents):
"""
Compile a section of krass code.
For any boolean operation not in a if/else if/while,
Python formatting is required (as of now)
"""
tf = tempfile.NamedTemporaryFile(delete=False)
original_path = tf.name
# generate python file. writing to `tf`
# since Krass needs proper formatting, line by line execution should be ok.
# if a line fits a special requirement, such as imports, function declarations,
# if statements or loops, appropriate action will take place. Indent level will be
# controlled by curly braces ( '{' and '}' ), in the case of blocks of code. Indentation
# in Krass code doesn't matter, so it will be stripped off, and added in post translation
    # Failure to format code properly will lead to broken Python translations.
indent_level = 0
struct_mode = False # if defining a struct
for line in krass_file_contents.split("\n"):
line = line.strip() # remove whitespace
output_line = "\t" * indent_level
if struct_mode:
if line == "}":
struct_mode = False
indent_level -= 2
if "=" in line:
# default value
output_line += line.replace(";", "") # remove the ; if it was added.
else:
# no default value
output_line += line.replace(";", "") + " = None"
output_line += "\n"
tf.write(bytes(output_line, 'utf-8'))
continue
special_line = False
# check for function block
z = re.match("function\\s+(\\w+)\\(((\\w+){0,1}|((\\w+,\\s*)+(\\w+)))\\)\\s*\\{", line)
if z:
special_line = True
# create function declaration.
# isolate function signature
function_signature = line[8:-1].strip()
output_line += "def " + function_signature + ":"
indent_level += 1
# If blocks
z = re.match("if\\s*\\((.*)\\)\\s*{", line)
if z:
special_line = True
# create if block declaration
conditional = z.group(1)
output_line += "if " + compile_krass_conditional(conditional) + ":"
indent_level += 1
# Else if blocks
z = re.match("}\\s*else\\s+if\\s*\\((.*)\\)\\s*{", line)
if z:
special_line = True
conditional = z.group(1)
# remove a tab and add in the elif block, don't change indentation.
output_line = output_line[:-1] + "elif " + compile_krass_conditional(conditional) + ":"
# Else blocks
z = re.match("}\\s*else\\s*{", line)
if z:
special_line = True
output_line = output_line[:-1] + "else:"
# For Loops
z = re.match("for\\s*\\((.*)\\s*:\\s*(.*)\\)\\s*{", line)
if z:
special_line = True
item = z.group(1)
iterator = z.group(2)
output_line += "for " + item + " in " + iterator + ":"
indent_level += 1
# While Loops
z = re.match("while\\s*\\((.*)\\)\\s*{", line)
if z:
special_line = True
conditional = z.group(1)
output_line += "while " + compile_krass_conditional(conditional) + ":"
indent_level += 1
# structs
z = re.match("struct\\s+(.*)\\s*{", line)
if z:
special_line = True
name = z.group(1)
output_line += "class "+name+":\n\t"+("\t"*indent_level)+"def __init__(self):"
struct_mode = True
indent_level += 2
# End of blocks: i.e. '}' as a line.
z = re.match("}", line)
if z:
special_line = True
indent_level -= 1
# for now, no exception handling will be implementable.
# not a special line, so treat it as pure python.
if not special_line:
output_line += line.replace(";", "")
output_line += "\n"
tf.write(bytes(output_line, 'utf-8'))
# file needs to be closed first.
tf.close()
# create a subprocess with file generated
cmd = subprocess.run(["python3", original_path], stdout=subprocess.PIPE)
stdout = cmd.stdout.decode() # bytes => str
return stdout
# sys.argv should = [script name, raw file, compiled file]
if len(sys.argv) != 3:
# this should never happen, because this is called from a shell script.
    # If it does, there's an issue passing files along between Shell and Python.
exit(1)
raw_file = sys.argv[1]
compiled_file = sys.argv[2]
compiled_file = open(compiled_file, 'w')
contents = open(raw_file, 'r').read()
for chunk in contents.split("?::"):
if "::?" in chunk:
# contains a krass chunk
tex_chunk, krass_chunk = chunk.split("::?")[0], chunk.split("::?")[1]
compiled_file.write(tex_chunk)
compiled_file.write(compile_krass(krass_chunk))
else:
# probably end, no krass chunk, or malformed.
compiled_file.write(chunk)
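# Sketch of the source layout these delimiters imply (hypothetical document): text and Krass
# code alternate, with each code chunk opened by "::?" and closed by "?::":
#   ... some TeX ...
#   ::?
#   x = 1 + 2;
#   print(x);
#   ?::
#   ... more TeX ...
# Text chunks are copied through unchanged; each Krass chunk is translated to Python in a
# temporary file, executed with python3, and its stdout is written into the compiled output
# in its place.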
|
from .trial_registry import Registry
from .trial_bot import TrialBot, Events, State
from .updater import Updater
from .updaters.training_updater import TrainingUpdater
from .updaters.testing_updater import TestingUpdater
from . import extensions as bot_extensions
|
import math
import numpy as np
from .detail.timeindex import to_timestamp
from .calibration import ForceCalibration
class Slice:
"""A lazily evaluated slice of a timeline/HDF5 channel
Users will only ever get these as a result of slicing a timeline/HDF5
channel or slicing another slice (via this class' `__getitem__`), i.e.
the `__init__` method will never be invoked by users.
Parameters
----------
data_source : Any
A slice data source. Can be `Continuous`, `TimeSeries`, 'TimeTags',
or any other source which conforms to the same interface.
labels : Dict[str, str]
Plot labels: "x", "y", "title".
calibration: ForceCalibration
"""
def __init__(self, data_source, labels=None, calibration=None):
self._src = data_source
self.labels = labels or {}
self._calibration = calibration
def __len__(self):
return len(self._src)
def __getitem__(self, item):
"""All indexing is in timestamp units (ns)"""
if not isinstance(item, slice):
raise IndexError("Scalar indexing is not supported, only slicing")
if item.step is not None:
raise IndexError("Slice steps are not supported")
if len(self) == 0:
return self
src_start = self._src.start
src_stop = self._src.stop
start = src_start if item.start is None else item.start
stop = src_stop if item.stop is None else item.stop
start, stop = (to_timestamp(v, src_start, src_stop) for v in (start, stop))
return self._with_data_source(self._src.slice(start, stop))
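    # Usage sketch (hypothetical slice and timestamps): indexing is by absolute timestamp
    # in nanoseconds, e.g. force[t0:t0 + int(2e9)] returns a new Slice covering two seconds
    # of data starting at t0.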
def _with_data_source(self, data_source):
"""Return a copy of this slice with a different data source, but keep other properties"""
return self.__class__(data_source, self.labels, self._calibration)
@property
def data(self):
"""The primary values of this channel slice"""
return self._src.data
@property
def timestamps(self):
"""Absolute timestamps (since epoch) which correspond to the channel data"""
return self._src.timestamps
@property
def calibration(self) -> list:
"""Calibration data slicing is deferred until calibration is requested to avoid
slicing values that may be needed."""
if self._calibration:
try:
return self._calibration.filter_calibration(self._src.start, self._src.stop)
except IndexError:
return []
else:
return []
@property
def sample_rate(self) -> int:
"""The data frequency for continuous data sources or `None` if it's variable"""
try:
return self._src.sample_rate
except AttributeError:
return None
def downsampled_over(self, range_list, reduce=np.mean, where='center'):
"""Downsample channel data based on timestamp ranges. The downsampling function (e.g. np.mean) is evaluated for
the time between a start and end time of each block. A list is returned that contains the data corresponding to
each block.
Parameters
----------
range_list : list of tuples
A list of (start, stop) tuples indicating over which ranges to apply the function.
Start and stop have to be specified in nanoseconds.
reduce : callable
The `numpy` function which is going to reduce multiple samples into one.
The default is `np.mean`, but `np.sum` could also be appropriate for some
cases, e.g. photon counts.
where : str
Where to put the final time point.
            'center' : the time point is put at (start + stop) / 2
            'left' : the time point is put at start
Examples
--------
::
from lumicks import pylake
file = pylake.File("example.h5")
stack = pylake.CorrelatedStack("example.tiff")
file.force1x.downsampled_over(stack.timestamps)
"""
if not isinstance(range_list, list):
raise TypeError("Did not pass timestamps to range_list.")
assert len(range_list[0]) == 2, "Did not pass timestamps to range_list."
assert self._src.start < range_list[-1][1], "No overlap between CorrelatedStack and selected channel."
assert self._src.stop > range_list[0][0], "No overlap between CorrelatedStack and selected channel"
if where != 'center' and where != 'left':
raise ValueError("Invalid argument for where. Valid options are center and left")
t = np.zeros(len(range_list))
d = np.zeros(len(range_list))
for i, time_range in enumerate(range_list):
start, stop = time_range
subset = self[start:stop]
t[i] = (start + stop) // 2 if where == 'center' else start
d[i] = reduce(subset.data)
return Slice(TimeSeries(d, t), self.labels)
def downsampled_by(self, factor, reduce=np.mean):
"""Return a copy of this slice which is downsampled by `factor`
Parameters
----------
factor : int
The size and sample rate of the data will be divided by this factor.
reduce : callable
The `numpy` function which is going to reduce multiple samples into one.
The default is `np.mean`, but `np.sum` could also be appropriate for some
cases, e.g. photon counts.
"""
return self._with_data_source(self._src.downsampled_by(factor, reduce))
def plot(self, **kwargs):
"""A simple line plot to visualize the data over time
Parameters
----------
**kwargs
Forwarded to :func:`matplotlib.pyplot.plot`.
"""
import matplotlib.pyplot as plt
seconds = (self.timestamps - self.timestamps[0]) / 1e9
plt.plot(seconds, self.data, **kwargs)
plt.xlabel(self.labels.get("x", "Time") + " (s)")
plt.ylabel(self.labels.get("y", "y"))
plt.title(self.labels.get("title", "title"))
def _downsample(data, factor, reduce):
def round_down(size, n):
"""Round down `size` to the nearest multiple of `n`"""
return int(math.floor(size / n)) * n
data = data[:round_down(data.size, factor)]
return reduce(data.reshape(-1, factor), axis=1)
class Continuous:
"""A source of continuous data for a timeline slice
Parameters
----------
data : array_like
Anything that's convertible to an `np.ndarray`.
start : int
Timestamp of the first data point.
dt : int
Delta between two timestamps. Constant for the entire data range.
"""
def __init__(self, data, start, dt):
self._src_data = data
self._cached_data = None
self.start = start
self.stop = start + len(data) * dt
self.dt = dt
def __len__(self):
return len(self._src_data)
@staticmethod
def from_dataset(dset, y_label="y", calibration=None):
start = dset.attrs["Start time (ns)"]
dt = int(1e9 / dset.attrs["Sample rate (Hz)"])
return Slice(Continuous(dset[()], start, dt),
labels={"title": dset.name.strip("/"), "y": y_label}, calibration=calibration)
@property
def data(self):
if self._cached_data is None:
self._cached_data = np.asarray(self._src_data)
return self._cached_data
@property
def timestamps(self):
return np.arange(self.start, self.stop, self.dt)
@property
def sample_rate(self):
return int(1e9 / self.dt)
def slice(self, start, stop):
def to_index(t):
"""Convert a timestamp into a continuous channel index (assumes t >= self.start)"""
return (t - self.start + self.dt - 1) // self.dt
fraction = (start - self.start) % self.dt
start = max(start if fraction == 0 else start + self.dt - fraction, self.start)
start_idx = to_index(start)
stop_idx = to_index(stop)
return self.__class__(self.data[start_idx:stop_idx], start, self.dt)
def downsampled_by(self, factor, reduce):
return self.__class__(_downsample(self.data, factor, reduce),
start=self.start + self.dt * (factor - 1) // 2, dt=self.dt * factor)
class TimeSeries:
"""A source of time series data for a timeline slice
Parameters
----------
data : array_like
Anything that's convertible to an `np.ndarray`.
timestamps : array_like
An array of integer timestamps.
"""
def __init__(self, data, timestamps):
assert len(data) == len(timestamps)
# TODO: should be lazily evaluated
self.data = np.asarray(data)
self.timestamps = np.asarray(timestamps)
def __len__(self):
return len(self.data)
@staticmethod
def from_dataset(dset, y_label="y", calibration=None):
return Slice(TimeSeries(dset["Value"], dset["Timestamp"]),
labels={"title": dset.name.strip("/"), "y": y_label}, calibration=calibration)
@property
def start(self):
if len(self.timestamps) > 0:
return self.timestamps[0]
else:
raise IndexError("Start of empty time series is undefined")
@property
def stop(self):
if len(self.timestamps) > 0:
return self.timestamps[-1] + 1
else:
raise IndexError("End of empty time series is undefined")
def slice(self, start, stop):
idx = np.logical_and(start <= self.timestamps, self.timestamps < stop)
return self.__class__(self.data[idx], self.timestamps[idx])
def downsampled_by(self, factor, reduce):
raise NotImplementedError("Downsampling is currently not available for time series data")
class TimeTags:
"""A source of time tag data for a timeline slice
Parameters
----------
data : array_like
Anything that's convertible to an `np.ndarray`
start : int
Timestamp of the start of the channel slice
stop : int
Timestamp of the end of the channel slice
"""
def __init__(self, data, start=None, stop=None):
self.data = np.asarray(data, dtype=np.int64)
self.start = start if start is not None else \
(self.data[0] if self.data.size > 0 else 0)
self.stop = stop if stop is not None else \
(self.data[-1]+1 if self.data.size > 0 else 0)
def __len__(self):
return self.data.size
@staticmethod
def from_dataset(dset, y_label="y"):
return Slice(TimeTags(dset[()]))
@property
def timestamps(self):
# For time tag data, the data is the timestamps!
return self.data
def slice(self, start, stop):
idx = np.logical_and(start <= self.data, self.data < stop)
return self.__class__(self.data[idx], min(start, stop), max(start, stop))
def downsampled_by(self, factor, reduce):
raise NotImplementedError("Downsampling is not available for time tag data")
class Empty:
"""A lightweight source of no data
Both `Continuous` and `TimeSeries` can be empty, but this is a lighter
    class which can be returned as an empty slice from properties.
"""
def __len__(self):
return 0
@property
def data(self):
return np.empty(0)
@property
def timestamps(self):
return np.empty(0)
empty_slice = Slice(Empty())
def channel_class(dset):
"""Figure out the right channel source class given an HDF5 dataset"""
if "Kind" in dset.attrs:
# Bluelake HDF5 files >=v2 mark channels with a "Kind" attribute:
kind = dset.attrs["Kind"]
if isinstance(kind, bytes):
kind = kind.decode()
if kind == "TimeTags":
return TimeTags
elif kind == "TimeSeries":
return TimeSeries
elif kind == "Continuous":
return Continuous
else:
raise RuntimeError("Unknown channel kind " + str(kind))
elif dset.dtype.fields is None:
# For compatibility with Bluelake HDF5 files v1
return Continuous
else:
return TimeSeries
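# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of how `Continuous` and `Slice` compose, assuming that
# integer timestamps are passed through `to_timestamp` unchanged. All values
# below are made up purely for demonstration.
def _example_channel_usage():
    """Build a 1 Hz continuous channel, then slice and downsample it."""
    # Ten samples recorded at 1 Hz, starting at t = 0 ns (dt = 1e9 ns).
    channel = Slice(Continuous(np.arange(10.0), start=0, dt=int(1e9)),
                    labels={"title": "demo", "y": "y"})
    # Slicing uses absolute timestamps in nanoseconds: keep samples 2..4.
    chunk = channel[2_000_000_000:5_000_000_000]
    # Downsampling by 2 averages neighbouring samples -> [0.5, 2.5, ...].
    coarse = channel.downsampled_by(2)
    return chunk.data, coarse.data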
|
from typing import Optional
from starlite import get
from starlite.utils import create_function_signature_model
def test_create_function_signature_model_parameter_parsing():
@get()
def my_fn(a: int, b: str, c: Optional[bytes], d: bytes = b"123", e: Optional[dict] = None) -> None:
pass
model = create_function_signature_model(my_fn.fn, [])
fields = model.__fields__
assert fields.get("a").type_ == int
assert fields.get("a").required
assert fields.get("b").type_ == str
assert fields.get("b").required
assert fields.get("c").type_ == bytes
assert fields.get("c").allow_none
assert fields.get("c").default is None
assert fields.get("d").type_ == bytes
assert fields.get("d").default == b"123"
assert fields.get("e").type_ == dict
assert fields.get("e").allow_none
assert fields.get("e").default is None
|
import unittest
from io import BytesIO
from id3vx.frame import FrameHeader, Frame, TextFrame, Frames, PCNT
from id3vx.frame import CHAP, MCDI, NCON, COMM, TALB, APIC, PRIV
from id3vx.tag import TagHeader
class FramesTests(unittest.TestCase):
def test_reads_frames_from_file(self):
# Arrange
header_a = FrameHeader("TALB", 9, FrameHeader.Flags.Compression, False)
frame_a = PRIV.read(BytesIO(bytes(header_a) + b'\x00thealbum'))
header_b = FrameHeader("TIT2", 10, FrameHeader.Flags.Encryption, False)
frame_b = PRIV.read(BytesIO(bytes(header_b) + b'\x00theartist'))
tag_header = TagHeader('ID3', 3, 0, TagHeader.Flags(0), 39)
byte_string = bytes(frame_a) + bytes(frame_b)
stream = BytesIO(byte_string)
# Act
frames = Frames.read(stream, tag_header)
# Assert
self.assertEqual(len(frames), 2)
self.assertEqual(frames[0].id(), 'TALB')
self.assertEqual(frames[0].text, 'thealbum')
self.assertEqual(frames[1].id(), 'TIT2')
self.assertEqual(frames[1].text, 'theartist')
def test_handles_padding(self):
"""Stops on first padding frame"""
# Arrange
header = FrameHeader("TALB", 9, FrameHeader.Flags.Compression, False)
fields = b'\x00thealbum'
stream = BytesIO(bytes(header) + fields)
frame = PRIV.read(stream)
padding = b'\x00' * 81
tag_header = TagHeader('ID3', 3, 0, TagHeader.Flags(0), 100)
byte_string = bytes(frame) + padding
# Act
frames = Frames.read(BytesIO(byte_string), tag_header)
# Assert
self.assertEqual(len(frames), 1)
self.assertEqual(frames[0].id(), 'TALB')
self.assertEqual(frames[0].text, 'thealbum')
class FrameHeaderTests(unittest.TestCase):
def test_reads_header_from_stream(self):
"""Reads FrameHeader from a bytes stream"""
# Arrange
frame_id = b'PRIV'
size = b'\x00\x00\x00\xFF'
flags = b'\x00\x00'
stream = BytesIO(frame_id + size + flags)
# Act
header = FrameHeader.read(stream)
# Assert
self.assertEqual(header.frame_size, 255)
self.assertEqual(header.flags, FrameHeader.Flags(0))
self.assertEqual(header.identifier, "PRIV")
def test_read_synchsafe_size(self):
"""Reads FrameHeader from a bytes stream"""
# Arrange
frame_id = b'PRIV'
size = b'\x00\x00\x02\x01' # would be 513 in plain binary
flags = b'\x00\x00'
expected_size = 257 # ... but is 257 in synchsafe world
stream = BytesIO(frame_id + size + flags)
# Act
header = FrameHeader.read(stream, synchsafe_size=True)
# Assert
self.assertEqual(header.frame_size, expected_size)
def test_reads_all_flags(self):
"""Reads all flags correctly"""
# Arrange
frame_id = b'PRIV'
size = b'\x00\x00\x00\xFF'
flags = 0b1110000011100000.to_bytes(2, "big")
stream = BytesIO(frame_id + size + flags)
# Act
header = FrameHeader.read(stream)
# Assert
self.assertIn(FrameHeader.Flags.Compression, header.flags)
self.assertIn(FrameHeader.Flags.Encryption, header.flags)
self.assertIn(FrameHeader.Flags.FileAlterPreservation, header.flags)
self.assertIn(FrameHeader.Flags.GroupingIdentity, header.flags)
self.assertIn(FrameHeader.Flags.ReadOnly, header.flags)
self.assertIn(FrameHeader.Flags.TagAlterPreservation, header.flags)
def test_reads_some_flags(self):
"""Reads some flags correctly"""
# Arrange
frame_id = b'PRIV'
size = b'\x00\x00\x00\xFF'
flags = 0b0000000011100000.to_bytes(2, "big")
stream = BytesIO(frame_id + size + flags)
# Act
header = FrameHeader.read(stream)
# Assert
self.assertIn(FrameHeader.Flags.Compression, header.flags)
self.assertIn(FrameHeader.Flags.Encryption, header.flags)
self.assertIn(FrameHeader.Flags.GroupingIdentity, header.flags)
def test_reads_header_if_size_bigger_than_zero(self):
"""Reads FrameHeader as long as size is present"""
# Arrange
frame_id = b'\x00\x00\x00\x00'
frame_size = b'\x00\x00\x00\x01'
flags = b'\x00\x00'
stream = BytesIO(frame_id + frame_size + flags)
# Act
header = FrameHeader.read(stream)
# Assert
self.assertEqual(header.frame_size, 1)
self.assertEqual(header.identifier, frame_id.decode("latin1"))
self.assertEqual(header.flags, FrameHeader.Flags(0))
@unittest.SkipTest
def test_no_header_from_too_short_stream(self):
"""Fails to read FrameHeader from a too short byte stream"""
# Arrange
frame_id = b'PRIV'
size = b'\x00\x00\x00\xFF'
stream = BytesIO(frame_id + size)
# Act
header = FrameHeader.read(stream)
# Assert
self.assertFalse(bool(header)) # TODO: fix this with proper None
def test_reads_no_header_if_size_is_zero(self):
"""Fails to read FrameHeader if size is zero"""
# Arrange
frame_id = b'PRIV'
size = b'\x00\x00\x00\x00'
flags = b'\x00\x00'
stream = BytesIO(frame_id + size + flags)
# Act
header = FrameHeader.read(stream)
# Assert
self.assertFalse(header)
def test_converts_back_to_bytes(self):
# Arrange
frame_id = 'PRIV'
size = 3333
flags = 0b1100_0000_0000_0000
expected_bytes = b'PRIV\x00\x00\r\x05\xc0\x00'
# System under test
header = FrameHeader(frame_id, size, flags, False)
# Act
header_bytes = bytes(header)
# Assert
self.assertEqual(header_bytes, expected_bytes)
class FrameTests(unittest.TestCase):
def test_exposes_fields(self):
"""Exposes relevant fields"""
# Arrange
frame_size = 100
header = FrameHeader('PRIV', frame_size, 0, False)
fields = b'\x0a\x0f\x00\x0f\x0c'
# System under test
frame = Frame(header, fields)
# Assert
self.assertEqual(frame.header, header)
self.assertEqual(frame.id(), "PRIV")
self.assertEqual(frame.fields, fields)
self.assertIn(str(fields), repr(frame))
self.assertEqual(len(frame), frame_size + len(header))
def test_serializes_to_bytes(self):
"""Serializes itself to bytes"""
# Arrange
header = FrameHeader('PRIV', 100, 0, False)
header_bytes = bytes(header)
fields = b'\x0a\x0f\x00\x0f\x0c'
# System under test
frame = Frame(header, fields)
# Act
byte_string = bytes(frame)
# Assert
self.assertEqual(byte_string, header_bytes + fields)
def test_no_frame_if_header_invalid(self):
"""Defaults to Frame ID if name is unknown"""
# Arrange
broken_header = bytes(10)
fields = bytes(100)
stream = BytesIO(broken_header + fields)
# System under test
frame = Frame.read(stream)
# Act - Assert
self.assertIsNone(frame)
def test_read_frame_from_stream(self):
"""Defaults to Frame ID if name is unknown"""
# Arrange
fields = b'\x00Album'
size = len(fields)
header = FrameHeader('TALB', size, 0, False)
frame = TextFrame.read(BytesIO(bytes(header) + fields))
stream = BytesIO(bytes(frame))
# System under test
frame = Frame.read(stream)
# Act - Assert
self.assertEqual(type(frame), TALB)
self.assertEqual(frame.text, "Album")
class APICTests(unittest.TestCase):
def test_initialize_from_fields(self):
# Arrange
header = FrameHeader('APIC', 1000, 0, False)
encoding = b'\x02'
mime_type = b'image/paper\x00'
picture_type = b'\x11' # bright colored fish
description = "You can see a fish here"
desc_bytes = description.encode("utf-16-be") + b'\x00\x00'
data = b'\xFF\xD8\xFF\xE0\x00\x10\x4A\x46\x49\x46\x00\x01'
fields = encoding + mime_type + picture_type + desc_bytes + data
expected_pic_type = APIC.PictureType.BRIGHT_COLORED_FISH
expected_mime_type = "image/paper"
# System under test
frame = APIC.read(BytesIO(bytes(header) + fields))
# Act - Assert
self.assertEqual(type(frame), APIC)
self.assertEqual(frame.description, description)
self.assertEqual(frame.picture_type, expected_pic_type)
self.assertEqual(frame.mime_type, "image/paper")
self.assertEqual(frame.data, data)
self.assertIn(description, repr(frame))
self.assertIn(str(data), repr(frame))
self.assertIn(str(expected_pic_type), repr(frame))
self.assertIn(expected_mime_type, repr(frame))
class CHAPTests(unittest.TestCase):
def test_initialize_from_fields(self):
# Arrange
header = FrameHeader('CHAP', 1000, 0, False)
element_id = 'chp'
element_id_bytes = element_id.encode("latin1")
t_start = b'\x00\xFF\xFF\xEE'
t_end = b'\x00\x0A\x0F\xEE'
o_start = b'\x00\xFF\xFF\xEE'
o_end = b'\x00\x0A\x0F\xEE'
offset_start = int.from_bytes(o_start, "big")
offset_end = int.from_bytes(t_end, "big")
fields = element_id_bytes + b'\x00' + t_start + t_end + o_start + o_end
expected_bytes = bytes(header) + fields
stream = BytesIO(bytes(header) + fields)
# System under test
frame = CHAP.read(stream)
# Act - Assert
self.assertEqual(type(frame), CHAP)
self.assertEqual(frame.element_id, element_id)
self.assertEqual(frame.start_time, 0xFFFFEE)
self.assertEqual(frame.end_time, 0x0A0FEE)
self.assertEqual(frame.start_offset, offset_start)
self.assertEqual(frame.end_offset, offset_end)
self.assertEqual(bytes(frame), expected_bytes)
def test_subframes(self):
"""FIXME: this test sucks"""
# Arrange
sub_fields = b'\x00sometext\x00'
sub_header = FrameHeader('TIT2', 1000, 0, False)
sub_frame = TextFrame.read(BytesIO(bytes(sub_header) + sub_fields))
header = FrameHeader('CHAP', 1000, 0, False)
element_id = 'chp'
element_id_bytes = element_id.encode("latin1")
t_start = b'\x00\xFF\xFF\xEE'
t_end = b'\x00\x0A\x0F\xEE'
o_start = b'\x00\xFF\xFF\xEE'
o_end = b'\x00\x0A\x0F\xEE'
fields = element_id_bytes + b'\x00' + t_start + t_end + o_start + o_end
fields += bytes(sub_frame)
# System under test
frame = CHAP.read(BytesIO(bytes(header) + fields))
# Act
sub_frames = list(frame.sub_frames())
# Act - Assert
self.assertEqual(1, len(sub_frames))
self.assertEqual('TIT2', sub_frames[0].id())
self.assertEqual("sometext", sub_frames[0].text)
class MCDITests(unittest.TestCase):
def test_exposes_toc(self):
# Arrange
header = FrameHeader('MCDI', 1000, 0, False)
fields = b'\xf0\xfa\xccsometocdata\xff'
stream = BytesIO(bytes(header) + fields)
# System under test
frame = MCDI.read(stream)
# Act - Assert
self.assertEqual(type(frame), MCDI)
self.assertEqual(fields, frame.toc)
class NCONTests(unittest.TestCase):
def test_recognizes_music_match_frames(self):
# Arrange
header = FrameHeader('NCON', 1000, 0, False)
fields = b'\xf0\xfa\xccweirdbinaryblob\xff'
stream = BytesIO(bytes(header) + fields)
# System under test
frame = NCON.read(stream)
# Act - Assert
self.assertEqual(type(frame), NCON)
self.assertEqual(fields, frame.fields)
class COMMTests(unittest.TestCase):
def test_reads_from_file(self):
# Arrange
header = b'COMM\x00\x00\x00\x0a\x00\x00'
fields = b'\x01\x65\x6e\x67\xff\xfe\x00\x00\xff\xfe'
stream = BytesIO(header + fields)
# Act
frame = COMM.read(stream)
# Assert
self.assertEqual(type(frame), COMM)
self.assertEqual(frame.id(), 'COMM')
self.assertEqual(frame.language, 'eng')
self.assertEqual(frame.description, '')
self.assertEqual(frame.comment, '')
class PCNTTests(unittest.TestCase):
def test_reads_pcnt_frame_from_stream(self):
"""Counts all 18446744073709550304 plays of Old Time Road"""
# Arrange
header = b'PCNT\x00\x00\x00\x0a\x00\x00'
fields = b'\xff\xff\xff\xff\xff\xff\xfa\xe0'
expected_play_count = 0xfffffffffffffae0
stream = BytesIO(header + fields)
# Act
frame = PCNT.read(stream)
# Assert
self.assertEqual(type(frame), PCNT)
self.assertEqual(frame.counter, expected_play_count)
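# --- Illustrative sketch (not part of the id3vx test suite) ---
# Clarifies the value used in test_read_synchsafe_size above: synchsafe
# integers carry only 7 significant bits per byte, so the raw big-endian
# bytes 0x00 0x00 0x02 0x01 decode to 2 * 128 + 1 = 257.
def decode_synchsafe(raw: bytes) -> int:
    """Decode a big-endian synchsafe integer (7 bits per byte)."""
    value = 0
    for byte in raw:
        value = (value << 7) | (byte & 0x7F)
    return value


class SynchsafeSketchTests(unittest.TestCase):
    def test_decodes_example_from_header_test(self):
        self.assertEqual(decode_synchsafe(b'\x00\x00\x02\x01'), 257)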
|
import os
import sys
from .neural_net import *
from .io import *
# # train_autoencoder(8,3,8)
test_predictions(pickle = False)
# cross_validation()
# vary_hidden_layer_size()
# vary_iterations()
|
from data import AudioPipeline, NoisedAudPipeline, dataset, get_data_loader
from torch.nn.parallel import DistributedDataParallel
from torch.multiprocessing import spawn
from torch.utils.tensorboard import SummaryWriter
from data import get_distributed_loader
from torch.utils.data import DataLoader
from abc import ABC, abstractmethod
from typing import Callable, Tuple
from torch.optim import Optimizer
import torch.distributed as dist
from torch.nn import Module
from functools import wraps
from hprams import get_melkwargs, get_snr_params, hprams
from utils import SNR, MinMax, load_model
from model import Model
from tqdm import tqdm
import torch
import os
OPT = {
'adam': torch.optim.Adam
}
LOSS = {
'mae': torch.nn.L1Loss(),
'mse': torch.nn.MSELoss()
}
def save_checkpoint(func, *args, _counter=[0]) -> Callable:
"""Save a checkpoint after each iteration
"""
@wraps(func)
def wrapper(obj, *args, **kwargs):
_counter[0] += 1
result = func(obj, *args, **kwargs)
if not os.path.exists(hprams.training.checkpoints_dir):
os.mkdir(hprams.training.checkpoints_dir)
if hprams.dist_configs.use_dist:
if obj.rank != 0:
return result
model_path = os.path.join(
hprams.training.checkpoints_dir,
'checkpoint_' + str(_counter[0]) + '.pt'
)
state_dict = obj.model.state_dict()
state_dict = {
key.replace('module.', ''): value
for key, value in state_dict.items()
}
torch.save(state_dict, model_path)
print(f'checkpoint saved to {model_path}')
return result
return wrapper
class ITrainer(ABC):
@abstractmethod
def fit():
pass
@abstractmethod
def train():
pass
@abstractmethod
def test():
pass
class BaseTrainer(ITrainer):
_train_loss_key = 'train_loss'
_test_loss_key = 'test_loss'
def __init__(
self,
criterion: Module,
optimizer: Optimizer,
model: Module,
device: str,
train_loader: DataLoader,
test_loader: DataLoader,
epochs: int,
logdir: str
) -> None:
self.criterion = criterion
self.optimizer = optimizer
self.model = model
self.train_loader = train_loader
self.test_loader = test_loader
self.device = device
self.epochs = epochs
self.step_history = dict()
self.history = dict()
self.tensorboard = SummaryWriter(logdir)
def log_results(self, epoch):
"""logs the results after each epoch
"""
result = ''
for key, value in self.history.items():
self.tensorboard.add_scalar(key, value[-1], epoch)
result += f'{key}: {str(value[-1])}, '
print(result[:-2])
def fit(self, *args, **kwargs):
"""The main training loop that train the model on the training
data then test it on the test set and then log the results
"""
for epoch in range(self.epochs):
self.train()
self.test()
self.log_results(epoch)
def set_train_mode(self) -> None:
"""Set the models on the training mood
"""
self.model = self.model.train()
def set_test_mode(self) -> None:
"""Set the models on the testing mood
"""
self.model = self.model.eval()
class Trainer(BaseTrainer):
def __init__(
self,
criterion: Module,
optimizer: Optimizer,
model: Module,
device: str,
train_loader: DataLoader,
test_loader: DataLoader,
epochs: int,
logdir: str
) -> None:
super().__init__(
criterion,
optimizer,
model,
device,
train_loader,
test_loader,
epochs,
logdir
)
def test(self):
"""Iterate over the whole test data and test the models
for a single epoch
"""
total_loss = 0
self.set_test_mode()
for (x, y, lengths) in tqdm(self.test_loader):
x = x.permute(0, 2, 1)
x = x.to(self.device)
y = y.to(self.device)
preds = self.model(x, lengths)
preds = preds.squeeze()
y = y[:, :preds.shape[1]]
loss = self.criterion(y, preds)
total_loss += loss.item()
        total_loss /= len(self.test_loader)
if self._test_loss_key in self.history:
self.history[self._test_loss_key].append(total_loss)
else:
self.history[self._test_loss_key] = [total_loss]
@save_checkpoint
def train(self):
"""Iterates over the whole training data and train the models
for a single epoch
"""
total_loss = 0
self.set_train_mode()
for (x, y, lengths) in tqdm(self.train_loader):
x = x.permute(0, 2, 1)
x = x.to(self.device)
y = y.to(self.device)
self.optimizer.zero_grad()
preds = self.model(x, lengths)
preds = preds.squeeze()
y = y[:, :preds.shape[1]]
loss = self.criterion(y, preds)
loss.backward()
self.optimizer.step()
total_loss += loss.item()
total_loss /= len(self.train_loader)
if self._train_loss_key in self.history:
self.history[self._train_loss_key].append(total_loss)
else:
self.history[self._train_loss_key] = [total_loss]
class DistTrainer(BaseTrainer):
def __init__(
self,
criterion: Module,
optimizer: Optimizer,
model: Module,
device: str,
train_loader: DataLoader,
test_loader: DataLoader,
epochs: int,
logdir: str,
url: str,
backend: str,
world_size: int,
rank: int
) -> None:
super().__init__(
criterion,
optimizer,
model,
device,
train_loader,
test_loader,
epochs,
logdir
)
self.url = url
self.backend = backend
self.world_size = world_size
self.rank = rank
self.init()
def init(self):
        os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '12345'
dist.init_process_group(
self.backend,
init_method=self.url,
world_size=self.world_size,
rank=self.rank
)
def fit(self, *args, **kwargs):
"""The main training loop that train the model on the training
data then test it on the test set and then log the results
"""
for epoch in range(self.epochs):
self.train()
if self.rank == 0:
self.test()
self.log_results(epoch)
dist.destroy_process_group()
def test(self):
"""Iterate over the whole test data and test the models
for a single epoch
"""
total_loss = 0
self.set_test_mode()
for (x, y, lengths) in tqdm(self.test_loader):
x = x.permute(0, 2, 1)
x = x.cuda(self.rank)
y = y.cuda(self.rank)
preds = self.model(x, lengths)
preds = preds.squeeze()
y = y[:, :preds.shape[1]]
loss = self.criterion(y, preds)
total_loss += loss.item()
        total_loss /= len(self.test_loader)
if self._test_loss_key in self.history:
self.history[self._test_loss_key].append(total_loss)
else:
self.history[self._test_loss_key] = [total_loss]
@save_checkpoint
def train(self):
"""Iterates over the whole training data and train the models
for a single epoch
"""
total_loss = 0
self.set_train_mode()
self.model.cuda(self.rank)
self.model = DistributedDataParallel(
self.model, device_ids=[self.rank]
)
total = torch.tensor([0]).cuda(self.rank)
for (x, y, lengths) in tqdm(self.train_loader):
x = x.permute(0, 2, 1)
x = x.cuda(self.rank)
y = y.cuda(self.rank)
self.optimizer.zero_grad()
preds = self.model(x, lengths)
preds = preds.squeeze()
y = y[:, :preds.shape[1]]
loss = self.criterion(y, preds)
loss.backward()
total = torch.tensor([loss.item()]).cuda(self.rank)
self.optimizer.step()
dist.all_reduce(total, op=dist.ReduceOp.SUM)
if self.rank == 0:
total_loss += (total.item() / self.world_size)
total_loss /= len(self.train_loader)
if self._train_loss_key in self.history:
self.history[self._train_loss_key].append(total_loss)
else:
self.history[self._train_loss_key] = [total_loss]
def get_scalers() -> dict:
return {
'chunk_length': MinMax(
hprams.data.lengths.min_val,
hprams.data.lengths.max_val
),
'signal_scaler': MinMax(
hprams.data.signal_scaler.min_val,
hprams.data.signal_scaler.max_val
),
'noise_scaler': MinMax(
hprams.data.noise_scaler.min_val,
hprams.data.noise_scaler.max_val
)
}
def get_pipelines() -> dict:
return {
'aud_pipeline': AudioPipeline(
hprams.data.sampling_rate
),
'noisy_pipeline': NoisedAudPipeline(
sample_rate=hprams.data.sampling_rate,
n_mfcc=hprams.data.n_mfcc,
melkwargs=get_melkwargs()
)
}
def get_dataset_params(data_dir: str, seed=None) -> dict:
return dict(
**get_pipelines(),
**get_scalers(),
snr_calc=SNR(**get_snr_params()),
noise_dir=hprams.data.noise_dir,
audio_dir=data_dir,
seed=seed
)
def get_train_test_loaders() -> Tuple[DataLoader, DataLoader]:
train_loader = get_data_loader(
batch_size=hprams.training.batch_size,
dataset=dataset(
**get_dataset_params(
data_dir=hprams.data.training_dir,
seed=hprams.data.train_seed
)
)
)
test_loader = get_data_loader(
batch_size=hprams.training.batch_size,
dataset=dataset(
**get_dataset_params(
data_dir=hprams.data.testing_dir,
seed=hprams.data.test_seed
)
)
)
return (
train_loader,
test_loader
)
def get_train_test_dist_loaders(rank: int) -> Tuple[DataLoader, DataLoader]:
train_loader = get_distributed_loader(
batch_size=hprams.training.batch_size,
dataset=dataset(
**get_dataset_params(
data_dir=hprams.data.training_dir,
seed=hprams.data.train_seed
)
),
world_size=hprams.dist_configs.n_gpus,
rank=rank
)
test_loader = get_data_loader(
batch_size=hprams.training.batch_size,
dataset=dataset(
**get_dataset_params(
data_dir=hprams.data.testing_dir,
seed=hprams.data.test_seed
)
)
)
return (
train_loader,
test_loader
)
def get_trainer() -> Trainer:
device = hprams.device
criterion = LOSS[hprams.training.loss_func]
model = load_model(hprams.model, hprams.checkpoint)
optimizer = OPT[hprams.training.optimizer](
model.parameters(),
lr=hprams.training.learning_rate
)
train_loader, test_loader = get_train_test_loaders()
return Trainer(
criterion=criterion,
optimizer=optimizer,
model=model,
device=device,
train_loader=train_loader,
test_loader=test_loader,
epochs=hprams.training.epochs,
logdir=hprams.training.logdir
)
def get_distributed_trainer(rank: int):
criterion = LOSS[hprams.training.loss_func]
    model = load_model(hprams.model, hprams.checkpoint)
optimizer = OPT[hprams.training.optimizer](
model.parameters(),
lr=hprams.training.learning_rate
)
train_loader, test_loader = get_train_test_dist_loaders(rank)
return DistTrainer(
criterion=criterion,
optimizer=optimizer,
model=model,
device=hprams.device,
train_loader=train_loader,
test_loader=test_loader,
epochs=hprams.training.epochs,
logdir=hprams.training.logdir,
url=hprams.dist_configs.url,
backend=hprams.dist_configs.backend,
world_size=hprams.dist_configs.n_gpus,
rank=rank
)
def run_dist(rank: int):
trainer = get_distributed_trainer(rank)
trainer.fit()
def main():
if hprams.dist_configs.use_dist:
spawn(
run_dist,
nprocs=hprams.dist_configs.n_gpus
)
else:
trainer = get_trainer()
trainer.fit()
if __name__ == '__main__':
main()
|
import streamlit as st
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import praw
# email = "samuellawrenceJSE@gmail.com"
# password = "JSEStocksGoUp"
#Praw API login
username = "SamTheIceCreamMan"
password = "Samernator1"
id = "U9EagV0j3qUSbw"
secret = "Su_o5xTzvs4RKygGOBakQFutxw49-A"
user_agent = "scrapermaster"
reddit = praw.Reddit(client_id = id,
client_secret = secret,
password = password, user_agent = user_agent,
username = username)
# subred = reddit.subreddit("investing")
# top = subred.top(limit = 10)
# for i in top:
# print(i.title, i.url)
def app():
selection = ["r/wallstreetbets", "r/stocks","r/investing","r/CyptoCurrency","r/StockMarket","r/pennystocks","r/WallStreetbetsELITE","Multi-select"] # Selections
choice = st.sidebar.selectbox("Dashboard Selection", selection)
if choice == 'r/wallstreetbets':
st.title("r/Wallstreetbets")
subred = reddit.subreddit("Wallstreetbets")
st.write(subred.title)
all_comments = subred.comments.list()
#About subreddit
#Collect top 25 posts from x days
#Take all comments from posts and put into list
# - top stocks listed
# - ideas / themes
if choice == 'r/stocks':
st.title("r/Stocks subreddit")
subred = reddit.subreddit("investing")
st.write(subred.title)
if choice == 'r/investing':
st.title("r/investing subreddit")
subred = reddit.subreddit("investing")
st.write(subred.description)
    if choice == 'r/CryptoCurrency':
        st.title("r/CryptoCurrency subreddit")
        subred = reddit.subreddit("CryptoCurrency")
st.write(subred.description)
if choice == 'r/StockMarket':
st.title("r/StockMarket subreddit")
subred = reddit.subreddit("StockMarket")
st.write(subred.description)
if choice == 'r/pennystocks':
st.title("r/pennystocks subreddit")
subred = reddit.subreddit("pennystocks")
st.write(subred.description)
if choice == 'r/WallStreetbetsELITE':
st.title("r/WallStreetbetsELITE subreddit")
subred = reddit.subreddit("WallStreetbetsELITE")
st.write(subred.description)
# if choice == '"Multi-select"':
# st.title("r/Multi-select subreddit")
# subred = reddit.subreddit("investing")
# st.write(subred.description)
if __name__ == "__main__":
app()
|
#!/usr/bin/python -u
# ./gen_mlp_init.py
# script generating NN initialization
#
# author: Karel Vesely
#
import math, random
import sys
from optparse import OptionParser
parser = OptionParser()
parser.add_option('--dim', dest='dim', help='d1:d2:d3 layer dimensions in the network')
parser.add_option('--gauss', dest='gauss', help='use gaussian noise for weights', action='store_true', default=False)
parser.add_option('--negbias', dest='negbias', help='use uniform [-4.1,-3.9] for bias (default all 0.0)', action='store_true', default=False)
parser.add_option('--inputscale', dest='inputscale', help='scale the weights by 3/sqrt(Ninputs)', action='store_true', default=False)
parser.add_option('--normalized', dest='normalized', help='Generate normalized weights according to X.Glorot paper, U[-x,x] x=sqrt(6)/(sqrt(dim_in+dim_out))', action='store_true', default=False)
parser.add_option('--activation', dest='activation', help='activation type tag (def. <sigmoid>)', default='<sigmoid>')
parser.add_option('--activationOutput', dest='outputactivation', help='activation type tag (def. <softmax>)', default='<softmax>')
parser.add_option('--linBNdim', dest='linBNdim', help='dim of linear bottleneck (sigmoids will be omitted, bias will be zero)',default=0)
parser.add_option('--linOutput', dest='linOutput', help='generate MLP with linear output', action='store_true', default=False)
parser.add_option('--seed', dest='seedval', help='seed for random generator',default=0)
(options, args) = parser.parse_args()
if(options.dim == None):
parser.print_help()
sys.exit(1)
#seeding
seedval=int(options.seedval)
if(seedval != 0):
random.seed(seedval)
dimStrL = options.dim.split(':')
dimL = []
for i in range(len(dimStrL)):
dimL.append(int(dimStrL[i]))
#print dimL,'linBN',options.linBNdim
for layer in range(len(dimL)-1):
print '<affinetransform>', dimL[layer+1], dimL[layer]
#precompute...
nomalized_interval = math.sqrt(6.0) / math.sqrt(dimL[layer+1]+dimL[layer])
#weight matrix
print '['
for row in range(dimL[layer+1]):
for col in range(dimL[layer]):
if(options.normalized):
print random.random()*2.0*nomalized_interval - nomalized_interval,
elif(options.gauss):
if(options.inputscale):
print 3/math.sqrt(dimL[layer])*random.gauss(0.0,1.0),
else:
print 0.1*random.gauss(0.0,1.0),
else:
if(options.inputscale):
print (random.random()-0.5)*2*3/math.sqrt(dimL[layer]),
else:
print random.random()/5.0-0.1,
print #newline for each row
print ']'
#bias vector
print '[',
for idx in range(dimL[layer+1]):
if(int(options.linBNdim) == dimL[layer+1]):
print '0.0',
elif(layer == len(dimL)-2):#last layer (softmax)
print '0.0',
elif(options.negbias):
print random.random()/5.0-4.1,
else:
print '0.0',
print ']'
if (int(options.linBNdim) != dimL[layer+1]):
if (layer == len(dimL)-2): #if last layer
if (not(options.linOutput)):
print options.outputactivation, dimL[layer+1], dimL[layer+1]
#print '<softmax>', dimL[layer+1], dimL[layer+1]
else:
#print '<sigmoid>', dimL[layer+1], dimL[layer+1]
print options.activation, dimL[layer+1], dimL[layer+1]
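# --- Illustrative sketch (not part of the original script) ---
# The --normalized option draws each weight from U[-x, x] with
# x = sqrt(6) / sqrt(dim_in + dim_out) (Glorot & Bengio). The helper below
# restates that sampling rule on its own; the function name is illustrative
# and is not used by the generation loop above.
def sample_normalized_weight(dim_in, dim_out):
    limit = math.sqrt(6.0) / math.sqrt(dim_in + dim_out)
    return random.random() * 2.0 * limit - limit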
|
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="populate_secrets_gitlab",
version="0.2.0",
author="Joe Niland",
author_email="joe@deploymode.com",
description="Populate Gitlab CI/CD Variables from .env file",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/deploymode/populate-secrets-gitlab",
project_urls={
"Bug Tracker": "https://github.com/deploymode/populate-secrets-gitlab/issues",
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
package_dir={"": "src"},
packages=setuptools.find_packages(where="src"),
python_requires=">=3.6",
install_requires=["python-gitlab==2.10.0", "python-dotenv==0.19.0"],
setup_requires=["flake8"],
entry_points={
"console_scripts": [
"populate-gitlab = populate_secrets_gitlab.__main__:main",
],
},
)
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import workflows
from horizon.utils import validators
from openstack_dashboard import api
from openstack_dashboard.api import glance
from openstack_dashboard.usage import quotas
from ..util import CLOUDLET_TYPE
from ..util import find_basevm_by_sha256
from ..util import find_matching_flavor
from ..util import get_resource_size
import requests
from django.core.validators import URLValidator
from django.core.exceptions import ValidationError
from .. import cloudlet_api
from xml.etree import ElementTree
import urllib2
try:
from elijah.provisioning import msgpack
except ImportError as e:
import msgpack
from elijah.provisioning.configuration import Const as Cloudlet_Const
LOG = logging.getLogger(__name__)
class SelectProjectUserAction(workflows.Action):
project_id = forms.ChoiceField(label=_("Project"))
user_id = forms.ChoiceField(label=_("User"))
def __init__(self, request, *args, **kwargs):
super(SelectProjectUserAction, self).__init__(request, *args, **kwargs)
# Set our project choices
projects = [(tenant.id, tenant.name)
for tenant in request.user.authorized_tenants]
self.fields['project_id'].choices = projects
# Set our user options
users = [(request.user.id, request.user.username)]
self.fields['user_id'].choices = users
class Meta:
name = _("Project & User")
# Unusable permission so this is always hidden. However, we
# keep this step in the workflow for validation/verification purposes.
permissions = ("!",)
class SelectProjectUser(workflows.Step):
action_class = SelectProjectUserAction
contributes = ("project_id", "user_id")
KEYPAIR_IMPORT_URL = "horizon:project:access_and_security:keypairs:import"
class SetResumeDetailAction(workflows.Action):
image_id = forms.ChoiceField(label=_("Image"), required=True)
name = forms.CharField(max_length=80, label=_("Instance Name"),
initial="resumed_vm")
security_group_ids = forms.MultipleChoiceField(label=_("Security Groups"),
required=True,
initial=["default"],
widget=forms.CheckboxSelectMultiple(),
help_text=_("Launch instance in these "
"security groups."))
flavor = forms.ChoiceField(label=_("Flavor"), required=True,
help_text=_("Size of image to launch."))
#keypair_id = forms.DynamicChoiceField(label=_("Keypair"),
# required=False,
# help_text=_("Which keypair to use for "
# "authentication."),
# add_item_link=KEYPAIR_IMPORT_URL)
class Meta:
name = _("Base VM Info")
help_text_template = ("project/cloudlet/instance/"
"_resume_details_help.html")
def clean(self):
cleaned_data = super(SetResumeDetailAction, self).clean()
return cleaned_data
def _get_available_images(self, request, context):
project_id = context.get('project_id', None)
if not hasattr(self, "_public_images"):
public = {"is_public": True,
"status": "active"}
try:
image_detail = api.glance.image_list_detailed(
request, filters=public
)
if len(image_detail) == 2: # icehouse
public_images, _more = image_detail
elif len(image_detail) == 3: # kilo
public_images, _more , has_prev_data = image_detail
except:
public_images = []
exceptions.handle(request,
_("Unable to retrieve public images."))
self._public_images = public_images
# Preempt if we don't have a project_id yet.
if project_id is None:
setattr(self, "_images_for_%s" % project_id, [])
if not hasattr(self, "_images_for_%s" % project_id):
owner = {"property-owner_id": project_id,
"status": "active"}
try:
image_detail = api.glance.image_list_detailed(
request, filters=owner
)
if len(image_detail) == 2: # icehouse
owned_images, _more = image_detail
elif len(image_detail) == 3: # kilo
owned_images, _more , has_prev_data = image_detail
except:
owned_images = []
exceptions.handle(request,
_("Unable to retrieve images for "
"the current project."))
setattr(self, "_images_for_%s" % project_id, owned_images)
owned_images = getattr(self, "_images_for_%s" % project_id)
images = owned_images + self._public_images
base_vms = list()
for image in images:
if hasattr(image, 'properties') == True:
properties = getattr(image, 'properties')
cloudlet_type = properties.get('cloudlet_type', None)
if cloudlet_type == CLOUDLET_TYPE.IMAGE_TYPE_BASE_DISK:
base_vms.append(image)
# Remove duplicate images
image_ids = []
final_images = []
for image in base_vms:
if image.id not in image_ids:
image_ids.append(image.id)
final_images.append(image)
return [image for image in final_images
if image.container_format not in ('aki', 'ari')]
def populate_image_id_choices(self, request, context):
images = self._get_available_images(request, context)
choices = [(image.id, image.name)
for image in images
if image.properties.get("image_type", '') == "snapshot"]
if choices:
choices.insert(0, ("", _("Select Base VM")))
else:
choices.insert(0, ("", _("No Base VM is available.")))
return choices
def get_help_text(self):
extra = {}
try:
extra['usages'] = quotas.tenant_quota_usages(self.request)
extra['usages_json'] = json.dumps(extra['usages'])
flavors = json.dumps([f._info for f in
api.nova.flavor_list(self.request)])
extra['flavors'] = flavors
except:
exceptions.handle(self.request,
_("Unable to retrieve quota information."))
return super(SetResumeDetailAction, self).get_help_text(extra)
def populate_keypair_id_choices(self, request, context):
try:
keypairs = api.nova.keypair_list(request)
keypair_list = [(kp.name, kp.name) for kp in keypairs]
except:
keypair_list = []
exceptions.handle(request,
_('Unable to retrieve keypairs.'))
if keypair_list:
if len(keypair_list) == 1:
self.fields['keypair_id'].initial = keypair_list[0][0]
#keypair_list.insert(0, ("", _("Select a keypair")))
else:
keypair_list = (("", _("No keypairs available.")),)
return keypair_list
def populate_security_group_ids_choices(self, request, context):
try:
groups = api.network.security_group_list(request)
#groups = api.nova.SecurityGroupManager.list(request)
security_group_list = [(sg.name, sg.name) for sg in groups]
except:
exceptions.handle(request,
_('Unable to retrieve list of security groups'))
security_group_list = []
return security_group_list
def populate_flavor_choices(self, request, context):
# return all flavors of Base VM image
try:
matching_flavors = set()
flavors = api.nova.flavor_list(request)
basevm_images = self._get_available_images(request, context)
for basevm_image in basevm_images:
if basevm_image.properties is None or\
len(basevm_image.properties) == 0:
continue
libvirt_xml_str = basevm_image.properties.get(
'base_resource_xml_str', None)
if libvirt_xml_str is None:
continue
cpu_count, memory_mb = get_resource_size(libvirt_xml_str)
disk_gb = basevm_image.min_disk
ret_flavors = find_matching_flavor(flavors,
cpu_count,
memory_mb,
disk_gb)
matching_flavors.update(ret_flavors)
if len(matching_flavors) > 0:
self.fields['flavor'].initial = list(matching_flavors)[0]
else:
self.fields['flavor'].initial = (0, "No valid flavor")
except Exception as e:
matching_flavors= set()
exceptions.handle(request,
_('Unable to retrieve instance flavors.'))
return sorted(list(matching_flavors))
class SetSynthesizeDetailsAction(workflows.Action):
overlay_url = forms.CharField(max_length=200, required=True,
label=_("URL for VM overlay"),
initial="http://")
name = forms.CharField(max_length=80, label=_("Instance Name"),
initial="synthesized_vm")
security_group_ids = forms.MultipleChoiceField(
label=_("Security Groups"),
required=True,
initial=["default"],
widget=forms.CheckboxSelectMultiple(),
help_text=_("Launch instance in these "
"security groups."))
flavor = forms.ChoiceField(label=_("Flavor"), required=True,
help_text=_("Size of image to launch."))
#keypair_id = forms.DynamicChoiceField(label=_("Keypair"),
# required=False,
# help_text=_("Which keypair to use for "
# "authentication."),
# add_item_link=KEYPAIR_IMPORT_URL)
class Meta:
name = _("VM overlay Info")
help_text_template = ("project/cloudlet/instance/"
"_synthesis_details_help.html")
def clean(self):
cleaned_data = super(SetSynthesizeDetailsAction, self).clean()
overlay_url = cleaned_data.get('overlay_url', None)
if overlay_url is None:
raise forms.ValidationError(_("Need URL to fetch VM overlay"))
# check url format
val = URLValidator()
try:
val(overlay_url)
except ValidationError, e:
raise forms.ValidationError(_("Malformed URL for VM overlay"))
# check url accessibility
try:
header_ret = requests.head(overlay_url)
if header_ret.ok == False:
raise
except Exception as e:
msg = "URL is not accessible : %s" % overlay_url
raise forms.ValidationError(_(msg))
if cleaned_data.get('name', None) is None:
raise forms.ValidationError(_("Need name for the synthesized VM"))
# finally check the header file of VM overlay
# to make sure that associated Base VM exists
from elijah.provisioning.package import VMOverlayPackage
matching_image = None
requested_basevm_sha256 = ''
try:
overlay_package = VMOverlayPackage(overlay_url)
metadata = overlay_package.read_meta()
overlay_meta = msgpack.unpackb(metadata)
requested_basevm_sha256 = overlay_meta.get(Cloudlet_Const.META_BASE_VM_SHA256, None)
matching_image = find_basevm_by_sha256(self.request, requested_basevm_sha256)
except Exception as e:
msg = "Error while finding matching Base VM with %s" % (requested_basevm_sha256)
raise forms.ValidationError(_(msg))
if matching_image == None:
msg = "Cannot find matching base VM with UUID(%s)" % (requested_basevm_sha256)
raise forms.ValidationError(_(msg))
else:
# specify associated base VM from the metadata
cleaned_data['image_id'] = str(matching_image.id)
return cleaned_data
def get_help_text(self):
extra = {}
try:
extra['usages'] = quotas.tenant_quota_usages(self.request)
extra['usages_json'] = json.dumps(extra['usages'])
flavors = json.dumps([f._info for f in
api.nova.flavor_list(self.request)])
extra['flavors'] = flavors
except:
exceptions.handle(self.request,
_("Unable to retrieve quota information."))
return super(SetSynthesizeDetailsAction, self).get_help_text(extra)
def populate_keypair_id_choices(self, request, context):
try:
keypairs = api.nova.keypair_list(request)
keypair_list = [(kp.name, kp.name) for kp in keypairs]
except:
keypair_list = []
exceptions.handle(request,
_('Unable to retrieve keypairs.'))
if keypair_list:
if len(keypair_list) == 1:
self.fields['keypair_id'].initial = keypair_list[0][0]
#keypair_list.insert(0, ("", _("Select a keypair")))
else:
keypair_list = (("", _("No keypairs available.")),)
return keypair_list
def populate_security_group_ids_choices(self, request, context):
try:
groups = api.network.security_group_list(request)
#groups = api.nova.SecurityGroupManager.list(request)
security_group_list = [(sg.name, sg.name) for sg in groups]
except:
exceptions.handle(request,
_('Unable to retrieve list of security groups'))
security_group_list = []
return security_group_list
def _get_available_images(self, request, context):
project_id = context.get('project_id', None)
public_images = []
owned_images = []
public = {"is_public": True,
"status": "active"}
try:
image_detail = api.glance.image_list_detailed(
request, filters=public
)
if len(image_detail) == 2: # icehouse
public_images, _more = image_detail
elif len(image_detail) == 3: # kilo
public_images, _more , has_prev_data = image_detail
except:
public_images = []
pass
# Preempt if we don't have a project_id yet.
if project_id is None:
setattr(self, "_images_for_%s" % project_id, [])
if not hasattr(self, "_images_for_%s" % project_id):
owner = {"property-owner_id": project_id,
"status": "active"}
try:
image_detail = api.glance.image_list_detailed(
request, filters=owner
)
if len(image_detail) == 2: # icehouse
owned_images, _more = image_detail
elif len(image_detail) == 3: # kilo
owned_images, _more , has_prev_data = image_detail
except:
owned_images = []
pass
images = owned_images + public_images
base_vms = list()
for image in images:
if hasattr(image, 'properties') == True:
properties = getattr(image, 'properties')
cloudlet_type = properties.get('cloudlet_type', None)
if cloudlet_type == CLOUDLET_TYPE.IMAGE_TYPE_BASE_DISK:
base_vms.append(image)
# Remove duplicate images
image_ids = []
final_images = []
for image in base_vms:
if image.id not in image_ids:
image_ids.append(image.id)
final_images.append(image)
return [image for image in final_images
if image.container_format not in ('aki', 'ari')]
def populate_flavor_choices(self, request, context):
# return all flavors of Base VM image
try:
matching_flavors = set()
flavors = api.nova.flavor_list(request)
basevm_images = self._get_available_images(request, context)
for basevm_image in basevm_images:
if basevm_image.properties is None or\
len(basevm_image.properties) == 0:
continue
libvirt_xml_str = basevm_image.properties.get(
'base_resource_xml_str', None)
if libvirt_xml_str is None:
continue
cpu_count, memory_mb = get_resource_size(libvirt_xml_str)
disk_gb = basevm_image.min_disk
ret_flavors = find_matching_flavor(flavors,
cpu_count,
memory_mb,
disk_gb)
matching_flavors.update(ret_flavors)
if len(matching_flavors) > 0:
self.fields['flavor'].initial = list(matching_flavors)[0]
except:
matching_flavors= set()
exceptions.handle(request,
_('Unable to retrieve instance flavors.'))
return sorted(list(matching_flavors))
class SetResumeAction(workflows.Step):
action_class = SetResumeDetailAction
contributes = ("image_id", "name", "security_group_ids", "flavor", "keypair_id")
def prepare_action_context(self, request, context):
source_type = request.GET.get("source_type", None)
source_id = request.GET.get("source_id", None)
if source_type != None and source_id != None:
context[source_type] = source_id
return context
class SetSynthesizeAction(workflows.Step):
action_class = SetSynthesizeDetailsAction
contributes = ("image_id", "overlay_url", "name", "security_group_ids", "flavor", "keypair_id")
class SetAccessControlsAction(workflows.Action):
keypair = forms.DynamicChoiceField(label=_("Keypair"),
required=False,
help_text=_("Which keypair to use for "
"authentication."),
add_item_link=KEYPAIR_IMPORT_URL)
groups = forms.MultipleChoiceField(label=_("Security Groups"),
required=True,
initial=["default"],
widget=forms.CheckboxSelectMultiple(),
help_text=_("Launch instance in these "
"security groups."))
class Meta:
name = _("Access & Security")
help_text = _("Control access to your instance via keypairs, "
"security groups, and other mechanisms.")
def populate_keypair_choices(self, request, context):
try:
keypairs = api.nova.keypair_list(request)
keypair_list = [(kp.name, kp.name) for kp in keypairs]
except:
keypair_list = []
exceptions.handle(request,
_('Unable to retrieve keypairs.'))
if keypair_list:
if len(keypair_list) == 1:
self.fields['keypair'].initial = keypair_list[0][0]
#keypair_list.insert(0, ("", _("Select a keypair")))
else:
keypair_list = (("", _("No keypairs available.")),)
return keypair_list
def populate_groups_choices(self, request, context):
try:
groups = api.network.security_group_list(request)
#groups = api.nova.SecurityGroupManager.list(request)
security_group_list = [(sg.name, sg.name) for sg in groups]
except:
exceptions.handle(request,
_('Unable to retrieve list of security groups'))
security_group_list = []
return security_group_list
class ResumeInstance(workflows.Workflow):
slug = "cloudlet resume base instance"
name = _("Cloudlet Resume Base VM")
finalize_button_name = _("Launch")
success_message = _('Cloudlet launched %(count)s named "%(name)s".')
failure_message = _('Cloudlet is unable to launch %(count)s named "%(name)s".')
success_url = "horizon:project:cloudlet:index"
default_steps = (SelectProjectUser,
SetResumeAction,
)
def format_status_message(self, message):
name = self.context.get('name', 'unknown instance')
count = self.context.get('count', 1)
if int(count) > 1:
return message % {"count": _("%s instances") % count,
"name": name}
else:
return message % {"count": _("instance"), "name": name}
def handle(self, request, context):
dev_mapping = None
user_script = None
try:
api.nova.server_create(request,
context['name'],
context['image_id'],
context['flavor'],
context['keypair_id'],
user_script,
context['security_group_ids'],
dev_mapping,
nics=None,
instance_count=1,
)
return True
except:
exceptions.handle(request)
return False
class SynthesisInstance(workflows.Workflow):
slug = "cloudlet syntehsize VM"
name = _("Cloudlet Synthesize VM")
finalize_button_name = _("Synthesize")
success_message = _('Cloudlet synthesized %(count)s named "%(name)s".')
failure_message = _('Cloudlet is unable to synthesize %(count)s named "%(name)s".')
success_url = "horizon:project:cloudlet:index"
default_steps = (SelectProjectUser,
SetSynthesizeAction,)
def format_status_message(self, message):
name = self.context.get('name', 'unknown instance')
count = self.context.get('count', 1)
if int(count) > 1:
return message % {"count": _("%s instances") % count,
"name": name}
else:
return message % {"count": _("instance"), "name": name}
def handle(self, request, context):
try:
ret_json = cloudlet_api.request_synthesis(
request,
context['name'],
context['image_id'],
context['flavor'],
context['keypair_id'],
context['security_group_ids'],
context['overlay_url'],
)
error_msg = ret_json.get("badRequest", None)
if error_msg is not None:
msg = error_msg.get("message", "Failed to request VM synthesis")
raise Exception(msg)
return True
except:
exceptions.handle(request)
return False
|
from collections import OrderedDict
from ceci.sites.cori import parse_int_set
from ceci.pipeline import override_config
from ceci.utils import embolden
from ceci.config import cast_value, cast_to_streamable
def test_parse_ints():
assert parse_int_set("1,2,3") == set([1, 2, 3])
assert parse_int_set("10-12") == set([10, 11, 12])
assert parse_int_set("10-12,15,19-21") == set([10, 11, 12, 15, 19, 20, 21])
def test_override_config():
config = {
"a": "b",
"c": {"d": "e"},
"h": True,
"i": 8,
"j": 17.5,
}
override_config(config, ["a=a", "c.d=e", "f.x.y.z=g", "h=False", "i=9", "j=19.5"])
assert config["a"] == "a"
assert config["c"] == {"d": "e"}
assert config["f"] == {"x": {"y": {"z": "g"}}}
assert config["h"] is False
assert config["i"] == 9
assert config["j"] == 19.5
def test_embolden():
x = 'hj6_9xx0'
y = embolden(x)
assert x in embolden(x)
assert y[4:-4] == x
def test_cast_value():
# dtype is None should allow any value
assert cast_value(None, 5) == 5
assert cast_value(None, "dog") == "dog"
# value is None is always allowed
assert cast_value(float, None) is None
assert cast_value(str, None) is None
# if isinstance(value, dtype) return value
assert cast_value(float, 5.) == 5.
assert cast_value(str, "dog") == "dog"
# if isinstance(value, Mapping) return dtype(**value)
odict = cast_value(dict, dict(key1='dog', key2=5))
assert odict['key1'] == 'dog'
assert odict['key2'] == 5
# if dtype(value) works return that
assert cast_value(float, 5) == 5.
# catch errors
try:
cast_value(float, "dog")
except TypeError:
pass
else:
raise TypeError("Failed to catch type error")
try:
cast_value(int, [])
except TypeError:
pass
else:
raise TypeError("Failed to catch type error")
def test_cast_streamable():
assert cast_to_streamable(dict(key='dog'))['key'] == 'dog'
assert cast_to_streamable(OrderedDict([('key', 'dog')]))['key'] == 'dog'
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
""" Generic algorithms such as registration, statistics, simulation, etc.
"""
from __future__ import absolute_import
__docformat__ = 'restructuredtext'
from . import statistics
from . import fwhm, interpolation, kernel_smooth, diagnostics
from nipy.testing import Tester
test = Tester().test
bench = Tester().bench
|
"""
@Author: NguyenKhacThanh
"""
__all__ = ["init_app"]
def _after_request(response):
"""Add headers to after request
"""
# allowed_origins = [
# re.compile("http?://(.*\.)?i2g\.vn"),
# ]
# origin = flask.request.headers.get("Origin")
# if origin:
# for allowed_origin in allowed_origins:
# if allowed_origin.match(origin):
# response.headers["Access-Control-Allow-Origin"] = origin
# response.headers["Access-Control-Allow-Credentials"] = "true"
response.headers["Access-Control-Allow-Origin"] = "*"
response.headers["Access-Control-Allow-Methods"] = \
"GET, POST, PUT, DELETE, OPTIONS"
response.headers["Access-Control-Allow-Headers"] = \
"Origin, X-Requested-With, Content-Type, Accept, Authorization"
return response
def init_app(app):
"""Fixed error CORS
:param app Flask object
:rtype None
"""
app.after_request(_after_request)
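# --- Illustrative usage sketch (not part of the original module) ---
# `_example_create_app` is an assumed application factory, shown only to
# illustrate where init_app() is meant to be called; `flask` is assumed to
# be installed in the application environment.
def _example_create_app():
    import flask  # assumption: Flask is available where this module is used
    app = flask.Flask(__name__)
    init_app(app)  # every response will now carry the CORS headers above
    return app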
|
# Generated by Django 3.1.2 on 2020-12-18 20:17
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('apis', '0019_auto_20201218_2012'),
]
operations = [
migrations.AlterField(
model_name='commentapimodel',
name='created_at',
field=models.DateField(default=datetime.date.today),
),
migrations.AlterField(
model_name='postapimodel',
name='published_at',
field=models.DateField(default=datetime.date.today),
),
]
|
from app import cache
from base26 import Base26Converter
from database import urltable
class ShortURL:
def __init__(self):
self.__cacheUrlExpireTimeout: int = 300
self.__base26 = Base26Converter()
cache.setUrlTableObject(urltable=urltable)
def getShortURL(self, longURL: str):
longURL: str = longURL.strip()
shortURL = urltable.getShortURL(long_url=longURL)
if shortURL is None:
uid: int = cache.getNewCounter()
shortURL: str = self.__base26.encode(uid)
longURL: str = longURL.strip()
urltable.insertURL(uid=uid, long_url=longURL, short_url=shortURL)
cache.storeURL(
url=longURL, short_url=shortURL, expire=self.__cacheUrlExpireTimeout
)
return shortURL
def getLongURL(self, short_url: str):
short_url = short_url.strip()
longURL: str = cache.getLongUrl(short_url=short_url)
if longURL is None:
longURL = urltable.getLongURL(short_url=short_url)
if longURL is None:
return None
cache.storeURL(
url=longURL, short_url=short_url, expire=self.__cacheUrlExpireTimeout
)
return longURL
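# Usage sketch (assumes `cache` and `urltable` are configured as imported above;
# the URL below is illustrative):
#
#   shortener = ShortURL()
#   code = shortener.getShortURL("https://example.com/some/long/path")
#   assert shortener.getLongURL(code) == "https://example.com/some/long/path"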
if __name__ == "__main__":
print("Hello World")
|
from django.apps import AppConfig
class ContractsConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'contracts'
|
import argparse,os
parser = argparse.ArgumentParser(description='sequencing saturation')
parser.add_argument('--action', metavar='SELECT', default='mkref', choices=['mkref', 'mkgtf', 'stat'], help='Select the action for your program, include mkref,mkgtf,stat, default is mkref')
parser.add_argument('--ingtf', metavar='FILE' ,help='Set ingtf in mkref,mkgtf or stat')
parser.add_argument('--outgtf',metavar='FILE', help='Set outgtf in mkgtf')
parser.add_argument('--attribute',metavar='DICT',default = ['gene_type=protein_coding'], nargs='+',help='Set the filter parameter in mkgtf: key-value pairs in the attributes field to be kept in the GTF, \
                    default is gene_type=protein_coding (":" may also be used as the separator); multiple pairs can be given separated by spaces')
parser.add_argument('--outstat',metavar='FILE', default = 'gtf_type.txt',help='Set the stats outfile in stat, default is "gtf_type.txt" in current dir')
parser.add_argument('--type',metavar='STR', default = 'gene_type',help='Set the the type for stat, default is gene_type')
parser.add_argument('--fasta',metavar='FASTA',help='Set the fasta in mkref')
parser.add_argument('--star_dir',metavar='DIR',default=os.getcwd(), help='Set the star indexdir in mkref, default is current dir')
parser.add_argument('--star',metavar='PROGRAM',help='Set the star program path in mkref')
parser.add_argument('--threads',metavar='INT', help='Set the threads in mkref')
args = parser.parse_args()
from typing import List, Iterable
from collections import Counter
from subprocess import check_call
def read(fp: str, feature: str) -> Iterable[List[str]]:
lines = []
with open(fp) as f:
for line in f:
newline = line.strip()
if newline.startswith('#'):
lines.append(line)
elif newline == '':
continue
else:
lst = newline.split('\t')
if lst[2] == feature:
yield lines
lines = []
lines.append(line)
else:
lines.append(line)
yield lines
def filtergtf(gtf,filtergtf,attribute):
    # accept "key=value" (the default form) as well as "key:value" pairs
    d = dict((i.split("=") if "=" in i else i.split(":"))[::-1] for i in attribute)
gtfread = read(gtf,'gene')
result = open(filtergtf,'w')
for i in gtfread:
if i[0].startswith('#'):
result.writelines(i)
else:
lst = i[0].split('\t')[-1].replace('"', '').strip().strip(';')
lstlist = lst.split('; ')
aDict = {}
for j in lstlist:
aDict[j.split(' ')[0]] = j.split(' ')[-1]
for key1,value1 in aDict.items():
for key2,value2 in d.items():
if key1 == value2 and key2 == value1:
result.writelines(i)
result.close()
def statgtf(gtf,keyword,outfile):
with open(gtf,'r') as fp:
sumDict = []
for line in fp:
line = line.strip()
if line.startswith("#"):
continue
elif line == '':
continue
else:
lst = line.split('\t')
if lst[2] == 'gene':
lstlist = lst[-1].replace('"', '').strip().strip(';')
llstlist = lstlist.split('; ')
                    aDict = dict(l.split(' ', 1) for l in llstlist)
sumDict.append(aDict[keyword])
result = Counter(sumDict)
outfile = open(outfile,'w')
outfile.write('Type'+'\t'+'Count'+'\n')
for k,v in sorted(result.items(), key = lambda x:x[1], reverse=True):
outfile.write(f'{k}\t{v}\n')
outfile.close()
def star_index(fasta,gtf,star_dir,star_program,threads):
if not os.path.exists(star_dir):
os.system('mkdir -p %s'%star_dir)
star_cmd = '%s --runMode genomeGenerate --runThreadN %s --genomeDir %s --genomeFastaFiles %s --sjdbGTFfile %s --sjdbOverhang 99 --limitGenomeGenerateRAM 125000000000'\
%(star_program,threads,star_dir,fasta,gtf)
    print('STAR version: 2.7.2b')
print('runMode: genomeGenerate')
print('runThreadN: %s'%threads)
print('genomeDir: %s'%star_dir)
print('fasta: %s'%fasta)
print('gtf: %s'%gtf)
check_call(star_cmd,shell=True)
if __name__=='__main__':
if args.action == 'stat':
statgtf(args.ingtf,args.type,args.outstat)
if args.action == 'mkgtf':
filtergtf(args.ingtf,args.outgtf,args.attribute)
if args.action == 'mkref':
star_index(args.fasta,args.ingtf,args.star_dir,args.star,args.threads)
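# Example command lines (a sketch; the script filename, file paths and the STAR
# binary location are illustrative, the flags are the ones defined above):
#   python this_script.py --action mkgtf --ingtf genes.gtf --outgtf filtered.gtf --attribute gene_type=protein_coding
#   python this_script.py --action stat  --ingtf genes.gtf --type gene_type --outstat gtf_type.txt
#   python this_script.py --action mkref --ingtf filtered.gtf --fasta genome.fa --star /path/to/STAR --threads 8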
|
print("\n")
texto = input(" Digite um texto: ")
print("\n")
print("Texto digitado: " + texto)
print("\n") |
# cssyacc/parsetab.py
# This file is automatically generated. Do not edit.
_tabversion = '3.2'
_lr_method = 'LALR'
_lr_signature = b'\xd5t\xfa\x19\xdba\xab\x8dqF\x98h\xbfOA\x9c'
_lr_action_items = {'BRACES_L':([0,1,2,3,4,5,6,7,8,9,10,11,12,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,34,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,66,67,68,69,70,71,72,73,74,75,76,77,78,80,81,83,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,107,108,109,110,111,112,114,115,116,],[-63,-63,-1,-5,32,32,-63,-8,-7,-63,-63,-63,-63,-63,-63,-63,-33,-63,-63,-63,-63,-63,-63,-4,-63,-63,-63,-3,-2,-6,32,-63,32,-11,-10,-63,-63,-63,-63,-63,-63,-63,-63,-63,-53,-63,-63,-63,-63,-63,-63,-63,-63,-63,-63,-22,-17,-32,-21,-29,-18,-19,-24,-30,-16,-20,-26,-25,-28,-23,-27,-31,32,-63,-15,-61,-52,-40,-35,-50,-39,-47,-36,-37,-42,-48,-34,-38,-44,-43,-46,-51,-41,-45,-49,-56,-54,-14,32,-13,-12,-62,-57,-55,-9,]),'PARENTHESES_R':([4,6,7,8,9,10,11,12,14,15,16,17,18,19,20,21,22,23,25,26,27,31,33,34,35,36,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,66,67,68,69,70,71,72,73,74,75,76,77,78,79,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,106,107,112,113,114,115,116,],[-63,-63,-8,-7,-63,-63,-63,-63,-63,-63,-63,-33,-63,-63,-63,-63,-63,-63,-63,-63,-63,-63,-58,-63,83,-60,-63,-63,-63,-63,-63,-63,-63,-63,-63,-53,-63,-63,-63,-63,-63,-63,-63,-63,-63,-63,-22,-17,-32,-21,-29,-18,-19,-24,-30,-16,-20,-26,-25,-28,-23,-27,-31,-63,107,112,-61,-63,-52,-40,-35,-50,-39,-47,-36,-37,-42,-48,-34,-38,-44,-43,-46,-51,-41,-45,-49,-56,115,-54,-62,-59,-57,-55,-9,]),'NUMBER':([0,1,2,3,4,6,7,8,9,10,11,12,13,14,15,16,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,34,37,38,39,40,41,42,43,44,45,46,47,49,50,51,52,53,54,55,56,57,58,65,78,80,81,83,84,104,107,108,110,111,112,114,115,116,],[-63,6,-1,-5,6,40,-8,-7,40,40,40,40,6,40,40,40,40,40,40,40,40,40,-4,40,40,40,-3,-2,-6,6,-63,6,-11,-10,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,40,6,6,6,-15,-61,40,-56,-54,-14,-13,-12,-62,-57,-55,-9,]),'WS':([0,1,2,3,4,6,7,8,9,10,11,12,13,14,15,16,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,37,38,39,40,41,42,43,44,45,46,47,49,50,51,52,53,54,55,56,57,58,80,81,83,84,104,107,108,110,111,112,114,115,116,],[-63,7,-1,-5,7,7,-8,-7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,-4,7,7,7,-3,-2,-6,7,-63,-11,-10,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,-15,-61,7,-56,-54,-14,-13,-12,-62,-57,-55,-9,]),'BRACKETS_R':([6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,25,26,27,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,83,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,107,112,114,115,],[-63,-8,-7,-63,-63,-63,-63,-63,-63,-63,-63,-33,-63,-63,-63,-63,-63,-63,-63,-63,-63,-63,-63,-63,-63,-63,-63,-63,-63,-63,-53,-63,-63,-63,-63,-63,-63,-63,-63,-63,-63,-22,-17,-32,-21,-29,104,-63,-18,-19,-24,-30,-16,-20,-26,-25,-28,-23,-27,-31,-61,-52,-40,-35,-50,-39,-47,-36,-37,-42,-48,-34,-38,-44,-43,-46,-51,-41,-45,-49,-56,114,-54,-62,-57,-55,]),'BRACES_R':([6,7,8,9,10,11,12,14,15,16,17,18,19,20,21,22,23,25,26,27,32,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,66,67,68,69,70,71,72,73,74,75,76,77,80,81,83,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,107,108,109,110,111,112,114,115,116,],[-63,-8,-7,-63,-63,-63,-63,-63,-63,-63,-33,-63,-63,-63,-63,-63,-63,-63,-63,-63,-63,-11,-10,-63,-63,-63,-63,-63,-63,-63,-63,-63,-53,-63,-63,-63,-63,-63,-63,-63,-63,-63,-63,-22,-17,-32,-21,-29,-18,-19,-24,-30,-16,-20,-26,-25,-28,-23,-27,-31,-63,-15,-61,-52,-40,-35,-50,-39,-47,-36,-37,-42,-48,-34,-38,-44,-43,-46,-51,-41,-45,-49,-56,-54,-14,116,-13,-12,-62,-57,-55,-9,
]),'COMMENT':([0,1,2,3,6,7,8,9,10,11,12,14,15,16,18,19,20,21,22,23,24,25,26,27,28,29,30,32,37,38,39,40,41,42,43,44,45,46,47,49,50,51,52,53,54,55,56,57,58,80,81,83,84,104,107,108,110,111,112,114,115,116,],[-63,8,-1,-5,8,-8,-7,8,8,8,8,8,8,8,8,8,8,8,8,8,-4,8,8,8,-3,-2,-6,-63,-11,-10,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,-15,-61,8,-56,-54,-14,-13,-12,-62,-57,-55,-9,]),'ATKEYWORD':([0,1,2,3,4,6,7,8,9,10,11,12,13,14,15,16,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,34,37,38,39,40,41,42,43,44,45,46,47,49,50,51,52,53,54,55,56,57,58,65,78,80,81,83,84,104,107,108,110,111,112,114,115,116,],[-63,9,-1,-5,9,41,-8,-7,41,41,41,41,9,41,41,41,41,41,41,41,41,41,-4,41,41,41,-3,-2,-6,9,-63,9,-11,-10,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,41,9,9,9,-15,-61,41,-56,-54,-14,-13,-12,-62,-57,-55,-9,]),'SEMICOLON':([0,1,2,3,4,5,6,7,8,9,10,11,12,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,106,107,108,109,110,111,112,113,114,115,116,],[-63,-63,-1,-5,-63,37,-63,-8,-7,-63,-63,-63,-63,-63,-63,-63,-33,-63,-63,-63,-63,-63,-63,-4,-63,-63,-63,-3,-2,-6,-63,-63,-58,-63,84,-60,-11,-10,-63,-63,-63,-63,-63,-63,-63,-63,-63,-53,-63,-63,-63,-63,-63,-63,-63,-63,-63,-63,-22,-17,-32,-21,-29,-18,-19,-24,-30,-16,-20,-26,-25,-28,-23,-27,-31,-63,84,-63,-15,84,-61,-63,-52,-40,-35,-50,-39,-47,-36,-37,-42,-48,-34,-38,-44,-43,-46,-51,-41,-45,-49,-56,84,-54,-14,37,-13,-12,-62,-59,-57,-55,-9,]),'DELIM':([0,1,2,3,4,6,7,8,9,10,11,12,13,14,15,16,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,34,37,38,39,40,41,42,43,44,45,46,47,49,50,51,52,53,54,55,56,57,58,65,78,80,81,83,84,104,107,108,110,111,112,114,115,116,],[-63,11,-1,-5,11,43,-8,-7,43,43,43,43,11,43,43,43,43,43,43,43,43,43,-4,43,43,43,-3,-2,-6,11,-63,11,-11,-10,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,43,11,11,11,-15,-61,43,-56,-54,-14,-13,-12,-62,-57,-55,-9,]),'DASHMATCH':([0,1,2,3,4,6,7,8,9,10,11,12,13,14,15,16,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,34,37,38,39,40,41,42,43,44,45,46,47,49,50,51,52,53,54,55,56,57,58,65,78,80,81,83,84,104,107,108,110,111,112,114,115,116,],[-63,12,-1,-5,12,44,-8,-7,44,44,44,44,12,44,44,44,44,44,44,44,44,44,-4,44,44,44,-3,-2,-6,12,-63,12,-11,-10,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,44,12,12,12,-15,-61,44,-56,-54,-14,-13,-12,-62,-57,-55,-9,]),'BRACKETS_L':([0,1,2,3,4,6,7,8,9,10,11,12,13,14,15,16,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,34,37,38,39,40,41,42,43,44,45,46,47,49,50,51,52,53,54,55,56,57,58,65,78,80,81,83,84,104,107,108,110,111,112,114,115,116,],[-63,13,-1,-5,13,13,-8,-7,13,13,13,13,13,13,13,13,13,13,13,13,13,13,-4,13,13,13,-3,-2,-6,13,-63,13,-11,-10,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,-15,-61,13,-56,-54,-14,-13,-12,-62,-57,-55,-9,]),'ATBRACES':([0,1,2,3,4,6,7,8,9,10,11,12,13,14,15,16,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,34,37,38,39,40,41,42,43,44,45,46,47,49,50,51,52,53,54,55,56,57,58,65,78,80,81,83,84,104,107,108,110,111,112,114,115,116,],[-63,14,-1,-5,14,45,-8,-7,45,45,45,45,14,45,45,45,45,45,45,45,45,45,-4,45,45,45,-3,-2,-6,14,-63,14,-11,-10,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,45,14,14,14,-15,-61,45,-56,-54,-14,-13,-12,-62,-57,-55,-9,]),'COLON':([0,1,2,3,4,6,7,8,9,10,11,12,13,14,15,16,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,34,37,38,39,40,41,42,43,44,45,46,47,49,50,51,52,53,54,55,56,57,58,65,78,80,81,83,84,104,107,
108,110,111,112,114,115,116,],[-63,15,-1,-5,15,46,-8,-7,46,46,46,46,15,46,46,46,46,46,46,46,46,46,-4,46,46,46,-3,-2,-6,15,-63,15,-11,-10,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,15,15,15,-15,-61,46,-56,-54,-14,-13,-12,-62,-57,-55,-9,]),'PERCENTAGE':([0,1,2,3,4,6,7,8,9,10,11,12,13,14,15,16,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,34,37,38,39,40,41,42,43,44,45,46,47,49,50,51,52,53,54,55,56,57,58,65,78,80,81,83,84,104,107,108,110,111,112,114,115,116,],[-63,16,-1,-5,16,47,-8,-7,47,47,47,47,16,47,47,47,47,47,47,47,47,47,-4,47,47,47,-3,-2,-6,16,-63,16,-11,-10,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,47,16,16,16,-15,-61,47,-56,-54,-14,-13,-12,-62,-57,-55,-9,]),'$end':([0,1,2,3,7,8,24,28,29,30,37,38,116,],[-63,0,-1,-5,-8,-7,-4,-3,-2,-6,-11,-10,-9,]),'FUNCTION':([0,1,2,3,4,6,7,8,9,10,11,12,13,14,15,16,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,34,37,38,39,40,41,42,43,44,45,46,47,49,50,51,52,53,54,55,56,57,58,65,78,80,81,83,84,104,107,108,110,111,112,114,115,116,],[-63,4,-1,-5,4,4,-8,-7,4,4,4,4,4,4,4,4,4,4,4,4,4,4,-4,4,4,4,-3,-2,-6,4,-63,4,-11,-10,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,-15,-61,4,-56,-54,-14,-13,-12,-62,-57,-55,-9,]),'IDENT':([0,1,2,3,4,6,7,8,9,10,11,12,13,14,15,16,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,34,37,38,39,40,41,42,43,44,45,46,47,49,50,51,52,53,54,55,56,57,58,65,78,80,81,83,84,104,107,108,110,111,112,114,115,116,],[-63,19,-1,-5,19,50,-8,-7,50,50,50,50,19,50,50,50,50,50,50,50,50,50,-4,50,50,50,-3,-2,-6,19,-63,19,-11,-10,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,19,19,19,-15,-61,50,-56,-54,-14,-13,-12,-62,-57,-55,-9,]),'HASH':([0,1,2,3,4,6,7,8,9,10,11,12,13,14,15,16,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,34,37,38,39,40,41,42,43,44,45,46,47,49,50,51,52,53,54,55,56,57,58,65,78,80,81,83,84,104,107,108,110,111,112,114,115,116,],[-63,20,-1,-5,20,51,-8,-7,51,51,51,51,20,51,51,51,51,51,51,51,51,51,-4,51,51,51,-3,-2,-6,20,-63,20,-11,-10,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,20,20,20,-15,-61,51,-56,-54,-14,-13,-12,-62,-57,-55,-9,]),'STRING':([0,1,2,3,4,6,7,8,9,10,11,12,13,14,15,16,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,34,37,38,39,40,41,42,43,44,45,46,47,49,50,51,52,53,54,55,56,57,58,65,78,80,81,83,84,104,107,108,110,111,112,114,115,116,],[-63,21,-1,-5,21,52,-8,-7,52,52,52,52,21,52,52,52,52,52,52,52,52,52,-4,52,52,52,-3,-2,-6,21,-63,21,-11,-10,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,52,21,21,21,-15,-61,52,-56,-54,-14,-13,-12,-62,-57,-55,-9,]),'URI':([0,1,2,3,4,6,7,8,9,10,11,12,13,14,15,16,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,34,37,38,39,40,41,42,43,44,45,46,47,49,50,51,52,53,54,55,56,57,58,65,78,80,81,83,84,104,107,108,110,111,112,114,115,116,],[-63,22,-1,-5,22,53,-8,-7,53,53,53,53,22,53,53,53,53,53,53,53,53,53,-4,53,53,53,-3,-2,-6,22,-63,22,-11,-10,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,53,22,22,22,-15,-61,53,-56,-54,-14,-13,-12,-62,-57,-55,-9,]),'INCLUDES':([0,1,2,3,4,6,7,8,9,10,11,12,13,14,15,16,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,34,37,38,39,40,41,42,43,44,45,46,47,49,50,51,52,53,54,55,56,57,58,65,78,80,81,83,84,104,107,108,110,111,112,114,115,116,],[-63,23,-1,-5,23,54,-8,-7,54,54,54,54,23,54,54,54,54,54,54,54,54,54,-4,54,54,54,-3,-2,-6,23,-63,23,-11,-10,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,54,23,23,23,-15,-61,54,-56,-54,-14,-13,-12,-62,-57,-55,-9,]),'DIMENSION':([0,1,2,3,4,6,7,8,9,10,11,12,13,14,15,16,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,34,37,38,39,40,41,42,43,44,45,46,47,49,50,51,52,53,54,55,56,57,58,65,78,80,81,83,84,1
04,107,108,110,111,112,114,115,116,],[-63,25,-1,-5,25,56,-8,-7,56,56,56,56,25,56,56,56,56,56,56,56,56,56,-4,56,56,56,-3,-2,-6,25,-63,25,-11,-10,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,56,25,25,25,-15,-61,56,-56,-54,-14,-13,-12,-62,-57,-55,-9,]),'UNICODE_RANGE':([0,1,2,3,4,6,7,8,9,10,11,12,13,14,15,16,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,34,37,38,39,40,41,42,43,44,45,46,47,49,50,51,52,53,54,55,56,57,58,65,78,80,81,83,84,104,107,108,110,111,112,114,115,116,],[-63,26,-1,-5,26,57,-8,-7,57,57,57,57,26,57,57,57,57,57,57,57,57,57,-4,57,57,57,-3,-2,-6,26,-63,26,-11,-10,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,26,26,26,-15,-61,57,-56,-54,-14,-13,-12,-62,-57,-55,-9,]),'CDC':([0,1,2,3,7,8,24,28,29,30,37,38,116,],[-63,28,-1,-5,-8,-7,-4,-3,-2,-6,-11,-10,-9,]),'CDO':([0,1,2,3,7,8,24,28,29,30,37,38,116,],[-63,29,-1,-5,-8,-7,-4,-3,-2,-6,-11,-10,-9,]),'PARENTHESES_L':([0,1,2,3,4,6,7,8,9,10,11,12,13,14,15,16,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,34,37,38,39,40,41,42,43,44,45,46,47,49,50,51,52,53,54,55,56,57,58,65,78,80,81,83,84,104,107,108,110,111,112,114,115,116,],[-63,31,-1,-5,31,31,-8,-7,31,31,31,31,31,31,31,31,31,31,31,31,31,31,-4,31,31,31,-3,-2,-6,31,-63,31,-11,-10,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,-15,-61,31,-56,-54,-14,-13,-12,-62,-57,-55,-9,]),}
_lr_action = { }
for _k, _v in _lr_action_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_action: _lr_action[_x] = { }
_lr_action[_x][_k] = _y
del _lr_action_items
_lr_goto_items = {'comment':([1,6,9,10,11,12,14,15,16,18,19,20,21,22,23,25,26,27,39,40,41,42,43,44,45,46,47,49,50,51,52,53,54,55,56,57,58,80,84,],[3,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,108,39,]),'function':([1,4,6,9,10,11,12,13,14,15,16,18,19,20,21,22,23,25,26,27,31,34,39,40,41,42,43,44,45,46,47,49,50,51,52,53,54,55,56,57,58,65,78,80,84,],[18,18,49,49,49,49,49,18,49,49,49,49,49,49,49,49,49,49,49,49,18,18,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,49,18,18,18,49,]),'parentheses':([1,4,6,9,10,11,12,13,14,15,16,18,19,20,21,22,23,25,26,27,31,34,39,40,41,42,43,44,45,46,47,49,50,51,52,53,54,55,56,57,58,65,78,80,84,],[27,27,58,58,58,58,58,27,58,58,58,58,58,58,58,58,58,58,58,58,27,27,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,27,27,27,58,]),'brackets':([1,4,6,9,10,11,12,13,14,15,16,18,19,20,21,22,23,25,26,27,31,34,39,40,41,42,43,44,45,46,47,49,50,51,52,53,54,55,56,57,58,65,78,80,84,],[10,10,42,42,42,42,42,10,42,42,42,42,42,42,42,42,42,42,42,42,10,10,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,10,10,10,42,]),'text':([1,4,13,31,34,65,78,80,],[5,33,64,33,33,105,33,109,]),'element':([1,80,],[30,111,]),'stylesheet':([0,],[1,]),'elements':([32,],[80,]),'ws':([1,4,6,9,10,11,12,13,14,15,16,18,19,20,21,22,23,25,26,27,31,39,40,41,42,43,44,45,46,47,49,50,51,52,53,54,55,56,57,58,80,84,],[24,34,55,55,55,55,55,65,55,55,55,55,55,55,55,55,55,55,55,55,78,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,110,55,]),'block':([4,5,31,34,78,109,],[36,38,36,36,36,38,]),'ptext':([4,31,34,78,],[35,79,82,106,]),'textsuffix':([6,9,10,11,12,14,15,16,18,19,20,21,22,23,25,26,27,39,40,41,42,43,44,45,46,47,49,50,51,52,53,54,55,56,57,58,84,],[59,60,61,62,63,66,67,68,69,70,71,72,73,74,75,76,77,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,113,]),'empty':([0,1,4,6,9,10,11,12,13,14,15,16,18,19,20,21,22,23,25,26,27,31,32,34,39,40,41,42,43,44,45,46,47,49,50,51,52,53,54,55,56,57,58,65,78,80,84,],[2,17,17,48,48,48,48,48,17,48,48,48,48,48,48,48,48,48,48,48,48,17,81,17,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,48,17,17,17,48,]),}
_lr_goto = { }
for _k, _v in _lr_goto_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_goto: _lr_goto[_x] = { }
_lr_goto[_x][_k] = _y
del _lr_goto_items
_lr_productions = [
("S' -> stylesheet","S'",1,None,None,None),
('stylesheet -> empty','stylesheet',1,'p_stylesheet','/home/domen/CSSQC/cssyacc/__init__.py',27),
('stylesheet -> stylesheet CDO','stylesheet',2,'p_stylesheet','/home/domen/CSSQC/cssyacc/__init__.py',28),
('stylesheet -> stylesheet CDC','stylesheet',2,'p_stylesheet','/home/domen/CSSQC/cssyacc/__init__.py',29),
('stylesheet -> stylesheet ws','stylesheet',2,'p_stylesheet','/home/domen/CSSQC/cssyacc/__init__.py',30),
('stylesheet -> stylesheet comment','stylesheet',2,'p_stylesheet','/home/domen/CSSQC/cssyacc/__init__.py',31),
('stylesheet -> stylesheet element','stylesheet',2,'p_stylesheet','/home/domen/CSSQC/cssyacc/__init__.py',32),
('comment -> COMMENT','comment',1,'p_comment','/home/domen/CSSQC/cssyacc/__init__.py',39),
('ws -> WS','ws',1,'p_ws','/home/domen/CSSQC/cssyacc/__init__.py',43),
('block -> BRACES_L elements text BRACES_R','block',4,'p_block','/home/domen/CSSQC/cssyacc/__init__.py',47),
('element -> text block','element',2,'p_element','/home/domen/CSSQC/cssyacc/__init__.py',51),
('element -> text SEMICOLON','element',2,'p_element','/home/domen/CSSQC/cssyacc/__init__.py',52),
('elements -> elements element','elements',2,'p_elements','/home/domen/CSSQC/cssyacc/__init__.py',59),
('elements -> elements ws','elements',2,'p_elements','/home/domen/CSSQC/cssyacc/__init__.py',60),
('elements -> elements comment','elements',2,'p_elements','/home/domen/CSSQC/cssyacc/__init__.py',61),
('elements -> empty','elements',1,'p_elements','/home/domen/CSSQC/cssyacc/__init__.py',62),
('text -> IDENT textsuffix','text',2,'p_text','/home/domen/CSSQC/cssyacc/__init__.py',69),
('text -> ATKEYWORD textsuffix','text',2,'p_text','/home/domen/CSSQC/cssyacc/__init__.py',70),
('text -> ATBRACES textsuffix','text',2,'p_text','/home/domen/CSSQC/cssyacc/__init__.py',71),
('text -> COLON textsuffix','text',2,'p_text','/home/domen/CSSQC/cssyacc/__init__.py',72),
('text -> HASH textsuffix','text',2,'p_text','/home/domen/CSSQC/cssyacc/__init__.py',73),
('text -> DELIM textsuffix','text',2,'p_text','/home/domen/CSSQC/cssyacc/__init__.py',74),
('text -> NUMBER textsuffix','text',2,'p_text','/home/domen/CSSQC/cssyacc/__init__.py',75),
('text -> DIMENSION textsuffix','text',2,'p_text','/home/domen/CSSQC/cssyacc/__init__.py',76),
('text -> PERCENTAGE textsuffix','text',2,'p_text','/home/domen/CSSQC/cssyacc/__init__.py',77),
('text -> URI textsuffix','text',2,'p_text','/home/domen/CSSQC/cssyacc/__init__.py',78),
('text -> STRING textsuffix','text',2,'p_text','/home/domen/CSSQC/cssyacc/__init__.py',79),
('text -> UNICODE_RANGE textsuffix','text',2,'p_text','/home/domen/CSSQC/cssyacc/__init__.py',80),
('text -> INCLUDES textsuffix','text',2,'p_text','/home/domen/CSSQC/cssyacc/__init__.py',81),
('text -> DASHMATCH textsuffix','text',2,'p_text','/home/domen/CSSQC/cssyacc/__init__.py',82),
('text -> function textsuffix','text',2,'p_text','/home/domen/CSSQC/cssyacc/__init__.py',83),
('text -> parentheses textsuffix','text',2,'p_text','/home/domen/CSSQC/cssyacc/__init__.py',84),
('text -> brackets textsuffix','text',2,'p_text','/home/domen/CSSQC/cssyacc/__init__.py',85),
('text -> empty','text',1,'p_text','/home/domen/CSSQC/cssyacc/__init__.py',86),
('textsuffix -> IDENT textsuffix','textsuffix',2,'p_textsuffix','/home/domen/CSSQC/cssyacc/__init__.py',96),
('textsuffix -> ATKEYWORD textsuffix','textsuffix',2,'p_textsuffix','/home/domen/CSSQC/cssyacc/__init__.py',97),
('textsuffix -> ATBRACES textsuffix','textsuffix',2,'p_textsuffix','/home/domen/CSSQC/cssyacc/__init__.py',98),
('textsuffix -> COLON textsuffix','textsuffix',2,'p_textsuffix','/home/domen/CSSQC/cssyacc/__init__.py',99),
('textsuffix -> HASH textsuffix','textsuffix',2,'p_textsuffix','/home/domen/CSSQC/cssyacc/__init__.py',100),
('textsuffix -> DELIM textsuffix','textsuffix',2,'p_textsuffix','/home/domen/CSSQC/cssyacc/__init__.py',101),
('textsuffix -> NUMBER textsuffix','textsuffix',2,'p_textsuffix','/home/domen/CSSQC/cssyacc/__init__.py',102),
('textsuffix -> DIMENSION textsuffix','textsuffix',2,'p_textsuffix','/home/domen/CSSQC/cssyacc/__init__.py',103),
('textsuffix -> PERCENTAGE textsuffix','textsuffix',2,'p_textsuffix','/home/domen/CSSQC/cssyacc/__init__.py',104),
('textsuffix -> URI textsuffix','textsuffix',2,'p_textsuffix','/home/domen/CSSQC/cssyacc/__init__.py',105),
('textsuffix -> STRING textsuffix','textsuffix',2,'p_textsuffix','/home/domen/CSSQC/cssyacc/__init__.py',106),
('textsuffix -> UNICODE_RANGE textsuffix','textsuffix',2,'p_textsuffix','/home/domen/CSSQC/cssyacc/__init__.py',107),
('textsuffix -> INCLUDES textsuffix','textsuffix',2,'p_textsuffix','/home/domen/CSSQC/cssyacc/__init__.py',108),
('textsuffix -> DASHMATCH textsuffix','textsuffix',2,'p_textsuffix','/home/domen/CSSQC/cssyacc/__init__.py',109),
('textsuffix -> function textsuffix','textsuffix',2,'p_textsuffix','/home/domen/CSSQC/cssyacc/__init__.py',110),
('textsuffix -> parentheses textsuffix','textsuffix',2,'p_textsuffix','/home/domen/CSSQC/cssyacc/__init__.py',111),
('textsuffix -> brackets textsuffix','textsuffix',2,'p_textsuffix','/home/domen/CSSQC/cssyacc/__init__.py',112),
('textsuffix -> ws textsuffix','textsuffix',2,'p_textsuffix','/home/domen/CSSQC/cssyacc/__init__.py',113),
('textsuffix -> comment textsuffix','textsuffix',2,'p_textsuffix','/home/domen/CSSQC/cssyacc/__init__.py',114),
('textsuffix -> empty','textsuffix',1,'p_textsuffix','/home/domen/CSSQC/cssyacc/__init__.py',115),
('parentheses -> PARENTHESES_L ptext PARENTHESES_R','parentheses',3,'p_parentheses','/home/domen/CSSQC/cssyacc/__init__.py',125),
('parentheses -> PARENTHESES_L ws ptext PARENTHESES_R','parentheses',4,'p_parentheses','/home/domen/CSSQC/cssyacc/__init__.py',126),
('brackets -> BRACKETS_L text BRACKETS_R','brackets',3,'p_brackets','/home/domen/CSSQC/cssyacc/__init__.py',133),
('brackets -> BRACKETS_L ws text BRACKETS_R','brackets',4,'p_brackets','/home/domen/CSSQC/cssyacc/__init__.py',134),
('ptext -> text','ptext',1,'p_ptext','/home/domen/CSSQC/cssyacc/__init__.py',141),
('ptext -> ptext SEMICOLON textsuffix','ptext',3,'p_ptext','/home/domen/CSSQC/cssyacc/__init__.py',142),
('ptext -> block','ptext',1,'p_ptext','/home/domen/CSSQC/cssyacc/__init__.py',143),
('function -> FUNCTION ptext PARENTHESES_R','function',3,'p_function','/home/domen/CSSQC/cssyacc/__init__.py',154),
('function -> FUNCTION ws ptext PARENTHESES_R','function',4,'p_function','/home/domen/CSSQC/cssyacc/__init__.py',155),
('empty -> <empty>','empty',0,'p_empty','/home/domen/CSSQC/cssyacc/__init__.py',162),
]
|
# -*- coding: utf-8 -*-
# ------------- Number of seconds you have lived -------------
# Variable definitions
anios = 30
dias_por_anio = 365
horas_por_dia = 24
segundos_por_hora = 3600
# Calculation
print(anios * dias_por_anio * horas_por_dia * segundos_por_hora)
|
from .draw import plot |
# System modules.
from datetime import datetime
# 3rd party modules.
from flask import make_response, abort
import requests
# Personal modules.
from config import db
from models import Master, MasterSchema, Detail, DetailSchema
# Get current time as a timestamp string.
def _get_timestamp():
    return datetime.now().strftime("%Y-%m-%d %H:%M:%S")
# From postal code to city name (Spain only).
def _get_cityname(postalcode):
try:
r = requests.get(
"http://api.geonames.org/postalCodeLookupJSON?postalcode="
+ postalcode
+ "&country=ES&username=jperezvisaires"
)
cityname = r.json()["postalcodes"][-1]["placeName"]
    except Exception:
cityname = None
return cityname
# CREATE operations.
def create(user):
"""
This function creates a new user in the database
based on the passed-in user data.
:param user: User to create in database
:return: 201 on success, 400 on bad postal code, 406 on user exists
"""
username = user.get("username", None)
postalcode = user.get("postalcode", None)
cityname = _get_cityname(postalcode)
# Does the user already exist?
existing_master = Master.query.filter(Master.username == username).one_or_none()
if existing_master is None and cityname is not None:
# Create a user instance using the schema and the passed-in user.
user_master = Master(username=username)
db.session.add(user_master)
user_detail = Detail(postalcode=postalcode, cityname=cityname)
db.session.add(user_detail)
# Save changes to database.
db.session.commit()
return make_response(
"{username} successfully created".format(username=username), 201,
)
# If the Postal Code doesn't return any hits in Geonames
elif cityname is None:
abort(
400, "Postal code {postalcode} is invalid".format(postalcode=postalcode),
)
# Otherwise, they exist, and that's an error
else:
abort(
406,
"User with username {username} already exists".format(username=username),
)
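# Example request payload accepted by create() (field values are illustrative):
#   {"username": "jdoe", "postalcode": "48001"}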
# READ operations.
def read_all():
"""
This function responds to a request for /api/users
with the complete lists of users.
:return: JSON string of list of users
"""
# Create the list of people from our data
master = Master.query.all()
# Serialize the data for the response
master_schema = MasterSchema(many=True)
return master_schema.dump(master)
def read_one(username):
"""
This function responds to a request for /api/users/{username}
with one matching user from the database.
:param username: Username of user to find
:return: User matching username
"""
# Get the user requested
master = Master.query.filter(Master.username == username).one_or_none()
# Did we find a user?
if master is not None:
# Serialize the data for the response
master_schema = MasterSchema()
return master_schema.dump(master)
# Otherwise, not found.
else:
abort(404, "User with username {username} not found".format(username=username))
def read_one_details(username):
"""
This function responds to a request for /api/users/{username}
    with the detail record (postal code and city name) of one matching user.
    :param username: Username of user to find
    :return: Detail record of the matching user
"""
# Get the user requested
master = Master.query.filter(Master.username == username).one_or_none()
    detail = None
    if master is not None:
        detail = Detail.query.filter(Detail.user_id == master.user_id).one_or_none()
    # Did we find the user's detail record?
    if detail is not None:
# Serialize the data for the response
detail_schema = DetailSchema()
return detail_schema.dump(detail)
# Otherwise, not found.
else:
abort(404, "User with username {username} not found".format(username=username))
# UPDATE operations.
def update(username, user):
"""
This function updates an existing user in the database.
:param username: Username of user to update in the database
:param user: User to update
:return: Updated user structure
"""
cityname = _get_cityname(user.get("postalcode"))
# Get the user requested from the db into session
master = Master.query.filter(Master.username == username).one_or_none()
    # Does the user exist and does the new postal code resolve to a city?
    if master is not None and cityname is not None:
# turn the passed in person into a db object
master_user = {"username": user["username"]}
master_schema = MasterSchema()
master_update = master_schema.load(master_user, session=db.session)
detail_user = {"postalcode": user["postalcode"], "cityname": cityname}
detail_schema = DetailSchema()
detail_update = detail_schema.load(detail_user, session=db.session)
# Set the id to the person we want to update
master_update.user_id = master.user_id
        detail_update.user_id = master.user_id  # Detail rows share the master's user_id
# merge the new object into the old and commit it to the db
db.session.merge(master_update)
db.session.merge(detail_update)
db.session.commit()
# return updated person in the response
data = master_schema.dump(master_update)
return data, 200
# If the Postal Code doesn't return any hits in Geonames
elif cityname is None:
abort(
400,
"Postal code {postalcode} is invalid".format(
postalcode=user.get("postalcode")
),
)
# Otherwise, that's an error
else:
abort(
404, "User with username {username} not found".format(username=username),
)
# DELETE operations.
def delete(username):
"""
This function deletes a user from the users structure
:param username: Username of user to delete
:return: 200 on successful delete, 404 if not found
"""
# Get the person requested
master = Master.query.filter(Master.username == username).one_or_none()
    # Does the person to delete exist?
    if master is not None:
        detail = Detail.query.filter(Detail.user_id == master.user_id).one_or_none()
        db.session.delete(master)
        if detail is not None:
            db.session.delete(detail)
db.session.commit()
return make_response(
"{username} successfully deleted".format(username=username), 200
)
# Otherwise, user to delete not found
else:
abort(404, "User with username {username} not found".format(username=username))
|
from django.db import models
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
DEFAULT_UPLOAD_TO = 'product_images'
class ImageProduct(models.ForeignKey):
def __init__(
self,
to='products.Product',
verbose_name=_("Product"),
related_name='images',
on_delete=models.CASCADE,
*args, **kwargs):
super(ImageProduct, self).__init__(
to,
verbose_name=verbose_name,
related_name=related_name,
on_delete=on_delete,
*args, **kwargs)
class ImageFile(models.ImageField):
def __init__(
self,
verbose_name=_("File"),
upload_to=DEFAULT_UPLOAD_TO,
max_length=255,
*args, **kwargs):
super(ImageFile, self).__init__(
verbose_name=verbose_name,
upload_to=upload_to,
max_length=max_length,
*args, **kwargs)
class ProductImageNotFoundError(IOError):
pass
def get_product_image_class(upload_to=DEFAULT_UPLOAD_TO):
class ProductImage(models.Model):
product = ImageProduct()
file = ImageFile(upload_to=upload_to)
def get_preview(self, size):
from sorl.thumbnail import get_thumbnail
if not self.file:
return None
try:
return get_thumbnail(self.file.file, size)
except IOError:
raise ProductImageNotFoundError()
def get_preview_tag(self, width=100, empty_label='-----'):
if not self.file:
return empty_label
try:
url = self.get_preview(str(width)).url
except ProductImageNotFoundError:
return _('Image not found')
return mark_safe(
'<img src="{}" width: {}px; title="{}" />'.format(
url, width, self.file.url))
@property
def preview_tag(self):
return self.get_preview_tag()
preview_tag.fget.short_description = _('Preview')
def __str__(self):
return str(self.product)
class Meta:
abstract = True
ordering = ['id']
verbose_name = _('Product image')
verbose_name_plural = _('Product images')
return ProductImage
def get_ordered_product_image_class(upload_to=DEFAULT_UPLOAD_TO):
from ordered_model.models import OrderedModelBase
ProductImage = get_product_image_class(upload_to)
class OrderedProductImage(OrderedModelBase, ProductImage):
order = models.PositiveIntegerField(_('Ordering'), default=0)
order_field_name = 'order'
order_with_respect_to = 'product'
class Meta(ProductImage.Meta):
abstract = True
ordering = ['order', 'id']
return OrderedProductImage
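# Usage sketch (model names and the upload path are illustrative):
#
#   ProductImageBase = get_product_image_class(upload_to='catalog/images')
#
#   class ProductImage(ProductImageBase):
#       pass  # concrete, non-abstract image model bound to products.Product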
|
#
# This file is part of GreatFET
#
from .base import GlitchKitModule
from ..protocol import vendor_requests
from ..peripherals.gpio import GPIOPin
class GlitchKitSimple(GlitchKitModule):
"""
Simple trigger module for GlitchKit. Provides simple trigger conditions
(e.g. "the SPI clock ticks 25 times while CE is high and WP is low") for quick construction
    of simple glitch conditions. This is surprisingly useful for how simple it is. :)
"""
SHORT_NAME = 'simple'
# Event flags.
COUNT_REACHED = 0x001
# Store the max number of each type of condition we can have.
MAX_EDGE_CONDITIONS = 8
MAX_LEVEL_CONDITIONS = 8
# The types of conditions we support.
    # These must match the enumeration in firmware/common/gpio_int.h.
CONDITION_TYPES = {
'DISABLED' : -1, # for convenience, we'll ignore these : )
'LEVEL_HIGH' : 0,
'LEVEL_LOW' : 1,
'EDGE_RISING' : 2,
'EDGE_FALLING' : 3,
'EDGE_BOTH' : 4
}
def __init__(self, board):
"""
Create a new GlitchKit module allowing triggering on simple events.
Args:
board -- A representation of the GreatFET that will perform the actual
triggering.
"""
# Store a reference to the parent board.
self.board = board
def prime_trigger_on_event_count(self, count, conditions):
# TODO: get rid of this alias
        self.watch_for_event(count, conditions)
def watch_for_event(self, count, conditions):
"""
Sets up the GreatFET to issue an event when a given number of events have passed.
Args:
count -- The count to reach.
            conditions -- A list of 2-tuples, each containing a key from
                self.CONDITION_TYPES and a GPIOPin or GPIOPin name.
"""
# Build a packet that describes each of the conditions to be described.
# This is the bulk of the communication.
packet = self._build_condition_packet(conditions)
# Split the target count into two, so we can wedge it into the setup
# fields.
count_high = count >> 16
count_low = count & 0xFFFF
# Finally, issue the raw request that should generate the relevant count.
self.board.comms._vendor_request_out(vendor_requests.GLITCHKIT_SIMPLE_START_EVENT_COUNT,
value=count_high, index=count_low, data=packet, timeout=3000)
def _build_condition_packet(self, conditions, ensure_input=True):
"""
Builds a packet that can communicate a list of conditions to the GreatFET.
Args:
conditions -- A list of 2-tuples, which should each contain
a _key_ from self.CONDITION_TYPES, and a GPIOPin or GPIOPin name.
ensure_input -- If set, this will ensure that all relevant GPIOPins are
set to input mode before using them for glitching.
Returns:
A list of bytes that will represent the given command.
"""
packet = []
        # Keep track of the number of each type of condition observed
num_edge = 0
num_level = 0
# Convert each condition to a set of bytes, and add it to the packet.
for condition in conditions:
# Skip any DISABLED conditions.
if condition[0] == 'DISABLED':
continue
elif condition[0] in ['LEVEL_HIGH', 'LEVEL_LOW']:
num_level += 1
elif condition[0] in ['EDGE_RISING', 'EDGE_FALLING', 'EDGE_BOTH']:
num_edge += 1
else:
raise ValueError("Invalid condition type {}!".format(condition[0]))
            # Check to ensure we're not going to generate more conditions than the device can handle.
if (num_level > self.MAX_LEVEL_CONDITIONS) or (num_edge > self.MAX_EDGE_CONDITIONS):
raise ValueError('Too many conditions!')
command = self._build_condition_triple(condition, ensure_input)
packet.extend(command)
return packet
def _build_condition_triple(self, condition, ensure_input=True):
"""
Converts a condition into a format that the GreatFET simple glitching module
will accept.
Args:
            condition -- A single 2-tuple, which should contain a _key_ from
self.CONDITION_TYPES, and a GPIOPin or GPIOPin name.
ensure_input -- If set, this will ensure that the relevant GPIOPins is
set to input mode before using them for glitching.
Returns:
A list of bytes that will represent the given command.
"""
raw_mode, pin_or_name = condition
# Resolve the arguments to the types we'll need to communicate to the GreatFET.
mode = self.CONDITION_TYPES[raw_mode]
gpio_pin = pin_or_name if isinstance(pin_or_name, GPIOPin) else self.board.gpio.get_pin(pin_or_name)
port_number = gpio_pin.get_port()
pin_number = gpio_pin.get_pin()
# If we're ensuring the relevant pin is an input, configure it accordingly.
if ensure_input:
gpio_pin.set_direction(self.board.gpio.DIRECTION_IN)
# Finally, return the configuration list.
return [mode, port_number, pin_number]
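# Usage sketch (the board object and pin names are illustrative; condition keys
# must come from GlitchKitSimple.CONDITION_TYPES):
#
#   simple = GlitchKitSimple(board)
#   conditions = [('EDGE_RISING', 'J1_P4'), ('LEVEL_HIGH', 'J1_P6')]
#   simple.watch_for_event(25, conditions)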
|
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import labgraph as lg
from typing import Dict, Tuple
# Make the imports work when running from LabGraph root directory
import sys
sys.path.append("./")
# LabGraph WebSockets Components
from labgraph.websockets.ws_server.ws_api_node_server import (
WSAPIServerConfig,
WSAPIServerNode,
)
# LabGraph Monitor Components
from extensions.yaml_support.labgraph_monitor.server.enums.enums import ENUMS
from extensions.yaml_support.labgraph_monitor.aliases.aliases import SerializedGraph
from extensions.yaml_support.labgraph_monitor.server.serializer_node import SerializerConfig, Serializer
from extensions.yaml_support.labgraph_monitor.generate_lg_monitor.generate_lg_monitor import set_graph_topology
# Graph Components
from extensions.graphviz_support.graphviz_support.tests.demo_graph.noise_generator import NoiseGeneratorConfig, NoiseGenerator
from extensions.graphviz_support.graphviz_support.tests.demo_graph.amplifier import AmplifierConfig, Amplifier
from extensions.graphviz_support.graphviz_support.tests.demo_graph.attenuator import AttenuatorConfig, Attenuator
from extensions.graphviz_support.graphviz_support.tests.demo_graph.rolling_averager import RollingConfig, RollingAverager
# LabGraph WebSockets Configurations
APP_ID = 'LABGRAPH.MONITOR'
WS_SERVER = ENUMS.WS_SERVER
STREAM = ENUMS.STREAM
DEFAULT_IP = WS_SERVER.DEFAULT_IP
DEFAULT_PORT = WS_SERVER.DEFAULT_PORT
DEFAULT_API_VERSION = WS_SERVER.DEFAULT_API_VERSION
SAMPLE_RATE = 5
# Graph Configurations
NUM_FEATURES = 100
WINDOW = 2.0
REFRESH_RATE = 2.0
OUT_IN_RATIO = 1.2
ATTENUATION = 5.0
class Demo(lg.Graph):
# LabGraph WebSockets Component
WS_SERVER_NODE: WSAPIServerNode
# LabGraph Monitor Component
SERIALIZER: Serializer
# Graph Components
NOISE_GENERATOR: NoiseGenerator
ROLLING_AVERAGER: RollingAverager
AMPLIFIER: Amplifier
ATTENUATOR: Attenuator
# Used when running `generate_labgraph_monitor(graph)`
def set_topology(self, topology: SerializedGraph, sub_pub_map: Dict) -> None:
self._topology = topology
self._sub_pub_match = sub_pub_map
def setup(self) -> None:
self.WS_SERVER_NODE.configure(
WSAPIServerConfig(
app_id=APP_ID,
ip=WS_SERVER.DEFAULT_IP,
port=ENUMS.WS_SERVER.DEFAULT_PORT,
api_version=ENUMS.WS_SERVER.DEFAULT_API_VERSION,
num_messages=-1,
enums=ENUMS(),
sample_rate=SAMPLE_RATE,
)
)
self.SERIALIZER.configure(
SerializerConfig(
data=self._topology,
sub_pub_match=self._sub_pub_match,
sample_rate=SAMPLE_RATE,
stream_name=STREAM.LABGRAPH_MONITOR,
stream_id=STREAM.LABGRAPH_MONITOR_ID,
)
)
self.NOISE_GENERATOR.configure(
NoiseGeneratorConfig(
sample_rate=float(SAMPLE_RATE),
num_features=NUM_FEATURES,
)
)
self.ROLLING_AVERAGER.configure(
RollingConfig(
window=WINDOW,
)
)
self.AMPLIFIER.configure(
AmplifierConfig(
out_in_ratio=OUT_IN_RATIO,
)
)
self.ATTENUATOR.configure(
AttenuatorConfig(
attenuation=ATTENUATION,
)
)
def connections(self) -> lg.Connections:
return (
(self.NOISE_GENERATOR.NOISE_GENERATOR_OUTPUT, self.ROLLING_AVERAGER.ROLLING_AVERAGER_INPUT),
(self.NOISE_GENERATOR.NOISE_GENERATOR_OUTPUT, self.AMPLIFIER.AMPLIFIER_INPUT),
(self.NOISE_GENERATOR.NOISE_GENERATOR_OUTPUT, self.ATTENUATOR.ATTENUATOR_INPUT),
(self.NOISE_GENERATOR.NOISE_GENERATOR_OUTPUT, self.SERIALIZER.SERIALIZER_INPUT_1),
(self.ROLLING_AVERAGER.ROLLING_AVERAGER_OUTPUT, self.SERIALIZER.SERIALIZER_INPUT_2),
(self.AMPLIFIER.AMPLIFIER_OUTPUT, self.SERIALIZER.SERIALIZER_INPUT_3),
(self.ATTENUATOR.ATTENUATOR_OUTPUT, self.SERIALIZER.SERIALIZER_INPUT_4),
(self.SERIALIZER.SERIALIZER_OUTPUT, self.WS_SERVER_NODE.topic),
)
def process_modules(self) -> Tuple[lg.Module, ...]:
return (
self.NOISE_GENERATOR,
self.ROLLING_AVERAGER,
self.AMPLIFIER,
self.ATTENUATOR,
self.SERIALIZER,
self.WS_SERVER_NODE,
)
if __name__ == "__main__":
graph = Demo()
set_graph_topology(graph=graph)
runner = lg.ParallelRunner(graph=graph)
runner.run() |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
from gnpy.core.utils import (load_json,
itufs,
freq2wavelength,
lin2db,
db2lin)
from gnpy.core import network
topology = load_json('edfa_example_network.json')
nw = network.network_from_json(topology)
pch2d_legend_data = np.loadtxt('Pchan2DLegend.txt')
pch2d = np.loadtxt('Pchan2D.txt')
ch_spacing = 0.05
fc = itufs(ch_spacing)
lc = freq2wavelength(fc) / 1000
nchan = np.arange(len(lc))
df = np.ones(len(lc)) * ch_spacing
edfa1 = [n for n in nw.nodes() if n.uid == 'Edfa1'][0]
edfa1.gain_target = 20.0
edfa1.tilt_target = -0.7
edfa1.calc_nf()
results = []
for Pin in pch2d:
chgain = edfa1.gain_profile(Pin)
pase = edfa1.noise_profile(chgain, fc, df)
pout = lin2db(db2lin(Pin + chgain) + db2lin(pase))
results.append(pout)
# Generate legend text
pch2d_legend = []
for ea in pch2d_legend_data:
s = ''.join([chr(xx) for xx in ea.astype(dtype=int)]).strip()
pch2d_legend.append(s)
# Plot
axis_font = {'fontname': 'Arial', 'size': '16', 'fontweight': 'bold'}
title_font = {'fontname': 'Arial', 'size': '17', 'fontweight': 'bold'}
tic_font = {'fontname': 'Arial', 'size': '12'}
plt.rcParams["font.family"] = "Arial"
plt.figure()
plt.plot(nchan, pch2d.T, '.-', lw=2)
plt.xlabel('Channel Number', **axis_font)
plt.ylabel('Channel Power [dBm]', **axis_font)
plt.title('Input Power Profiles for Different Channel Loading', **title_font)
plt.legend(pch2d_legend, loc=5)
plt.grid()
plt.ylim((-100, -10))
plt.xlim((0, 110))
plt.xticks(np.arange(0, 100, 10), **tic_font)
plt.yticks(np.arange(-110, -10, 10), **tic_font)
plt.figure()
for result in results:
plt.plot(nchan, result, '.-', lw=2)
plt.title('Output Power w/ ASE for Different Channel Loading', **title_font)
plt.xlabel('Channel Number', **axis_font)
plt.ylabel('Channel Power [dBm]', **axis_font)
plt.grid()
plt.ylim((-50, 10))
plt.xlim((0, 100))
plt.xticks(np.arange(0, 100, 10), **tic_font)
plt.yticks(np.arange(-50, 10, 10), **tic_font)
plt.legend(pch2d_legend, loc=5)
plt.show()
|
from __future__ import print_function
import h5py
import sys
import numpy as np
class NanoporeModel(object):
def __init__(self, fast5File):
self.fastFive = h5py.File(fast5File, "r")
self.stay_prob = 0
self.skip_prob_bins = []
self.model_name = ''
self.model = None
def export_model(self, destination_path):
"""Exports the model to a file. Format:
line 1: [correlation coefficient] [level_mean] [level_sd] [noise_mean]
[noise_sd] [noise_lambda ] (.../kmer) \n
line 2: skip bins \n
line 3: [correlation coefficient] [level_mean] [level_sd, scaled]
[noise_mean] [noise_sd] [noise_lambda ] (.../kmer) \n
"""
def calculate_lambda(noise_mean, noise_stdev):
return (np.power(noise_mean, 3)) / (np.power(noise_stdev, 2))
if self.model is None:
print("This method is meant to be used as part of the child class TemplateModel or ComplementModel",
file=sys.stderr)
# output the model for cPecan to a file
model_path = destination_path + self.model_name
out_file = open(model_path, 'w')
# line 1
print("0", end=' ', file=out_file) # placeholder for correlation parameter
for kmer, level_mean, level_stdev, sd_mean, sd_stdev, weight in self.model:
lam = calculate_lambda(sd_mean, sd_stdev)
print(level_mean, level_stdev, sd_mean, sd_stdev, lam, end=' ', file=out_file)
print("", end="\n", file=out_file)
# line 2
for _ in self.skip_prob_bins:
print(_, end=' ', file=out_file)
print("", end="\n", file=out_file)
# line 3
print("0", end=' ', file=out_file) # placeholder for correlation parameter
for kmer, level_mean, level_stdev, sd_mean, sd_stdev, weight in self.model:
lam = calculate_lambda(sd_mean, sd_stdev)
print(level_mean, (level_stdev*1.75), sd_mean, sd_stdev, lam, end=' ', file=out_file)
print("", end="\n", file=out_file)
return
def get_model_dict(self):
# check
if self.model is None:
print("This method is meant to be used as part of the child class TemplateModel or ComplementModel",
file=sys.stderr)
# go through the model and build a lookup table
model_dict = {}
for kmer, level_mean, level_stdev, sd_mean, sd_stdev, weight in self.model:
model_dict[kmer] = [level_mean, level_stdev, sd_mean, sd_stdev]
return model_dict
def close(self):
self.fastFive.close()
class TemplateModel(NanoporeModel):
def __init__(self, fast5File):
super(TemplateModel, self).__init__(fast5File=fast5File)
self.model = self.fastFive['/Analyses/Basecall_2D_000/BaseCalled_template/Model']
self.stay_prob = np.log2(
self.fastFive["/Analyses/Basecall_2D_000/BaseCalled_template/Model"].attrs["stay_prob"])
self.skip_prob_bins = [0.487, 0.412, 0.311, 0.229, 0.174, 0.134, 0.115, 0.103, 0.096, 0.092,
0.088, 0.087, 0.084, 0.085, 0.083, 0.082, 0.085, 0.083, 0.084, 0.082,
0.080, 0.085, 0.088, 0.086, 0.087, 0.089, 0.085, 0.090, 0.087, 0.096]
self.parse_model_name()
def parse_model_name(self):
model_name = self.fastFive["/Analyses/Basecall_2D_000/Summary/basecall_1d_template"].attrs["model_file"]
model_name = model_name.split('/')[-1]
self.model_name = model_name
return
class ComplementModel(NanoporeModel):
def __init__(self, fast5File):
super(ComplementModel, self).__init__(fast5File=fast5File)
self.model = self.fastFive['/Analyses/Basecall_2D_000/BaseCalled_complement/Model']
self.stay_prob = np.log2(
self.fastFive["/Analyses/Basecall_2D_000/BaseCalled_complement/Model"].attrs["stay_prob"])
self.skip_prob_bins = [0.531, 0.478, 0.405, 0.327, 0.257, 0.207, 0.172, 0.154, 0.138, 0.132,
0.127, 0.123, 0.117, 0.115, 0.113, 0.113, 0.115, 0.109, 0.109, 0.107,
0.104, 0.105, 0.108, 0.106, 0.111, 0.114, 0.118, 0.119, 0.110, 0.119]
self.parse_model_name()
def parse_model_name(self):
model_name = self.fastFive["/Analyses/Basecall_2D_000/Summary/basecall_1d_complement"].attrs["model_file"]
model_name = model_name.split('/')[-1]
self.model_name = model_name
return
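# Usage sketch (the fast5 path is illustrative; requires a 2D-basecalled read
# and an existing output directory):
#
#   model = TemplateModel("reads/example_read.fast5")
#   lookup = model.get_model_dict()
#   model.export_model("./models/")
#   model.close()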
|
# -*- coding: iso-8859-1 -*-
#
# http://en.wikipedia.org/wiki/Fourier_series
#
"""
http://rosettacode.org/wiki/Fast_Fourier_transform
http://en.wikipedia.org/wiki/Cooley–Tukey_FFT_algorithm
http://en.wikipedia.org/wiki/Discrete_Fourier_transform
http://es.wikipedia.org/wiki/Transformada_de_Fourier_discreta
"""
from __future__ import print_function, division
import sys
if( sys.version_info[0] == 2 ):
range = xrange
import math
import qm3.maths.integration
import qm3.maths.interpolation
class series( object ):
def __init__( self, size, x, y, interpolant = qm3.maths.interpolation.hermite_spline ):
self.size = size
self.base = 0.0
self.bcof = []
self.acof = []
self.x = x[:]
self.y = y[:]
self.peri = 0.5 * ( self.x[-1] - self.x[0] )
self.inte = interpolant( x, y )
def integrate( self ):
self.base = 0.5 * qm3.maths.integration.Simpson_f( lambda r: self.inte.calc( r )[0], self.x[0], self.x[-1], n = 1000 )[0]
for i in range( self.size ):
f = float( i + 1 ) * math.pi / self.peri
self.acof.append( qm3.maths.integration.Simpson_f( lambda r: self.inte.calc( r )[0] * math.cos( r * f ), self.x[0], self.x[-1], n = 1000 )[0] )
self.bcof.append( qm3.maths.integration.Simpson_f( lambda r: self.inte.calc( r )[0] * math.sin( r * f ), self.x[0], self.x[-1], n = 1000 )[0] )
def calc( self, r, items = -1 ):
o = self.base
n = self.size
if( items > 0 and items <= self.size ):
n = items
for i in range( n ):
f = float( i + 1 ) * math.pi / self.peri
o += self.acof[i] * math.cos( f * r ) + self.bcof[i] * math.sin( f * r )
return( o / self.peri )
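# Usage sketch (sample data is illustrative; the default hermite_spline
# interpolant is used when evaluating the integrals):
#
#   import math
#   x = [ 2.0 * math.pi * i / 100.0 for i in range( 101 ) ]
#   y = [ math.sin( xi ) + 0.3 * math.sin( 3.0 * xi ) for xi in x ]
#   s = series( 10, x, y )
#   s.integrate()
#   approx = s.calc( 1.0 )   # should be close to y evaluated at r = 1.0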
|
# coding=utf-8
from terminaltables import AsciiTable
import numpy as np
from random import randint
from mygrid.rnp import Arvore, Aresta
from mygrid.util import Fasor, Base
class Setor(Arvore):
def __init__(self, nome, vizinhos, nos_de_carga, prioridade=0):
assert isinstance(nome, str), 'O parâmetro nome da classe' \
'Setor deve ser do tipo string'
assert isinstance(vizinhos, list), 'O parâmetro vizinhos da classe' \
' Setor deve ser do tipo list'
assert isinstance(nos_de_carga, list), 'O parâmetro nos_de_carga da classe' \
'Setor deve ser do tipo list'
assert (prioridade >= 0 and prioridade <= 10), 'O valo de prioridade' \
'deve estar entre 0-10'
# assert isinstance(prioridade, int), 'O parâmetro Prioridade da classe' \
# 'Setor deve ser do tipo int'
self.nome = nome
self.prioridade = prioridade
self.vizinhos = vizinhos
self.rnp_associadas = {i: None for i in self.vizinhos}
self.nos_de_carga = dict()
for no in nos_de_carga:
no.setor = self.nome
self.nos_de_carga[no.nome] = no
self.no_de_ligacao = None
arvore_de_setor = self._gera_arvore_do_setor()
super(Setor, self).__init__(arvore_de_setor, str)
def _gera_arvore_do_setor(self):
arvore_do_setor = dict()
        # loop over the load nodes of the sector
for i, j in self.nos_de_carga.items():
# print '%-12s vizinhos %s' % (str(j), j.vizinhos)
vizinhos = list()
            # loop over the neighbors of the load node
for k in j.vizinhos:
                # only consider as a neighbor a load node that is
                # in the same sector as the node under analysis
if k in self.nos_de_carga.keys():
vizinhos.append(k)
arvore_do_setor[i] = vizinhos
return arvore_do_setor
def calcular_potencia(self):
potencia = Fasor(real=0.0, imag=0.0, tipo=Fasor.Potencia)
for no in self.nos_de_carga.values():
potencia = potencia + no.potencia
return potencia
def __str__(self):
return 'Setor: ' + self.nome
class NoDeCarga(object):
def __init__(self,
nome,
vizinhos,
potencia=Fasor(real=0.0, imag=0.0, tipo=Fasor.Potencia),
tensao=Fasor(real=0.0, imag=0.0, tipo=Fasor.Tensao),
chaves=None):
assert isinstance(nome, str), 'O parâmetro nome da classe NoDeCarga' \
' deve ser do tipo string'
assert isinstance(vizinhos, list), 'O parâmetro vizinhos da classe' \
' Barra deve ser do tipo string'
self.nome = nome
self.vizinhos = vizinhos
self.potencia = potencia
self.potencia_eq = Fasor(real=0.0, imag=0.0, tipo=Fasor.Potencia)
self.tensao = tensao
if chaves is not None:
assert isinstance(chaves, list), 'O parâmetro chaves da classe NoDeCarga' \
' deve ser do tipo list'
self.chaves = chaves
else:
self.chaves = list()
self.setor = None
def __str__(self):
return 'No de Carga: ' + self.nome
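# Usage sketch (node and sector names are illustrative):
#
#   n1 = NoDeCarga('A1', vizinhos=['A2'])
#   n2 = NoDeCarga('A2', vizinhos=['A1'])
#   setor = Setor('A', vizinhos=['B'], nos_de_carga=[n1, n2])
#   total = setor.calcular_potencia()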
class Barramento(NoDeCarga):
def __init__(self,
nome,
vizinhos,
potencia=Fasor(real=0.0, imag=0.0, tipo=Fasor.Potencia),
tensao=Fasor(real=0.0, imag=0.0, tipo=Fasor.Tensao),
chaves=None):
        # pass the received arguments through instead of re-creating defaults
        super(Barramento, self).__init__(nome,
                                         vizinhos,
                                         potencia=potencia,
                                         tensao=tensao,
                                         chaves=chaves)
class Subestacao(object):
def __init__(self,
nome,
alimentadores,
transformadores,
impedancia_equivalente_positiva=0.0+0.0j,
impedancia_equivalente_zero=0.0+0.0j):
assert isinstance(nome, str), 'O parâmetro nome da classe Subestacao ' \
'deve ser do tipo str'
assert isinstance(alimentadores, list), 'O parâmetro alimentadores da classe ' \
'deve ser do tipo list'
assert isinstance(transformadores, list), 'O parâmetro alimentadores da classe ' \
'deve ser do tipo list'
self.nome = nome
self.alimentadores = dict()
for alimentador in alimentadores:
self.alimentadores[alimentador.nome] = alimentador
self.transformadores = dict()
for transformador in transformadores:
self.transformadores[transformador.nome] = transformador
self.impedancia_equivalente_positiva = impedancia_equivalente_positiva
self.impedancia_equivalente_zero = impedancia_equivalente_zero
self.impedancia_positiva = impedancia_equivalente_positiva
self.impedancia_zero = impedancia_equivalente_zero
class Trecho(Aresta):
def __init__(self,
nome,
n1,
n2,
                 condutor=None,
                 comprimento=None,
                 chave=None):
assert isinstance(nome, str), 'O parâmetro nome da classe Trecho ' \
'deve ser do tipo str'
assert isinstance(n1, NoDeCarga) or isinstance(n1, Chave), 'O parâmetro n1 da classe Trecho ' \
'deve ser do tipo No de carga ' \
'ou do tipo Chave'
assert isinstance(n2, NoDeCarga) or isinstance(n2, Chave), 'O parâmetro n2 da classe Trecho ' \
'deve ser do tipo No de carga ' \
'ou do tipo Chave'
super(Trecho, self).__init__(nome)
self.n1 = n1
self.n2 = n2
self.no_montante = None
self.no_jusante = None
self.condutor = condutor
        self.comprimento = comprimento
        self.chave = chave
def calcula_impedancia(self):
return (self.comprimento * self.condutor.rp * 1e-3,
self.comprimento * self.condutor.xp * 1e-3)
def __repr__(self):
return 'Trecho: %s' % self.nome
class Alimentador(Arvore):
def __init__(self, nome, setores, trechos, chaves):
assert isinstance(nome, str), 'O parâmetro nome da classe Alimentador' \
'deve ser do tipo string'
assert isinstance(setores, list), 'O parâmetro setores da classe' \
'Alimentador deve ser do tipo list'
assert isinstance(chaves, list), 'O parâmetro chaves da classe' \
'Alimentador deve ser do tipo list'
self.nome = nome
self.setores = dict()
for setor in setores:
self.setores[setor.nome] = setor
self.chaves = dict()
for chave in chaves:
self.chaves[chave.nome] = chave
self.nos_de_carga = dict()
for setor in setores:
for no in setor.nos_de_carga.values():
self.nos_de_carga[no.nome] = no
self.trechos = dict()
for trecho in trechos:
self.trechos[trecho.nome] = trecho
for setor in self.setores.values():
# print 'Setor: ', setor.nome
setores_vizinhos = list()
for chave in self.chaves.values():
if chave.n2 and chave.n1 is setor:
setores_vizinhos.append(chave.n2)
elif chave.n1 and chave.n2 is setor:
setores_vizinhos.append(chave.n1)
for setor_vizinho in setores_vizinhos:
# print 'Setor Vizinho: ', setor_vizinho.nome
nos_de_ligacao = list()
for i in setor.nos_de_carga.values():
for j in setor_vizinho.nos_de_carga.values():
if i.nome in j.vizinhos:
nos_de_ligacao.append((j, i))
for no in nos_de_ligacao:
setor.ordenar(no[1].nome)
setor.rnp_associadas[setor_vizinho.nome] = (no[0],
setor.rnp)
# print 'RNP: ', setor.rnp
_arvore_da_rede = self._gera_arvore_da_rede()
super(Alimentador, self).__init__(_arvore_da_rede, str)
def ordenar(self, raiz):
super(Alimentador, self).ordenar(raiz)
for setor in self.setores.values():
caminho = self.caminho_no_para_raiz(setor.nome)
if setor.nome != raiz:
setor_jusante = caminho[1, 1]
setor.rnp = setor.rnp_associadas[setor_jusante][1]
def _gera_arvore_da_rede(self):
arvore_da_rede = {i: list() for i in self.setores.keys()}
for chave in self.chaves.values():
if chave.estado == 1 and chave.n1.nome in self.setores.keys():
arvore_da_rede[chave.n1.nome].append(chave.n2.nome)
if chave.estado == 1 and chave.n2.nome in self.setores.keys():
arvore_da_rede[chave.n2.nome].append(chave.n1.nome)
return arvore_da_rede
def gerar_arvore_nos_de_carga(self):
        # use the load nodes of the substation's root sector as the first
        # load nodes to populate the load-node tree and the load-node RNP
setor_raiz = self.setores[self.rnp[1][0]]
self.arvore_nos_de_carga = Arvore(arvore=setor_raiz._gera_arvore_do_setor(),
dtype=str)
self.arvore_nos_de_carga.ordenar(raiz=setor_raiz.rnp[1][0])
        # define the visited and stack lists required by the
        # recursive process that visits the substation's sectors
visitados = []
pilha = []
        # start the iterative visit of the sectors
        # in search of their respective load nodes
self._gerar_arvore_nos_de_carga(setor_raiz, visitados, pilha)
def _gerar_arvore_nos_de_carga(self, setor, visitados, pilha):
# atualiza as listas de recursão
visitados.append(setor.nome)
pilha.append(setor.nome)
# for percorre os setores vizinhos ao setor atual
# que ainda não tenham sido visitados
vizinhos = setor.vizinhos
for i in vizinhos:
# esta condição testa se existe uma ligação
# entre os setores de uma mesma subestação, mas
# que possuem uma chave normalmente aberta entre eles.
# caso isto seja constatado o laço for é interrompido.
if i not in visitados and i in self.setores.keys():
for c in self.chaves.values():
if (c.n1 and c.n2) and c.n1.nome == setor.nome and c.n2.nome == i:
if c.estado == 1:
break
else:
pass
elif (c.n1 and c.n2) and c.n2.nome == setor.nome and c.n1.nome == i:
if c.estado == 1:
break
else:
pass
else:
continue
prox = i
setor_vizinho = self.setores[i]
no_insersao, rnp_insersao = setor_vizinho.rnp_associadas[setor.nome]
arvore_insersao = setor_vizinho._gera_arvore_do_setor()
setor_vizinho.no_de_ligacao = no_insersao
setor_vizinho.rnp = rnp_insersao
self.arvore_nos_de_carga.inserir_ramo(no_insersao.nome,
(rnp_insersao,
arvore_insersao),
no_raiz=rnp_insersao[1, 0]
)
break
else:
continue
else:
pilha.pop()
if pilha:
anter = pilha.pop()
return self._gerar_arvore_nos_de_carga(self.setores[anter],
visitados, pilha)
else:
return
return self._gerar_arvore_nos_de_carga(self.setores[prox],
visitados,
pilha)
def atualizar_arvore_da_rede(self):
_arvore_da_rede = self._gera_arvore_da_rede()
self.arvore = _arvore_da_rede
def gerar_trechos_da_rede(self):
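        # Percorre a RNP de nós de carga: para cada nó na posição i procura,
        # recuando o índice j, o nó imediatamente a montante (diferença de
        # profundidade igual a 1) e cria o Trecho correspondente, associando
        # uma chave quando os dois nós pertencem a setores distintos.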
self.trechos = dict()
j = 0
for i in range(1, np.size(self.arvore_nos_de_carga.rnp, axis=1)):
prof_1 = int(self.arvore_nos_de_carga.rnp[0, i])
prof_2 = int(self.arvore_nos_de_carga.rnp[0, j])
            while abs(prof_1 - prof_2) != 1:
if abs(prof_1 - prof_2) == 0:
j -= 1
elif abs(prof_1 - prof_2) == 2:
j = i - 1
prof_2 = int(self.arvore_nos_de_carga.rnp[0, j])
else:
n_1 = str(self.arvore_nos_de_carga.rnp[1, j])
n_2 = str(self.arvore_nos_de_carga.rnp[1, i])
setor_1 = None
setor_2 = None
# print 'Trecho: ' + n_1 + '-' + n_2
# verifica quais os nós de carga existentes nas extremidades do trecho
# e se existe uma chave no trecho
for setor in self.setores.values():
if n_1 in setor.nos_de_carga.keys():
setor_1 = setor
if n_2 in setor.nos_de_carga.keys():
setor_2 = setor
if setor_1 is not None and setor_2 is not None:
break
else:
if setor_1 is None:
n = n_1
else:
n = n_2
for setor in self.setores.values():
if n in setor.nos_de_carga.keys() and np.size(setor.rnp, axis=1) == 1:
if setor_1 is None:
setor_1 = setor
else:
setor_2 = setor
break
if setor_1 != setor_2:
for chave in self.chaves.values():
if chave.n1 in (setor_1, setor_2) and chave.n2 in (setor_1, setor_2):
self.trechos[n_1 + n_2] = Trecho(nome=n_1 + n_2,
n1=self.nos_de_carga[n_1],
n2=self.nos_de_carga[n_2],
chave=chave)
else:
self.trechos[n_1 + n_2] = Trecho(nome=n_1 + n_2,
n1=self.nos_de_carga[n_1],
n2=self.nos_de_carga[n_2])
def calcular_potencia(self):
potencia = Fasor(real=0.0, imag=0.0, tipo=Fasor.Potencia)
for no in self.nos_de_carga.values():
potencia = potencia + no.potencia
return potencia
def podar(self, no, alterar_rnp=False):
poda = super(Alimentador, self).podar(no, alterar_rnp)
rnp_setores = poda[0]
arvore_setores = poda[1]
if alterar_rnp:
# for povoa dicionario com setores podados
setores = dict()
for i in rnp_setores[1, :]:
setor = self.setores.pop(i)
setores[setor.nome] = setor
# for povoa dicionario com nos de carga podados
nos_de_carga = dict()
for setor in setores.values():
for j in setor.nos_de_carga.values():
if j.nome in self.nos_de_carga.keys():
no_de_carga = self.nos_de_carga.pop(j.nome)
nos_de_carga[no_de_carga.nome] = no_de_carga
# for atualiza a lista de nós de carga da subestação
# excluindo os nós de carga podados
for setor in self.setores.values():
for no_de_carga in setor.nos_de_carga.values():
self.nos_de_carga[no_de_carga.nome] = no_de_carga
if no_de_carga.nome in nos_de_carga.keys():
nos_de_carga.pop(no_de_carga.nome)
# poda o ramo na arvore da subetação
poda = self.arvore_nos_de_carga.podar(setores[no].rnp[1, 0], alterar_rnp=alterar_rnp)
rnp_nos_de_carga = poda[0]
arvore_nos_de_carga = poda[1]
# for povoa dicionario de chaves que estao nos trechos podados
# e retira do dicionario de chaves da arvore que esta sofrendo a poda
# as chaves que não fazem fronteira com os trechos remanescentes
chaves = dict()
chaves_a_remover = list()
for chave in self.chaves.values():
if chave.n1 and chave.n1.nome in setores.keys():
if not chave.n2 or not chave.n2.nome in self.setores.keys():
chaves[chave.nome] = self.chaves[chave.nome]
chaves_a_remover.append(chave.nome)
else:
chave.estado = 0
chaves[chave.nome] = chave
elif chave.n2 and chave.n2.nome in setores.keys():
if not chave.n1 or not chave.n1.nome in self.setores.keys():
chaves[chave.nome] = self.chaves[chave.nome]
chaves_a_remover.append(chave.nome)
else:
chave.estado = 0
chaves[chave.nome] = chave
for chave in chaves_a_remover:
self.chaves.pop(chave) # BRESSAN END
# for poda os trechos dos setores podados e povoa o dicionario trechos
# para que possa ser repassado juntamente com os outros dados da poda
trechos = dict()
trechos_a_remover = list() # BRESSAN START
for no in rnp_nos_de_carga[1, :]:
for trecho in self.trechos.values():
if trecho.n1.nome == no or trecho.n2.nome == no:
trechos[trecho.nome] = self.trechos[trecho.nome]
trechos_a_remover.append(trecho.nome)
for trecho in trechos_a_remover:
self.trechos.pop(trecho) # BRESSAN END
return (
setores,
arvore_setores,
rnp_setores,
nos_de_carga,
arvore_nos_de_carga,
rnp_nos_de_carga,
chaves,
trechos
)
else:
return rnp_setores
def inserir_ramo(self, no, poda, no_raiz=None):
(setores, arvore_setores, rnp_setores,
nos_de_carga, arvore_nos_de_carga, rnp_nos_de_carga,
chaves, trechos) = poda
# atualiza setores do alimentador
self.setores.update(setores)
# atualiza os nos de carga do alimentador
self.nos_de_carga.update(nos_de_carga)
# atualiza as chaves do alimentador
self.chaves.update(chaves)
# atualiza os trechos do alimentador
self.trechos.update(trechos)
if no_raiz is None:
setor_inserir = setores[rnp_setores[1, 0]]
else:
setor_inserir = setores[no_raiz]
setor_insersao = self.setores[no]
# for identifica se existe alguma chave que permita a inserção do ramo na arvore
# da subestação que ira receber a inserção.
chaves_de_lig = dict()
# for percorre os nos de carga do setor de insersão
for i in self.setores[setor_insersao.nome].nos_de_carga.values():
# for percorre as chaves associadas ao no de carga
for j in i.chaves:
# for percorre os nos de carga do setor raiz do ramo a ser inserido
for w in setores[setor_inserir.nome].nos_de_carga.values():
# se a chave pertence aos nos de carga i e w então é uma chave de ligação
if j in w.chaves:
chaves_de_lig[j] = (i, w)
if not chaves_de_lig:
# print 'A insersao não foi possível pois nenhuma chave de fronteira foi encontrada!'
return
i = randint(0, len(chaves_de_lig) - 1)
n1, n2 = chaves_de_lig[list(chaves_de_lig.keys())[i]]
self.chaves[list(chaves_de_lig.keys())[i]].estado = 1
if not no_raiz and setor_inserir.nome == setores[rnp_setores[1, 0]].nome:
super(Alimentador, self).inserir_ramo(no, (rnp_setores, arvore_setores))
else:
super(Alimentador, self).inserir_ramo(no, (rnp_setores, arvore_setores), no_raiz)
# atualiza a arvore de setores do alimentador
self.atualizar_arvore_da_rede()
# atualiza a arvore de nos de carga do alimentador
self.gerar_arvore_nos_de_carga()
class Chave(Aresta):
def __init__(self, nome, estado=1):
assert estado == 1 or estado == 0, 'O parâmetro estado deve ser um inteiro de valor 1 ou 0'
super(Chave, self).__init__(nome)
self.estado = estado
def __str__(self):
if self.n1 is not None and self.n2 is not None:
return 'Chave: %s - n1: %s, n2: %s' % (self.nome, self.n1.nome, self.n2.nome)
else:
return 'Chave: %s' % self.nome
class IED(object):
def __init__(self,nome):
self.nome = nome
class Transformador(object):
def __init__(self, nome, tensao_primario, tensao_secundario, potencia, impedancia):
assert isinstance(nome, str), 'O parâmetro nome deve ser do tipo str'
assert isinstance(tensao_secundario, Fasor), 'O parâmetro tensao_secundario deve ser do tipo Fasor'
assert isinstance(tensao_primario, Fasor), 'O parâmetro tensao_primario deve ser do tipo Fasor'
assert isinstance(potencia, Fasor), 'O parâmetro potencia deve ser do tipo Fasor'
assert isinstance(impedancia, Fasor), 'O parâmetro impedancia deve ser do tipo Fasor'
self.nome = nome
self.tensao_primario = tensao_primario
self.tensao_secundario = tensao_secundario
self.potencia = potencia
self.impedancia = impedancia
class Condutor(object):
def __init__(self, nome, rp, xp, rz, xz, ampacidade):
self.nome = nome
self.rp = float(rp)
self.xp = float(xp)
self.rz = float(rz)
self.xz = float(xz)
self.ampacidade = float(ampacidade)
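# Observação: assume-se que rp/xp/rz/xz sejam dados em ohm/km (sequências
# positiva e zero) e a ampacidade em ampères; os valores concretos dependem
# do catálogo de condutores adotado.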
if __name__ == '__main__':
pass
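    # Esboço mínimo de uso das classes acima, apenas ilustrativo: os valores
    # do condutor e o comprimento do trecho são hipotéticos, não vêm de
    # nenhum catálogo real.
    cond = Condutor(nome='condutor_exemplo', rp=0.2391, xp=0.37895,
                    rz=0.41693, xz=1.55591, ampacidade=301)
    ch1 = Chave(nome='ch1', estado=1)
    ch2 = Chave(nome='ch2', estado=1)
    # trecho hipotético de 1000 m ligando as duas chaves
    trecho_exemplo = Trecho(nome='ch1ch2', n1=ch1, n2=ch2,
                            condutor=cond, comprimento=1000.0)
    print(trecho_exemplo)                       # Trecho: ch1ch2
    print(trecho_exemplo.calcula_impedancia())  # tupla (R, X) em ohm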