import typing
from PyQt5.Qt import QColor
from PyQt5.QtChart import QChart, QValueAxis, QCandlestickSet, QCandlestickSeries
from PyQt5.QtWidgets import QGroupBox, QLineEdit, QFormLayout, QLabel
from FinChart.SeriesAbstract import SeriesAbstract
class CandleSeries(SeriesAbstract):
def __init__(
self,
_name: str,
_x_list: typing.Sequence,
_y_list: typing.Sequence,
_inc_color: typing.Any = None,
_dec_color: typing.Any = None,
_show_value: bool = True,
):
super().__init__(_name, _x_list, _y_list, None, _show_value)
self.show_open_edit: QLineEdit = None
self.show_high_edit: QLineEdit = None
self.show_low_edit: QLineEdit = None
self.show_close_edit: QLineEdit = None
self.inc_color = _inc_color
self.dec_color = _dec_color
self.type = SeriesAbstract.CANDLE
def calcRangeY(self, _begin_x=None, _end_x=None) -> typing.Tuple:
tmp_y = self.y_list[self._find_begin_idx(_begin_x) : self._find_end_idx(_end_x)]
min_value = min([min(d) for d in tmp_y])
max_value = max([max(d) for d in tmp_y])
return min_value, max_value
def addSeries(
self,
_x2idx: typing.Dict,
_idx2x: list,
_chart: QChart,
_axis_x: QValueAxis,
_axis_y: QValueAxis,
):
series = QCandlestickSeries()
series.setName(self.name)
for x, y in zip(self.x_list, self.y_list):
series.append(QCandlestickSet(*y, _x2idx[x]))
if self.inc_color is not None:
series.setIncreasingColor(self.inc_color)
else:
series.setIncreasingColor(QColor("#c41919"))
if self.dec_color is not None:
series.setDecreasingColor(self.dec_color)
else:
series.setDecreasingColor(QColor("#009f9f"))
_chart.addSeries(series)
_chart.setAxisX(_axis_x, series)
_chart.setAxisY(_axis_y, series)
if self.show_value:
self.createShow()
def createShow(self):
self.show_group = QGroupBox()
self.show_group.setTitle(self.name)
self.show_open_edit = QLineEdit()
self.show_open_edit.setDisabled(True)
self.show_high_edit = QLineEdit()
self.show_high_edit.setDisabled(True)
self.show_low_edit = QLineEdit()
self.show_low_edit.setDisabled(True)
self.show_close_edit = QLineEdit()
self.show_close_edit.setDisabled(True)
layout = QFormLayout()
layout.addWidget(QLabel("open"))
layout.addWidget(self.show_open_edit)
layout.addWidget(QLabel("high"))
layout.addWidget(self.show_high_edit)
layout.addWidget(QLabel("low"))
layout.addWidget(self.show_low_edit)
layout.addWidget(QLabel("close"))
layout.addWidget(self.show_close_edit)
self.show_group.setLayout(layout)
def updateValue(self, _x):
idx = self._find_idx(_x)
if idx is None:
self.show_open_edit.setText("")
self.show_high_edit.setText("")
self.show_low_edit.setText("")
self.show_close_edit.setText("")
else:
value = self.y_list[idx]
self.show_open_edit.setText("{:.5f}".format(value[0]))
self.show_high_edit.setText("{:.5f}".format(value[1]))
self.show_low_edit.setText("{:.5f}".format(value[2]))
self.show_close_edit.setText("{:.5f}".format(value[3]))
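# Hedged usage sketch (hypothetical OHLC data; the surrounding FinChart code is
# expected to call addSeries() and updateValue() while wiring up the chart):
# candles = CandleSeries(
#     "demo",
#     _x_list=[0, 1, 2],
#     _y_list=[(10.0, 11.0, 9.5, 10.5), (10.5, 12.0, 10.2, 11.8), (11.8, 12.5, 11.0, 11.2)],
# )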
|
from typing import List
import torch.nn as nn
from regym.rl_algorithms.agents import Agent
from regym.rl_algorithms.A2C import A2CAlgorithm
from regym.networks.bodies import FCBody
from regym.networks.heads import CategoricalActorCriticNet
from regym.networks.preprocessing import turn_into_single_element_batch
class A2CAgent(Agent):
def __init__(self, name: str,
samples_before_update: int,
algorithm: A2CAlgorithm):
'''
:param name: String identifier for the agent
:param samples_before_update: Number of actions the agent will take before updating
:param algorithm: Reinforcement Learning algorithm used to update the agent's policy.
Contains the agent's policy, represented as a neural network.
'''
super(A2CAgent, self).__init__(name=name)
self.algorithm = algorithm
self.state_preprocess_fn = turn_into_single_element_batch
self.samples_before_update = samples_before_update
self.samples = []
def handle_experience(self, s, a, r, succ_s, done=False):
super(A2CAgent, self).handle_experience(s, a, r, succ_s, done)
if not self.training: return
self.samples.append((s, a, self.current_prediction['log_pi_a'],
r, self.current_prediction['V'], succ_s, done))
if done or len(self.samples) >= self.samples_before_update:
bootstrapped_reward = self.current_prediction['V'] if not done else 0
self.algorithm.train(self.samples, bootstrapped_reward)
self.samples = []
def model_free_take_action(self, state, legal_actions: List[int], multi_action: bool = False):
processed_s = self.state_preprocess_fn(state)
self.current_prediction = self.algorithm.model(processed_s)
return self.current_prediction['a'].item()
def clone(self, training=None):
raise NotImplementedError('Cloning A2CAgent not supported')
def build_A2C_Agent(task, config, agent_name):
'''
:param task: Environment specific configuration
:param agent_name: String identifier for the agent
:param config: Dictionary whose entries contain hyperparameters for the A2C agents:
- 'discount_factor': Discount factor (gamma in standard RL equations) used as a -variance / +bias tradeoff.
- 'n_steps': 'Forward view' timesteps used to compute the Q_values used to approximate the advantage function
- 'samples_before_update': Number of actions the agent will take before updating
- 'learning_rate': Learning rate for the Neural Network optimizer. Recommended: 1.0e-4
- 'adam_eps': Epsilon value used in denominator of Adam update computation. Recommended: 1.0e-5
:returns: Agent using A2C algorithm to act and learn in environments
'''
body = FCBody(task.observation_dim, hidden_units=(256,)) # TODO: remove magic number
model = CategoricalActorCriticNet(body=body,
state_dim=task.observation_dim,
action_dim=task.action_dim)
algorithm = A2CAlgorithm(model=model,
n_steps=config['n_steps'], discount_factor=config['discount_factor'],
adam_eps=config['adam_eps'], learning_rate=config['learning_rate'])
return A2CAgent(name=agent_name, algorithm=algorithm, samples_before_update=config['samples_before_update'])
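# Hedged usage sketch: an example hyperparameter dictionary matching the keys
# documented in build_A2C_Agent above (values are illustrative assumptions,
# except the learning_rate/adam_eps recommendations quoted from the docstring):
# example_config = {
#     'discount_factor': 0.99,
#     'n_steps': 5,
#     'samples_before_update': 32,
#     'learning_rate': 1.0e-4,
#     'adam_eps': 1.0e-5,
# }
# agent = build_A2C_Agent(task, example_config, 'a2c_agent')  # `task` supplied by the caller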
|
import numpy as np
import pandas as pd
import warnings
import xarray as xr
import dask.dataframe as dd
import dask.array as da
class LCI():
"""Defines a LCI class based on xr.DataArray."""
def __init__(self, name, type, iterations, UP, parameters):
"""Initialization with the phases and substances of the LCI."""
self.name = name
self.type = type
self.UP = UP
self.substances = UP.Substances
self.p = parameters
self.data = None
self.build(iterations)
def __repr__(self):
return f"{self.data}"
def __getitem__(self, phase):
return self.data[phase]
def __setitem__(self, phase, other):
self.data[phase] = other
def build(self, iterations):
"""Builds the xr.DataArray for the LCI."""
if self.data is None:
self.data = xr.Dataset(coords={'Substances': self.substances,
'i': np.arange(iterations)},
attrs={'Name':self.name})
self.data.coords['Units'] = self.substances.Units
return self.data
def substance(self, substance):
"""Locates the specified substance on the data."""
return self.data.loc[{'Substances': substance}]
def iteration(self, iteration):
"""Locates the specified iteration on the data."""
return self.data.loc[{'i': iteration}]
def find(self, phase, substance, iteration):
"""Locates the specified substance, phase and iteration on the data."""
return self.data[phase].loc[{'Substances': substance, 'i':iteration}]
def mean(self, phase):
"""Returns the mean for all iterations of a certain phase."""
return self[phase].mean('i').load()
def median(self, phase):
"""Returns the median for all iterations of a certain phase."""
return self[phase].median('i').load()
def office(self):
LCI_E_office = self.electricity(self.p["E_office"]) #per month
LCI_E_office = LCI_E_office * self.p["devmonths"] #per development
LCI_water_office = self.UP["Water"] * self.p["water_office"] \
+ self.UP["Wastewater"] * self.p["wastewater_office"] #per month
LCI_water_office = LCI_water_office * self.p["devmonths"] #per development
self.p["travel"] = 18470 / 12 * self.p["developers"] * self.p["devmonths"] #in km
LCI_travel = self.UP["Car"]*self.p["travel"]*0.1 \
+ self.UP["Airplane"]*self.p["travel"]*0.9 #per development
LCI_paper = self.UP["Paper"]*self.p["developers"]*self.p["paper_use"] #per year
LCI_paper = LCI_paper * self.p["devmonths"] / 12 #per development
LCI_office = (LCI_E_office + LCI_water_office + LCI_paper + LCI_travel) #per development
LCI_office = LCI_office / self.p["pkm_fleet"] #per pkm
self.data['Office'] = LCI_office
def infrastructure(self):
LCI_construction = (self.UP["Facilities"]*self.p["new_factory"]/2.74e5) / self.p["pkm_fleet"]
self.data["Infrastructure"] = LCI_construction
def capital(self):
self.p["new_jigs"] = self.p["OEW"] * 500 # 50t of jigs per 100kg of product
self.UP["Capital"] = self.UP["Steel"] + self.UP["Jigs"] # material plus transformation
LCI_capital = (self.UP["Capital"]*self.p["new_jigs"] + self.UP["Machine"]*self.p["new_machine"])/self.p["pkm_fleet"]
self.data["Capital"] = LCI_capital
def dev(self):
self.office()
self.infrastructure()
self.capital()
def materials(self):
try:
reuse = self.p['reuse']
except:
reuse = 1
self.p["Al"] = self.p['p_Al'] * self.p['b2f_Al'] * self.p['OEW'] * reuse
self.p["steel"] = self.p['p_steel'] * self.p['b2f_steel'] * self.p['OEW'] * reuse
self.p["Ti"] = self.p['p_Ti'] * self.p['b2f_Ti'] * self.p['OEW'] * reuse
self.p["inconel"] = self.p['p_inconel'] * self.p['b2f_inconel'] * self.p['OEW'] * reuse
self.p["GFRP"] = self.p['p_GFRP'] * self.p['b2f_GFRP'] * self.p['OEW'] * reuse
self.p["CFRP"] = self.p['p_CFRP'] * self.p['b2f_CFRP'] * self.p['OEW'] * reuse
LCI_Al = self.UP["Aluminium"] * self.p["Al"]
LCI_steel = self.UP["Steel"] * self.p["steel"]
LCI_Ti = self.UP["Titanium"] * self.p["Ti"]
LCI_inconel = self.UP["Inconel"] * self.p["inconel"]
LCI_GFRP = self.UP["GFRP"] * self.p["GFRP"]
LCI_CFRP = self.UP["CFRP"] * self.p["CFRP"]
#LCI Material Extraction and Transformation
LCI_material = (LCI_Al + LCI_steel + LCI_Ti + LCI_inconel + LCI_GFRP + LCI_CFRP) / self.p["pkm_life"]
self.data["Materials"] = LCI_material
def factory(self):
LCI_E_factory = self.electricity(self.p["E_factory"])
LCI_E_factory = LCI_E_factory * self.p["takt"] / 30 # per aircraft
LCI_water_factory = self.UP["Water"]*self.p["water_factory"] \
+ self.UP["Wastewater"]*self.p["wastewater_factory"] # per month
LCI_water_factory = LCI_water_factory * self.p["takt"] / 30 # per aircraft
LCI_lube = self.UP["Lubricant"] * self.p["lubricant"] # per month
LCI_lube = LCI_lube * self.p["takt"] / 30 # per aircraft
self.p["facilities_maint"] = self.p["OEW"] * 4.58e-10 # use per kg of product
LCI_facilities_maint = self.UP["Facilities"] * self.p["facilities_maint"] * 0.02 # per year
LCI_facilities_maint = LCI_facilities_maint * self.p["takt"] / 365 # per aircraft
LCI_factory = (LCI_E_factory + LCI_water_factory + LCI_lube + LCI_facilities_maint)/self.p["pkm_life"]
self.data["Factory"] = LCI_factory
def logistics(self):
lorry = self.p["d_lorry"] * self.p["m_lorry"] #tonne * km
sea = self.p["d_sea"] * self.p["m_sea"] #tonne * km
air = self.p["d_air"] * self.p["m_air"] #tonne * km
LCI_logistics = (self.UP["Lorry"]*lorry + self.UP["Sea"]*sea \
+ self.UP["Air"]*air) / self.p["pkm_life"]
self.data['Logistics'] = LCI_logistics
def sustaining(self):
LCI_sustaining = self.data["Office"] * 0.01 / 30 #per day
LCI_sustaining = (LCI_sustaining * self.p["takt"])/self.p["pkm_life"]
self.data["Sustaining"] = LCI_sustaining
def mfg(self):
self.materials()
self.factory()
self.logistics()
self.sustaining()
def flights(self):
try:
self.p["t_ccd"] = self.p["FH"]*60 - (self.p["t_app"] + self.p["t_to"] + self.p["t_climb"]) # minutes
except:
self.p["t_ccd"] = self.p['FH']*60 - self.p['ff_lto']
self.p["fuel_ccd"] = self.p["ff_ccd"] * self.p["t_ccd"] * 60 # kg
self.data["LTO"] = self.UP["LTO"] / self.p["pkm_flight"]
self.data["CCD"] = self.UP["CCD"] * self.p["fuel_ccd"] / self.p["pkm_flight"]
def maintenance(self):
LCI_maint = self.UP["Aluminium"]*self.p["maint_Al"] + self.UP["Steel"]*self.p["maint_steel"] \
+ self.UP["Polymer"]*self.p["maint_pol"] + self.UP["Battery"]*self.p['maint_battery'] #por ano
LCI_maint = (LCI_maint / self.p["flights_year"]) / self.p["pkm_flight"]
self.data['Maintenance'] = LCI_maint
def airport(self):
if self.type == "cargo":
ap_impact = 0.132 # 13.2% of airport impacts are due to cargo
elif self.type == "pax":
ap_impact = 0.868
else:
ap_impact = 1
self.p["f_pax_ap"] = self.p["pax_ap"] / 22500000 # fraction of pax relative to zurich in 2000
LCI_ap = self.UP["Airport"] * self.p["f_pax_ap"]/100 / self.p["flights_ap"] # 100 life years for building
LCI_ap = LCI_ap * ap_impact / self.p["pkm_flight"]
self.data["Airport"] = LCI_ap
def fuel(self):
try:
self.p["fuel_lto"] = self.p['ff_lto'] * self.p['t_lto'] * 60
except:
self.p["fuel_lto"] = self.p['t_app']*60*self.p['ff_app'] + self.p['t_idle']*60*self.p['ff_idle'] \
+ self.p['t_to']*60*self.p['ff_to'] + self.p['t_climb']*60*self.p['ff_climb']
LCI_fuel = (self.UP['Kerosene']*(self.p["fuel_ccd"]+self.p["fuel_lto"]))/ self.p["pkm_flight"]
self.data["Fuel"] = LCI_fuel
def ope(self):
self.flights()
self.maintenance()
self.airport()
self.fuel()
def eol(self):
try:
reuse_factor = (2 - self.p['reuse'])
except KeyError:
reuse_factor = 1
E_sort_constant = 0.4645 / 3.6 # kWh/kg of material, on average
self.p["E_sort"] = E_sort_constant * self.p['OEW'] * reuse_factor
LCI_sort = self.electricity(self.p["E_sort"])
materials = ['Al','steel','Ti','inconel','GFRP','CFRP']
scenarios = ['ldf', 'incin','recycl']
chunks = self.data.chunks['i'][0]
iterations = self.data.i.size
UP_eol = self.UP.rename_vars({'Landfill':'ldf','Incineration':'incin','Aluminium':'Al',
'Titanium':'Ti', 'Inconel':'inconel','Steel':'steel'})
eol = xr.Dataset({scenario: (['Substances','i'],da.empty((1835,iterations), chunks=(1835,chunks)))
for scenario in scenarios}, coords=self.data.coords)
for scenario in scenarios:
for material in materials:
self.p[scenario+"_"+material] = self.p["p_"+scenario+"_"+material]*self.p[material]*reuse_factor
if scenario == 'recycl':
eol[scenario] += UP_eol[material] * self.p[scenario + "_" + material]
else:
eol[scenario] += UP_eol[scenario] * self.p[scenario + "_" + material]
self.data["Recycling"] = (LCI_sort - eol['recycl']) / self.p["pkm_life"]
self.data["Incineration"] = eol["incin"] / self.p["pkm_life"]
self.data["Landfill"] = eol["ldf"] / self.p["pkm_life"]
def run(self):
self.dev()
self.mfg()
self.ope()
self.eol()
MFG = self.data["Logistics"]+self.data["Sustaining"]+self.data["Factory"]+self.data["Materials"]
LCI_prot = (MFG*self.p["prototypes"] + MFG*self.p["ironbirds"]*0.3)/self.p["pkm_fleet"]
self.data["Prototypes"] = LCI_prot
self.p["cert_flights"] = self.p["test_FH"] / self.p["FH"]
self.data["Certification"] = (self.data["LTO"]+self.data["CCD"])*self.p["cert_flights"]/self.p["pkm_fleet"]
return self.data
def electricity(self, E):
"""Calculates the LCI of electricity consumption based on a gas-wind-hydropower electricity grid."""
E_wind = E * self.p['grid_wind']
E_gas = E * self.p['grid_gas']
E_hydro = E * self.p['grid_hydro']
LCI_E = self.UP['Elec_wind']*E_wind \
+ self.UP['Elec_gas']*E_gas + self.UP['Elec_hydro']*E_hydro
return LCI_E
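# Hedged sketch of the grid-mix arithmetic used in LCI.electricity() above, with
# plain floats instead of unit-process DataArrays (values are illustrative):
#   LCI_E = UP_wind * (E * grid_wind) + UP_gas * (E * grid_gas) + UP_hydro * (E * grid_hydro)
# e.g. for E = 100 kWh and shares wind/gas/hydro = 0.2/0.5/0.3, the wind share is 20 kWh.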
|
# coding=utf-8
'''
same
'''
import torch
from torch.nn import Module
from abc import abstractmethod
import Putil.base.logger as plog
from Putil.data.io_convertor import IOConvertor
class IOConvertorModule(IOConvertor, Module):
def __init__(self, io):
Module.__init__(self)
IOConvertor.__init__(self, io)
pass
def __call__(self, *args):
return self.forward(*args)
@abstractmethod
def forward(self, *args):
pass
|
#!/usr/bin/env python
"""
reverse_comp.py <filename>
Prints the reverse complement of a DNA string (in Fasta format).
"""
import sys
import Fasta
import Sequence
if len(sys.argv)!=2 or '-h' in sys.argv or '--help' in sys.argv:
sys.exit(__doc__)
iFilename = sys.argv[1]
header, seq = Fasta.load(iFilename)
seq = Sequence.reverse_complement(seq.upper())
print('>%s' % header)
for i in range(0, len(seq), 80):
print(seq[i:i+80])
|
from django.conf.urls import url
from ..views import asset_views
asset_urlpatterns = [
# ex: /portfolio/assets
url(
regex=r'^assets/$',
view=asset_views.AssetView.as_view(),
name='assets'
),
# ex: /portfolio/4/asset/
url(
regex=r'^(?P<pk>\w+)/asset/$',
view=asset_views.AssetDetail.as_view(),
name='asset'
),
# ex: /portfolio/5/asset/edit/
url(
regex=r'^(?P<pk>[0-9]+)/asset/edit/$',
view=asset_views.AssetUpdate.as_view(),
name='edit_asset'
),
# ex: /portfolio/asset/add/
url(
regex=r'^asset/add/$',
view=asset_views.AssetCreate.as_view(),
name='add_asset'
)
]
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyNeuroc(PythonPackage):
"""Python library neuron morphology analysis"""
homepage = "https://bbpgitlab.epfl.ch/nse/neuroc"
git = "git@bbpgitlab.epfl.ch:nse/neuroc.git"
version('develop')
version('0.2.8', tag='neuroc-v0.2.8')
depends_on('py-setuptools', type=('build', 'run'))
depends_on('py-attrs@19.1.0:', type=('build', 'run'))
depends_on('py-tqdm@4.23.4:', type=('build', 'run'))
depends_on('py-morph-tool@2.9.0:2.999', type=('build', 'run'))
depends_on('py-scikit-learn@0.21.3:', type=('build', 'run'))
depends_on('py-morphio@3.0:3.999', type=('build', 'run'))
depends_on('py-neurom@3:3.999', type=('build', 'run'))
depends_on('py-pandas@1.0.3:', type=('build', 'run'))
depends_on('py-click@6.7:', type=('build', 'run'))
depends_on('py-numpy@1.15.1:', type=('build', 'run'))
# plotly extra requirements
# depends_on('py-dash', type=('build', 'run'))
# depends_on('py-dash-html-components', type=('build', 'run'))
# depends_on('py-dash-core-components', type=('build', 'run'))
# depends_on('py-dash-table', type=('build', 'run'))
def patch(self):
filter_file(".*dash.*", "", "setup.py")
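# Hedged usage note: with this recipe on a Spack repository path, the package would
# typically be installed with `spack install py-neuroc@0.2.8` (or `@develop`).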
|
from marshmallow_sqlalchemy import ModelSchema
from marshmallow import fields
class TodoSchemaGenerator():
db = None
model = None
def __init__(self, db, model):
self.db = db
self.model = model
def get_schema(self, many = False, only = None):
class TodoSchema(ModelSchema):
class Meta(ModelSchema.Meta):
model = self.model.get_model()
sqla_session = self.db.session
id = fields.Number(dump_only=True)
title = fields.String(required=True)
todo_description = fields.String(required=True)
return TodoSchema(many = many, only = only)
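# Hedged usage sketch (assumes a Flask-SQLAlchemy `db` and a wrapper object whose
# get_model() returns the SQLAlchemy Todo model, as the class above expects):
# generator = TodoSchemaGenerator(db, todo_model_wrapper)
# schema = generator.get_schema(many=True)
# payload = schema.dump(todo_query_results)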
|
import numpy as np
import math
from typing import Optional, Tuple
from agents.common import BoardPiece, PlayerAction, SavedState, PLAYER1, PLAYER2, NO_PLAYER, GameState
from agents.common import connected_four, check_end_state, apply_player_action, check_open_columns
#num_rows = board.shape[0]
#num_columns = board.shape[1]
def generate_move(
board: np.ndarray, player: BoardPiece, saved_state: Optional[SavedState]
) -> Tuple[PlayerAction, Optional[SavedState]]:
alpha = -math.inf
beta = math.inf
depth = 4
# Choose a valid, non-full column that maximizes score and return it as `action`
action = minimax(board, depth, alpha, beta, player, True)[0]
return action, saved_state
def center_column_score(board: np.ndarray, player: BoardPiece) -> int:
'''
Prefer playing pieces in center column of the board
:param board: current state of board
:param player: agent
:return: increased score for column in the center of board
'''
center_column = int(board.shape[1] / 2)
center_column = list(board[:,center_column])
pieces_count = center_column.count(player)
return pieces_count * 3
def even_odd_row_scores(board: np.ndarray, player: BoardPiece) -> int:
'''
Prefer playing pieces in the odd or even rows depending on player
:param board: current state of the board
:param player: agent
:return: increased score for even or odd rows of the board depending on player
'''
score = 0
if (player == PLAYER1):
# prefer odd rows
start = 0
else: #player must be PLAYER2
#prefer even rows
start = 1
#get even or odd rows depending on start
for row in np.arange(start,board.shape[0], 2):
even_odd_row = list(board[row,:])
row_score = even_odd_row.count(player)
score += row_score * 2
return score
def adjacent_score(adjacent_four: list, player: BoardPiece) -> int:
"""
Counts how many pieces of a specified player are adjacent in all directions and assigns score
:param adjacent_four: four adjacent spots on the board in any direction (horizontal, vertical, diagonal)
:param player: agent
:return: score for playing piece at spot within adjacent_four
"""
score = 0
#check which player score to maximize and which player to block
if player == PLAYER1:
opponent_player = PLAYER2
else:
opponent_player = PLAYER1
#check if agent (player) is close to getting a win by placing 4 adjacent pieces
if adjacent_four.count(player) == 4:
score += 10000
elif adjacent_four.count(player) == 3 and adjacent_four.count(NO_PLAYER) == 1:
score += 100
elif adjacent_four.count(player) == 2 and adjacent_four.count(NO_PLAYER) == 2:
score += 10
#block opponent from getting a win
if adjacent_four.count(opponent_player) == 3 and adjacent_four.count(NO_PLAYER) == 1:
score -= 70
if adjacent_four.count(opponent_player) == 2 and adjacent_four.count(NO_PLAYER) == 2:
score -= 10
return score
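# Illustrative examples of the scoring above (hypothetical windows; PLAYER1 and
# NO_PLAYER are the imported board constants):
# adjacent_score([PLAYER1, PLAYER1, PLAYER1, NO_PLAYER], PLAYER1) -> 100
# adjacent_score([PLAYER2, PLAYER2, PLAYER2, NO_PLAYER], PLAYER1) -> -70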
def heuristic(board: np.ndarray, player: BoardPiece) -> int:
'''
Calculates score considering 4 adjacent spots of the board in each row, column, and diagonal
(checks how many empty and filled spots there are in 4 adjacent spots in all directions)
:param board: current state of board
:param player: player who wants to maximize score
:return: score that can be achieved by playing an open position
'''
num_rows = board.shape[0]
num_columns = board.shape[1]
score = 0
#Prefer moves in the center column
score += center_column_score(board, player)
#Prefer moves in even or odd rows depending on player
score += even_odd_row_scores(board, player)
# first check horizontal
for row in range(num_rows): #loop through every row
row = board[row,:]
#now loop through each column and check 4 adjacent spots
for col in range(num_columns-3): #only need to loop through first 4 cols, since adjacent_4 would touch (j+3)
adjacent_four = list(row[col:col+4]) #convert to list to apply count() later
#now count the number of pieces for each player
score += adjacent_score(adjacent_four,player)
# vertical
for col in range(num_columns):
col = board[:,col]
for row in range(num_rows-3):
adjacent_four = list(col[row:row+4])
score += adjacent_score(adjacent_four, player)
#score positive sloped diagonal
for row in range(num_rows-3): #rows
for col in range(num_columns-3): # cols
adjacent_four = [board[row+i,col+i] for i in range(4)]
score += adjacent_score(adjacent_four,player)
#score negative diagonal
for row in range(num_rows-3): #rows
for col in range(num_columns-3): # cols
adjacent_four = [board[row + 3 - i, col + i] for i in range(4)] #col increases but row decreases
score += adjacent_score(adjacent_four, player)
return score
def minimax(board: np.ndarray, depth: int, alpha: int, beta: int, player: BoardPiece, maximizing_player: bool) -> Tuple[int, int]:
'''
Returns the column where the action should be placed together with its minimax score
:param board: current state of board
:param depth: depth of search tree
:param alpha: best score guaranteed so far for the maximizing player (used for pruning)
:param beta: best score guaranteed so far for the minimizing player (used for pruning)
:param player: the agent's piece
:param maximizing_player: True if we want to maximize the score for player
:return: column to play and the min or max score for that action
'''
#check which player is the agent so that we don't max/min for wrong player
if player == PLAYER1:
opponent_player = PLAYER2
else:
opponent_player = PLAYER1
#check which columns are currently open
open_cols = np.asarray(check_open_columns(board))
#check if depth is 0
if depth == 0:
score = heuristic(board, player)
return None, score
#check if we're at a leaf/terminal node
if check_end_state(board,player) != GameState.STILL_PLAYING:
if connected_four(board, player): #agent won
return None, 100000
if connected_four(board, opponent_player): #opponent won
return None, -100000
else: #must be a draw
return None, 0
if maximizing_player: #get max score for agent
score = -math.inf
for column in open_cols:
#now simulate making a move and check what score it would get, save the original board in board
board, board_copy = apply_player_action(board, column, player, True)
# recursive call to minimax with depth-1 with board_copy so board isn't modified
next_score = minimax(board_copy, depth-1, alpha, beta, player, False)[1] #only get the score
#if the score is better save score and column
if next_score > score:
score = next_score
action_column = column
#evaluate alpha for early stopping
alpha = max(alpha, score)
if alpha >= beta: #don't evaluate more options down this path of tree
break
return action_column, score
else:
score = math.inf
for column in open_cols:
board, action_board = apply_player_action(board, column, opponent_player, True)
next_score = minimax(action_board, depth-1, alpha, beta, player, True)[1]
if next_score < score:
score = next_score
action_column = column
beta = min(beta, score) #here we want to minimize since we're opponent player
if alpha >= beta:
break
return action_column, score
|
waypoints = [(8.12431812286377, 0.9884904026985168), (8.086380004882812, 1.2322419881820679), (8.048443078994751, 1.475993037223816), (8.010504007339478, 1.7197440266609192), (7.972568035125732, 1.963496446609497), (7.934642553329468, 2.2072595357894897), (7.89715838432312, 2.4510819911956787), (7.860599994659424, 2.694797992706299), (7.823238134384155, 2.9386719465255737), (7.785614728927612, 3.1826109886169434), (7.748008966445923, 3.426566004753113), (7.710411787033081, 3.6705124378204346), (7.66268253326416, 3.9129165410995483), (7.606831073760986, 4.123275518417358), (7.538827896118164, 4.330195426940918), (7.473352432250977, 4.504245042800903), (7.418391466140747, 4.615414142608643), (7.350448131561279, 4.7208147048950195), (7.271114349365234, 4.821835517883301), (7.171124458312988, 4.9144744873046875), (7.049574613571167, 5.004922389984131), (6.88931941986084, 5.094431400299072), (6.665685415267944, 5.200993061065674), (6.4503724575042725, 5.3212034702301025), (6.243717193603516, 5.455868482589722), (6.04146146774292, 5.599713563919067), (5.8312668800354, 5.729801416397095), (5.6059699058532715, 5.82970118522644), (5.4196555614471436, 5.8815460205078125), (5.254481077194214, 5.902042865753174), (5.092396974563599, 5.9034295082092285), (4.9177680015563965, 5.875003099441528), (4.7680675983428955, 5.821169853210449), (4.625657796859741, 5.747572422027588), (4.483626008033752, 5.6375555992126465), (4.372556090354919, 5.5129923820495605), (4.273463487625122, 5.367404937744141), (4.186340570449829, 5.175290584564209), (4.132157564163208, 4.970823049545288), (4.1032469272613525, 4.725077867507935), (4.104186415672302, 4.477489471435547), (4.133409023284912, 4.23215913772583), (4.185578465461731, 3.9907249212265015), (4.255492568016052, 3.75332248210907), (4.335228085517883, 3.519709587097168), (4.413285851478577, 3.285557508468628), (4.490407943725586, 3.051064968109131), (4.575141906738281, 2.8064310550689697), (4.665269613265991, 2.557635545730591), (4.761425971984863, 2.2958614826202393), (4.845442295074463, 2.049849033355713), (4.914431571960449, 1.8049694895744324), (4.9590840339660645, 1.5599904656410217), (4.968848943710327, 1.3767715096473694), (4.953732013702393, 1.225703477859497), (4.9243080615997314, 1.0923536717891693), (4.874530076980591, 0.9744531214237213), (4.811803817749023, 0.8670504242181778), (4.7310638427734375, 0.7757765799760818), (4.639754056930542, 0.6960672959685326), (4.53597092628479, 0.6351533643901348), (4.425333380699158, 0.5868358723819256), (4.31070601940155, 0.5462696515023708), (4.133717060089111, 0.5105358138680458), (3.9520061016082764, 0.4950168505311012), (3.7228105068206787, 0.4923985004425049), (3.481923460960388, 0.4975791871547699), (3.245776414871216, 0.501126155257225), (3.035504460334778, 0.50196872651577), (2.8252240419387817, 0.5016545429825783), (2.593984603881836, 0.5014481991529465), (2.3312524557113647, 0.5014559105038643), (2.3312524557113647, 0.5014559105038643), (2.0685290098190308, 0.5015483349561691), (1.821654498577118, 0.5012944936752319), (1.5748854875564575, 0.5017464235424995), (1.3278529644012451, 0.49889006465673447), (1.0805863738059998, 0.4953853487968445), (0.8922330141067505, 0.5015363693237305), (0.7341673970222473, 0.5192165374755859), (0.5884353518486023, 0.5470210313796997), (0.444983147084713, 0.5935898460447788), (0.2900082990527153, 0.6586092279758304), (0.13993845880031586, 0.7347002364695072), (-0.07424405217170715, 0.8577179685235023), (-0.2859680615365505, 0.9852865785360336), (-0.49984051287174225, 
1.1095064878463745), (-0.7144187539815903, 1.231426864862442), (-0.9287944138050079, 1.3543725907802582), (-1.1432064771652222, 1.4824312031269073), (-1.358678787946701, 1.616392433643341), (-1.5730475187301636, 1.7552099823951721), (-1.7863735556602478, 1.8984400033950806), (-2.00101900100708, 2.041799545288086), (-2.2202319502830505, 2.177119493484497), (-2.4457980394363403, 2.302161931991577), (-2.6396130323410034, 2.393519937992096), (-2.841279983520508, 2.461174488067627), (-3.0088279247283936, 2.4887245297431946), (-3.159435987472534, 2.4883150458335876), (-3.301242470741272, 2.4654030203819275), (-3.438712000846863, 2.415762960910797), (-3.574346423149109, 2.340478003025055), (-3.708780884742737, 2.2319304943084717), (-3.8442704677581787, 2.0913585424423218), (-3.9776545763015747, 1.9165594577789307), (-4.128239035606384, 1.691877007484436), (-4.285765051841736, 1.4670869410037994), (-4.400313138961792, 1.3299558758735657), (-4.531231164932251, 1.211074411869049), (-4.66146993637085, 1.1303751021623611), (-4.794528007507324, 1.0796546787023544), (-4.928571701049805, 1.0485481023788452), (-5.067189455032349, 1.0454987287521362), (-5.209157705307007, 1.0622868537902832), (-5.3572704792022705, 1.1097516119480133), (-5.516993522644043, 1.182623952627182), (-5.698852062225342, 1.298611044883728), (-5.934061527252197, 1.4790048897266388), (-6.1242475509643555, 1.6191474497318268), (-6.298676490783691, 1.7270694971084595), (-6.4555253982543945, 1.8048919439315796), (-6.603607654571533, 1.8571669459342957), (-6.74675440788269, 1.8865530490875244), (-6.886257648468018, 1.8930449485778809), (-7.022120952606201, 1.8779180645942688), (-7.153216600418091, 1.8405635356903076), (-7.278866529464722, 1.7810590267181396), (-7.400071859359741, 1.7009375095367432), (-7.516464948654175, 1.5975424647331238), (-7.629418134689331, 1.4696255028247833), (-7.7278151512146, 1.3294013738632202), (-7.829723834991455, 1.1528145372867584), (-7.913762092590332, 0.9830333590507507), (-8.009573459625244, 0.7542399913072586), (-8.100673913955688, 0.5238058492541313), (-8.194724082946777, 0.2956051635555923), (-8.298619985580444, 0.07173889875411987), (-8.409120082855225, -0.14898010343313217), (-8.518901824951172, -0.37005828879773617), (-8.628087759017944, -0.5914282500743866), (-8.738917350769043, -0.8120833486318588), (-8.85364580154419, -1.030766874551773), (-8.974236488342285, -1.2464678883552551), (-9.095809936523438, -1.4617609977722168), (-9.19411325454712, -1.6873124837875366), (-9.260026454925537, -1.923838555812834), (-9.296059608459473, -2.1673964262008685), (-9.310271263122559, -2.3729485273361206), (-9.307019233703613, -2.5788949728012085), (-9.290980815887451, -2.7426079511642456), (-9.265025615692139, -2.8841819763183594), (-9.2300705909729, -3.0132004022598267), (-9.183152675628662, -3.133615493774414), (-9.124776363372803, -3.247638463973999), (-9.051392555236816, -3.3558889627456665), (-8.963252067565918, -3.4617279767990112), (-8.853052616119385, -3.568813443183899), (-8.713794708251953, -3.6908544301986694), (-8.531688451766968, -3.8569865226745605), (-8.372043132781982, -4.04205048084259), (-8.238671064376831, -4.246592402458191), (-8.132850885391235, -4.46875), (-8.050578594207764, -4.658127784729004), (-7.972878694534302, -4.8272175788879395), (-7.896465539932251, -4.974606990814209), (-7.814936876296997, -5.1023054122924805), (-7.726712942123413, -5.216485977172852), (-7.62753963470459, -5.320809602737427), (-7.519604921340942, -5.416505336761475), (-7.397137641906738, -5.513267993927002), 
(-7.2504494190216064, -5.617475509643555), (-7.048876523971558, -5.7599194049835205), (-6.850481033325195, -5.906782627105713), (-6.649286508560181, -6.049717426300049), (-6.435657024383545, -6.176938056945801), (-6.208886384963989, -6.273805379867554), (-6.029123067855835, -6.317962884902954), (-5.883344888687134, -6.333068609237671), (-5.736926555633545, -6.328474521636963), (-5.607994794845581, -6.307444095611572), (-5.4852294921875, -6.275675535202026), (-5.369054794311523, -6.229165315628052), (-5.259897470474243, -6.167568922042847), (-5.158555507659912, -6.093203544616699), (-5.06749701499939, -6.0031139850616455), (-4.98659610748291, -5.899354457855225), (-4.919483661651611, -5.778003454208374), (-4.865926027297974, -5.6392738819122314), (-4.831943511962891, -5.477758169174194), (-4.818969011306763, -5.292669057846069), (-4.83566951751709, -5.085668325424194), (-4.8848326206207275, -4.842398643493652), (-4.951838493347168, -4.603148460388184), (-5.025342702865601, -4.3662941455841064), (-5.088326930999756, -4.127242565155029), (-5.130080938339233, -3.8840184211730957), (-5.155987977981567, -3.6385674476623535), (-5.1728644371032715, -3.392338514328003), (-5.181535005569458, -3.145156502723694), (-5.182666540145874, -2.895782470703125), (-5.170444965362549, -2.64713454246521), (-5.141123056411743, -2.4016629457473755), (-5.096296548843384, -2.20236998796463), (-5.0260045528411865, -2.012779474258423), (-4.9460461139678955, -1.872079074382782), (-4.856797933578491, -1.7525565028190613), (-4.770139455795288, -1.6644110083580017), (-4.670130968093872, -1.594367653131485), (-4.556408405303955, -1.5389962196350098), (-4.431985139846802, -1.4982959032058716), (-4.2897820472717285, -1.479777216911316), (-4.132490396499634, -1.4801546037197113), (-3.9508520364761353, -1.5120134055614471), (-3.744681477546692, -1.5708953142166138), (-3.513489007949829, -1.6688810586929321), (-3.2873870134353638, -1.7803704738616943), (-3.0659879446029663, -1.8896265029907227), (-2.8428244590759277, -1.9949644804000854), (-2.619845986366272, -2.1009715795516968), (-2.404729962348938, -2.2159374952316284), (-2.1922755241394043, -2.333102524280548), (-1.982692539691925, -2.448066473007202), (-1.7868344187736511, -2.570488452911377), (-1.6202459931373596, -2.705993890762329), (-1.493972837924955, -2.8402489423751858), (-1.3763171434402466, -2.983103036880493), (-1.262414127588272, -3.129210948944092), (-1.1287773251533508, -3.2925894260406494), (-0.9973797798156754, -3.4333264827728254), (-0.8654543161392212, -3.549507975578308), (-0.7281691133975983, -3.6452596187591553), (-0.5785466134548187, -3.722331166267395), (-0.4112188443541527, -3.783251166343689), (-0.2231699451804161, -3.8259259462356567), (-0.031099549029022455, -3.849012613296509), (0.21535785496234894, -3.8624050617218018), (0.4619779586791992, -3.8575409650802612), (0.7058971971273422, -3.8195680379867554), (0.8649654388427734, -3.768284559249878), (1.0095209777355194, -3.70067298412323), (1.1387284696102142, -3.6170005798339844), (1.259017288684845, -3.5165555477142334), (1.3745339214801788, -3.38874351978302), (1.4733079373836517, -3.2470074892044067), (1.5934688746929169, -3.03063702583313), (1.714346468448639, -2.814239501953125), (1.8256549835205078, -2.6380715370178223), (1.9424825310707092, -2.492061972618103), (2.062934994697571, -2.374499499797821), (2.1918880343437195, -2.284302532672882), (2.333604574203491, -2.213764488697052), (2.4990124702453613, -2.1619025468826294), (2.7072635889053345, -2.129780948162079), (2.9189255237579346, 
-2.1197924613952637), (3.125335931777954, -2.119749963283539), (3.3301249742507935, -2.1389089822769165), (3.4910188913345337, -2.1746585369110107), (3.6277875900268555, -2.2239180207252502), (3.7511314153671265, -2.283437490463257), (3.866363048553467, -2.3557459712028503), (3.979614019393921, -2.439976453781128), (4.0961281061172485, -2.5432949662208557), (4.228511929512024, -2.6745409965515137), (4.399149060249329, -2.85362708568573), (4.571305990219116, -3.0310670137405396), (4.754154920578003, -3.1967769861221313), (4.921145915985107, -3.316844940185547), (5.083767414093018, -3.4044893980026245), (5.236791610717773, -3.4653135538101196), (5.381695032119751, -3.5020899772644043), (5.52078104019165, -3.5222115516662598), (5.661412477493286, -3.5291489362716675), (5.908102512359619, -3.5241494178771973), (6.154648065567017, -3.511675000190735), (6.401369094848633, -3.504562497138977), (6.648145914077759, -3.499761462211609), (6.894919157028198, -3.4936060905456543), (7.110523462295532, -3.4762868881225586), (7.307848691940308, -3.441709518432617), (7.4625279903411865, -3.3953850269317627), (7.5950775146484375, -3.342887043952942), (7.711983919143677, -3.2795250415802), (7.820120096206665, -3.2096660137176514), (7.918773174285889, -3.1298835277557373), (8.011921882629395, -3.043442964553833), (8.097152948379517, -2.9467190504074097), (8.178373098373413, -2.84033203125), (8.25303339958191, -2.717187523841858), (8.327041625976562, -2.570037007331848), (8.400359869003296, -2.3773900270462036), (8.454155683517456, -2.1782084703445435), (8.498638391494751, -1.9351195096969604), (8.513131380081177, -1.690551996231079), (8.500330924987793, -1.4454104900360107), (8.466455698013306, -1.2029719948768616), (8.427306413650513, -0.9607942402362823), (8.390488147735596, -0.7179563343524933), (8.352124691009521, -0.4740799069404602), (8.313992977142334, -0.23028354346752167), (8.276074409484863, 0.013493970036506653), (8.238128423690796, 0.2572329491376877), (8.200194120407104, 0.5009891092777252), (8.162255764007568, 0.7447387278079987), (8.12431812286377, 0.9884904026985168)] |
import pandas as pd
import numpy as np
import networkx as nx
import json, pytz, os
from collections import Counter
from tqdm import tqdm
from .tennis_utils import *
from .handler_utils import *
TIMEZONE = {
"rg17": pytz.timezone('Europe/Paris'),
"uo17": pytz.timezone('America/New_York')
}
QUALIFIER_START = {
"rg17": 1495576800, # 2017-05-24 0:00 Paris (2017-05-22 and 2017-05-23 is missing from data)
"uo17": 1503374400 # 2017-08-22 0:00 New York
}
TOURNAMENT_START = {
"rg17": 1495922400, # 2017-05-28 0:00 Paris
"uo17": 1503892800 # 2017-08-28 0:00 New York
}
DATES_WITH_QUALIFIERS = {
"rg17": ["2017-05-%.2i" % i for i in range(24,32)] + ["2017-06-%.2i" % i for i in range(1,12)],
"uo17": ["2017-08-%.2i" % i for i in range(22,32)] + ["2017-09-%.2i" % i for i in range(1,11)]
}
DATES_WITHOUT_QUALIFIERS = {
"rg17": ["2017-05-%.2i" % i for i in range(28,32)] + ["2017-06-%.2i" % i for i in range(1,12)],
"uo17": ["2017-08-%.2i" % i for i in range(28,32)] + ["2017-09-%.2i" % i for i in range(1,11)]
}
DATES_WITH_NO_GAMES = {
"rg17": ["2017-05-27"],
"uo17": ["2017-08-26","2017-08-27"]
}
class TennisDataHandler():
def __init__(self, data_dir, data_id, include_qualifiers=True, verbose=False):
self.verbose = verbose
self.data_id = data_id
self.data_dir = data_dir + "/" + data_id
if not os.path.exists(self.data_dir):
bashCommand = """mkdir -p %s; cd %s; wget https://dms.sztaki.hu/~fberes/tennis/%s.zip; unzip %s.zip""" % (data_dir, data_dir, data_id, data_id)
print(bashCommand)
print("Downloading data from 'https://dms.sztaki.hu/~fberes/tennis' STARTED...")
os.system(bashCommand)
print("Data was DOWNLOADED!")
self.include_qualifiers = include_qualifiers
self._load_files(self.data_id, self.data_dir)
self._filter_data()
self._extract_mappings()
self.weighted_edges, self.weighted_edges_grouped, self.edges_grouped = prepare_edges(self.mentions, "date")
#self._prepare_edges()
self.daily_p_dict, self.daily_p_df = extract_daily_players(self.schedule, self.player_accounts)
def _load_files(self, data_id, data_dir):
mention_file_path = "%s/%s_mentions_with_names.csv" % (data_dir, data_id)
tennis_match_file_path = "%s/%s_schedule.csv" % (data_dir, data_id)
player_assigments_path = "%s/%s_player_accounts.json" % (data_dir, data_id)
mentions = pd.read_csv(mention_file_path, sep="|")
mentions = mentions[["epoch","src","trg","src_screen_str", "trg_screen_str"]].sort_values("epoch")
self.mentions = mentions
if self.verbose:
print("\n### Load Twitter mentions ###")
print(self.mentions.head(3))
sep = "|" if data_id == "rg17" else ";"
self.schedule = pd.read_csv(tennis_match_file_path, sep=sep)
if self.verbose:
print("\n### Load event schedule ###")
print(self.schedule.head(3))
with open(player_assigments_path) as f:
self.player_accounts = json.load(f)
if self.verbose:
print("\n### Load player accounts ###")
print("Rafael Nadal accounts:", self.player_accounts["Rafael Nadal"])
if self.verbose:
print("Done")
def _filter_data(self):
if self.include_qualifiers:
self.start_time = QUALIFIER_START[self.data_id]
self.dates = DATES_WITH_QUALIFIERS[self.data_id]
else:
self.start_time = TOURNAMENT_START[self.data_id]
self.dates = DATES_WITHOUT_QUALIFIERS[self.data_id]
self.end_time = self.start_time + 86400 * len(self.dates)
self.dates_with_no_games = DATES_WITH_NO_GAMES[self.data_id]
if self.verbose:
print("\n### Filter data ###")
print("Start time:", self.start_time)
print("End time:", self.end_time)
print("Number of days:", len(self.dates))
print("Dates:", self.dates)
print("Dates with no games:", self.dates_with_no_games)
mentions = self.mentions
mentions = mentions[(mentions["epoch"] >= self.start_time) & (mentions["epoch"] <= self.end_time)]
mentions = mentions.assign(date=mentions["epoch"].apply(lambda x: epoch2date(x, TIMEZONE[self.data_id])))
self.number_of_edges = len(mentions)
self.number_of_nodes = len(set(mentions["src"]).union(set(mentions["trg"])))
self.mentions = mentions
if self.verbose:
print("Number of mentions (edges):", self.number_of_edges)
print("Number of accounts (nodes):", self.number_of_nodes)
#print("Min epoch:", mentions["epoch"].min(), "Max epoch:", mentions["epoch"].max())
def _extract_mappings(self):
# account to id
mentions = self.mentions
targets = list(zip(mentions["trg_screen_str"], mentions["trg"]))
sources = list(zip(mentions["src_screen_str"], mentions["src"]))
self.account_to_id = dict(sources+targets)
#print(len(self.account_to_id))
#self.id_to_account = dict(zip(self.account_to_id.values(), self.account_to_id.keys()))
rev_targets = list(zip(mentions["trg"],mentions["trg_screen_str"]))
rev_sources = list(zip(mentions["src"],mentions["src_screen_str"]))
self.id_to_account = dict(rev_sources+rev_targets)
nodes = list(self.account_to_id.values())
# tennis account to player
tennis_account_to_player = {}
alternative_players = {}
alternative_players["uo17"] = {
"Carla Suarez Navarro":"Carla Suárez Navarro",
"Coco Vandeweghe":"CoCo Vandeweghe",
"Juan Martin Del Potro":"Juan Martin del Potro",
"Diede De Groot":"Diede de Groot",
"Mariana Duque-Marino":"Mariana Duque-Mariño",
"Alex De Minaur":"Alex de Minaur",
"Tracy Austin-Holt":"Tracy Austin"
}
# reverse alternative name mapping for rg17
alternative_players["rg17"] = dict(zip(alternative_players["uo17"].values(),alternative_players["uo17"].keys()))
for p, account_names in self.player_accounts.items():
cleaned_p = alternative_players[self.data_id].get(p, p)
for a_name in account_names:
tennis_account_to_player[a_name] = cleaned_p
self.tennis_account_to_player = tennis_account_to_player
def summary(self):
"""Show the data summary"""
return {
"data_id":self.data_id,
"include_qualifiers": self.include_qualifiers,
"dates": self.dates,
"dates_with_no_game": self.dates_with_no_games,
"start_time": self.start_time,
"end_time": self.end_time,
"number_of_edges": self.number_of_edges,
"number_of_nodes": self.number_of_nodes
}
def visualize(self, kind="graph", figsize=(12,8)):
"""Visualize the data. Choose from 'graph' and 'players' options for the 'kind' argument."""
fig = None
if kind == "graph":
fig = visu_graph(self, figsize)
elif kind == "players":
fig = visu_players(self, figsize)
else:
raise RuntimeError("Choose 'kind' parameter from 'players' or 'graph'!")
return fig
def get_daily_players(self, date_id):
"""Get daily tennis players"""
if not date_id in self.dates:
raise RuntimeError("Invalid date_id! Not present in collected dates:", self.dates)
elif date_id in self.dates_with_no_games:
raise RuntimeError("There was no game on this day!")
else:
return self.daily_p_dict[date_id]
def show_daily_players(self):
"""Show daily information about tennis players"""
return self.daily_p_df[self.daily_p_df["date"].isin(self.dates)]
def get_daily_relevance_labels(self, binary=True):
if binary:
label_value_dict = {"current":1.0, "previous":0.0, "next":0.0}
else:
label_value_dict = {"current":2.0, "previous":1.0, "next":1.0}
daily_found_player_dict = dict(zip(self.daily_p_df["date"], self.daily_p_df["found_players"]))
for d in self.dates_with_no_games:
daily_found_player_dict[d] = []
mapper_dicts = (self.tennis_account_to_player, self.account_to_id, daily_found_player_dict)
daily_label_dicts = get_daily_label_dicts(label_value_dict, self.dates, self.mentions, mapper_dicts, self.verbose)
return daily_label_dicts
def export_relevance_labels(self, output_dir, binary=True, only_pos_label=False):
"""Export label files for each date. Use 'only_pos_label=True' if you want to export only the relevant nodes per day."""
daily_label_dicts = self.get_daily_relevance_labels(binary)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
print("%s folder was created." % output_dir)
with open("%s/summary.json" % output_dir, 'w') as f:
json.dump(self.summary(), f, indent=" ", sort_keys=False)
#pd.DataFrame(list(self.account_to_id.items())).sort_values(0).to_csv("%s/account_to_id.csv" % output_dir, index=False)
#pd.DataFrame(list(self.tennis_account_to_player.items())).sort_values(0).to_csv("%s/tennis_account_to_player.csv" % output_dir, index=False)
print("Exporting files STARTED")
for i, date in enumerate(self.dates):
sorted_user_labels = []
for u in sorted(daily_label_dicts[date].keys()):
label_value = daily_label_dicts[date][u]
if only_pos_label:
# export only positive user labels
if label_value > 0.0:
sorted_user_labels.append((u, label_value))
else:
sorted_user_labels.append((u, label_value))
print(date, len(sorted_user_labels))
scores2file(sorted_user_labels,"%s/labels_%i.csv" % (output_dir, i))
print("Exporting files DONE")
def export_edges(self, output_dir, sep="|"):
"""Export edges (mentions) into file. Only time and node identifiers will be expoerted!"""
if not os.path.exists(output_dir):
os.makedirs(output_dir)
print("%s folder was created." % output_dir)
with open("%s/summary.json" % output_dir, 'w') as f:
json.dump(self.summary(), f, indent=" ", sort_keys=False)
self.mentions[["epoch","src","trg"]].to_csv("%s/edges.csv" % output_dir, index=False, header=False, sep=sep)
def get_account_recoder(self, k=None, src_col="src_screen_str", trg_col="trg_screen_str", exclude_final_day=True):
mentions = self.mentions.copy()
enabled_dates = self.dates.copy()
if exclude_final_day:
enabled_dates = enabled_dates[:-1]
mentions = mentions[mentions["date"].isin(enabled_dates)]
mention_activity = list(mentions[src_col]) + list(mentions[trg_col])
cnt = Counter(mention_activity)
if k is None:
accounts, counts = zip(*cnt.most_common())
else:
accounts, counts = zip(*cnt.most_common(k))
node_mapping = dict(zip(accounts,range(len(accounts))))
return node_mapping
def _get_snapshot_edges(self, snapshot_id, grouped_data, edge_type="temporal", account_to_index=None):
edges_grouped, weighted_edges_grouped = grouped_data
snap_edges = []
if edge_type == "temporal":
df = edges_grouped[snapshot_id]
src, trg = reindex_edges(df, self.id_to_account, account_to_index)
weights = list(np.ones(len(df)))
else:
df = weighted_edges_grouped[snapshot_id]
src, trg = reindex_edges(df, self.id_to_account, account_to_index)
if edge_type == "weighted":
weights = list(df["weight"])
else:
weights = list(np.ones(len(df)))
snap_edges = list(zip(src, trg))
weights = weights[:len(snap_edges)]
G = nx.Graph()
G.add_edges_from(snap_edges)
if account_to_index is None:
X = calculate_node_features(G, None)
else:
X = calculate_node_features(G, len(account_to_index))
return snap_edges, weights, X
def extract_snapshots(self, delta_t):
start_epoch = self.start_time
days = len(self.dates)
to_epoch = start_epoch+days*86400+delta_t
splits=list(range(start_epoch,to_epoch,delta_t))
mentions = self.mentions.copy()
mentions = mentions[mentions["date"].isin(self.dates)]
epochs = np.array(mentions["epoch"])
snapshots_ids = pd.cut(epochs, splits, right=False, labels=range(len(splits)-1))
mentions["snapshot_id"] = snapshots_ids
return mentions
def get_data(self, binary_label=True, edge_type="weighted", max_snapshot_idx=None, top_k_nodes=None):
snapshots = self.dates
labels = self.get_daily_relevance_labels(binary=binary_label)
grouped_data = (self.edges_grouped, self.weighted_edges_grouped)
return self._prepare_json_data(snapshots, self.mentions, grouped_data, labels, edge_type, max_snapshot_idx, top_k_nodes)
def get_regression_data(self, delta_t=3*3600, edge_type="weighted", max_snapshot_idx=None, top_k_nodes=None):
mentions = self.extract_snapshots(delta_t)
snapshots = sorted(list(mentions["snapshot_id"].unique()))
labels = regression_labels(mentions, "snapshot_id")
weighted_edges, weighted_edges_grouped, edges_grouped = prepare_edges(mentions, "snapshot_id")
grouped_data = (edges_grouped, weighted_edges_grouped)
return self._prepare_json_data(snapshots, mentions, grouped_data, labels, edge_type, max_snapshot_idx, top_k_nodes)
def _prepare_json_data(self, snapshots, mentions, grouped_data, labels, edge_type, max_snapshot_idx, top_k_nodes):
snaps = snapshots.copy()
account_to_index = self.get_account_recoder(k=top_k_nodes)
data = {}
idx = 0
if max_snapshot_idx is not None:
snaps = snaps[:max_snapshot_idx]
for idx, snapshot_id in tqdm(enumerate(snaps)):
edges, weights, X = self._get_snapshot_edges(snapshot_id, grouped_data, edge_type, account_to_index)
X = list([X[node] for node in range(len(account_to_index))])
X = X[:len(account_to_index)]
y = reindex_labels(labels[snapshot_id], self.id_to_account, account_to_index)
y = list([y.get(node,0) for node in range(len(account_to_index))])
y = y[:len(account_to_index)]
data[str(idx)] = {
"index":idx,
#"date":date,
"edges": edges,
"weights": weights,
"y": y,
"X": X,
}
#if self.include_qualifiers:
# data[str(idx)]["game_day"] = not date in self.dates_with_no_games
idx += 1
data["time_periods"] = len(data)
data["node_ids"] = account_to_index
return data
def to_json(self, path, task="classification", delta_t=3*3600, edge_type="weighted", max_snapshot_idx=None, top_k_nodes=None):
if task == "classification":
print("Preparing classification data...")
data = self.get_data(True, edge_type, max_snapshot_idx, top_k_nodes)
else:
print("Preparing regression data...")
data = self.get_regression_data(delta_t, edge_type, max_snapshot_idx, top_k_nodes)
with open(path, 'w') as f:
json.dump(data, f)
print("done")
|
# This script solves Problem #1 of Section 3's exercises in the TFC book
# Updated: 18 Mar 2021
####################################################################################################
# Differential Equation
# yₓₓ + w^2 y = 0 where w is the angular frequency
#
# subject to: y(0) = 1, yₓ(0) = 0
####################################################################################################
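# For these initial conditions the analytic solution is y(x) = A cos(w x + Phi) with
# A = sqrt(y0^2 + (yₓ(0)/w)^2) = 1 and Phi = arctan(-yₓ(0)/(w y0)) = 0, i.e. y(x) = cos(w x);
# this is the reference that the error check at the end of this script compares against.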
from tfc import utfc
from tfc.utils import LsClass, egrad, MakePlot
from jax import jit
import jax.numpy as np
import numpy as onp
import tqdm
####################################################################################################
## user defined parameters: ************************************************************************
N = 100 # number of discretization points per TFC step
m = 40 # number of basis function terms
basis = 'CP' # basis function type
xspan = [0., 2.] # time range of problem
Nstep = int(xspan[1]/2) # number of TFC steps
y0 = 1. # y(x0) = 1
y0p = 0. # yₓ(x0) = 0
w = 2.*np.pi
## problem initial conditions: *********************************************************************
if basis == 'CP' or basis == 'LeP':
nC = 2
elif basis == 'FS':
nC = 1
else:
nC = 0
# number of constraints
# length of time for one TFC step
xstep = (xspan[1]-xspan[0])/Nstep
# !!! since this differential equation is not an explicit function of position 'x', I can get
# away with constructing the tfc class such that x = [0, xstep] and imposing a constant step so
# that the mapping parameter c = (zf-z0)/(xf-x0) is also constant
## construct univariate tfc class: *****************************************************************
tfc = utfc(N+1, nC, int(m+1), basis = basis, x0=0, xf=xstep)
x = tfc.x
# !!! notice I am using N+1 for the number of points. this is because I will be using the last point
# of a segment 'n' for the initial conditions of the 'n+1' segment
H = tfc.H
dH = tfc.dH
H0 = H(x[0])
H0p = dH(x[0])
## define tfc constrained expression and derivatives: **********************************************
# switching function
phi1 = lambda x: np.ones_like(x)
phi2 = lambda x: x
# tfc constrained expression
y = lambda x,xi,IC: np.dot(H(x),xi) + phi1(x)*(IC['y0'] - np.dot(H0,xi)) \
+ phi2(x)*(IC['y0p'] - np.dot(H0p,xi))
# !!! notice here that the initial conditions are passed as a dictionary (i.e. IC['y0'])
# this will be important so that the least-squares does not need to be re-JITed
yp = egrad(y)
ypp = egrad(yp)
## define the loss function: ***********************************************************************
# yₓₓ + w^2 y = 0
L = jit(lambda xi,IC: ypp(x,xi,IC) + w**2*y(x,xi,IC))
## construct the least-squares class: **************************************************************
xi0 = np.zeros(H(x).shape[1])
IC = {'y0': np.array([y0]), 'y0p': np.array([y0p])}
ls = LsClass(xi0,L,timer=True)
## initialize dictionary to record solution: *******************************************************
xSol = onp.zeros((Nstep,N))
ySol = onp.zeros_like(xSol)
res = onp.zeros_like(xSol)
err = onp.zeros_like(xSol)
time = onp.zeros(Nstep)
xSol[0,:] = x[:-1]
xFinal = x[-1]
## 'propagation' loop: *****************************************************************************
for i in tqdm.trange(Nstep):
xi, time[i] = ls.run(xi0,IC)
# print solution to dictionary
if i > 0:
xSol[i,:] = xFinal + x[:-1]
xFinal += x[-1]
# save solution to python dictionary
ySol[i,:] = y(x,xi,IC)[:-1]
res[i,:] = np.abs(L(xi,IC))[:-1]
# update initial conditions
IC['y0'] = y(x,xi,IC)[-1]
IC['y0p'] = yp(x,xi,IC)[-1]
## compute the error: ******************************************************************************
A = np.sqrt(y0**2 + (y0p/w)**2)
Phi = np.arctan(-y0p/w/y0)
yTrue = A*np.cos(w*xSol+Phi)
err = np.abs(ySol-yTrue)
## print status of run: ****************************************************************************
print('TFC least-squares time[s]: ' +'\t'+ str((time.sum())))
print('Max residual:' +'\t'*3+ str(res.max()))
print('Max error:' +'\t'*3+ str(err.max()))
## plotting: ***************************************************************************************
# figure 1: solution
p1 = MakePlot(r'$x$',r'$y(t)$')
p1.ax[0].plot(xSol.flatten(),ySol.flatten())
p1.ax[0].grid(True)
p1.PartScreen(7.,6.)
p1.show()
# figure 2: residual
p2 = MakePlot(r'$t$',r'$|L(\xi)|$')
p2.ax[0].plot(xSol.flatten(),res.flatten(),'*')
p2.ax[0].grid(True)
p2.ax[0].set_yscale('log')
p2.PartScreen(7.,6.)
p2.show()
# figure 3: error
p3 = MakePlot(r'$t$',r'$|y_{true} - y(t)|$')
p3.ax[0].plot(xSol.flatten(),err.flatten(),'*')
p3.ax[0].grid(True)
p3.ax[0].set_yscale('log')
p3.PartScreen(7.,6.)
p3.show()
|
""" Module containing helper routines for using Keras and Tensorflow models
"""
from __future__ import annotations
import functools
import os
from typing import TYPE_CHECKING
import numpy as np
import requests
import grpc
import tensorflow as tf
from google.protobuf.json_format import MessageToDict
from tensorflow_serving.apis import (
predict_pb2,
get_model_metadata_pb2,
prediction_service_pb2_grpc,
)
from tensorflow.keras.metrics import top_k_categorical_accuracy
from tensorflow.keras.models import load_model as load_keras_model
from aizynthfinder.utils.logging import logger
from aizynthfinder.utils.exceptions import ExternalModelAPIError
if TYPE_CHECKING:
from aizynthfinder.utils.type_utils import Any, Union, Callable, List
_ModelInput = Union[np.ndarray, List[np.ndarray]]
top10_acc = functools.partial(top_k_categorical_accuracy, k=10)
top10_acc.__name__ = "top10_acc" # type: ignore
top50_acc = functools.partial(top_k_categorical_accuracy, k=50)
top50_acc.__name__ = "top50_acc" # type: ignore
CUSTOM_OBJECTS = {"top10_acc": top10_acc, "top50_acc": top50_acc, "tf": tf}
_logger = logger()
TF_SERVING_HOST = os.environ.get("TF_SERVING_HOST")
TF_SERVING_REST_PORT = os.environ.get("TF_SERVING_REST_PORT")
TF_SERVING_GRPC_PORT = os.environ.get("TF_SERVING_GRPC_PORT")
def load_model(
source: str, key: str, use_remote_models: bool
) -> Union["LocalKerasModel", "ExternalModelViaGRPC", "ExternalModelViaREST"]:
"""
Load model from a configuration specification.
If `use_remote_models` is True, tries to load:
1. A Tensorflow server through gRPC
2. A Tensorflow server through REST API
3. A local model
otherwise it just loads the local model
    :param source: if falling back to a local model, this is the filename
    :param key: when connecting to a Tensorflow server this is the model name
    :param use_remote_models: if True will try to connect to remote model server
    :return: a model object with a predict method
"""
if not use_remote_models:
return LocalKerasModel(source)
try:
return ExternalModelViaGRPC(key)
except ExternalModelAPIError:
pass
try:
return ExternalModelViaREST(key)
except ExternalModelAPIError:
pass
return LocalKerasModel(source)
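# A minimal usage sketch (hypothetical filename and model key; the TF_SERVING_*
# environment variables must be set for the remote options to be attempted):
#
#   model = load_model("expansion_policy.hdf5", "expansion_policy", use_remote_models=True)
#   fingerprint = np.zeros((1, len(model)), dtype=np.float32)
#   probabilities = model.predict(fingerprint)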
class LocalKerasModel:
"""
A keras policy model that is executed locally.
The size of the input vector can be determined with the len() method.
:ivar model: the compiled model
:ivar output_size: the length of the output vector
:param filename: the path to a Keras checkpoint file
"""
def __init__(self, filename: str) -> None:
self.model = load_keras_model(filename, custom_objects=CUSTOM_OBJECTS)
try:
self._model_dimensions = int(self.model.input.shape[1])
except AttributeError:
self._model_dimensions = int(self.model.input[0].shape[1])
self.output_size = int(self.model.output.shape[1])
def __len__(self) -> int:
return self._model_dimensions
def predict(self, *args: np.ndarray, **_: np.ndarray) -> np.ndarray:
"""
Perform a forward pass of the neural network.
:param args: the input vectors
:return: the vector of the output layer
"""
return self.model.predict(args)
def _log_and_reraise_exceptions(method: Callable) -> Callable:
@functools.wraps(method)
def wrapper(*args, **kwargs):
try:
return method(*args, **kwargs)
except Exception as err:
msg = "Error when requesting from tensorflow model API"
_logger.error("%s: %s", msg, err)
raise ExternalModelAPIError(msg)
return wrapper
class ExternalModelViaREST:
"""
A neural network model implementation using TF Serving via REST API.
:param name: the name of model
"""
def __init__(self, name: str) -> None:
self._model_url = self._get_model_url(name)
self._sig_def = self._get_sig_def()
def __len__(self) -> int:
first_input_name = list(self._sig_def["inputs"].keys())[0]
return int(
self._sig_def["inputs"][first_input_name]["tensor_shape"]["dim"][1]["size"]
)
def predict(self, *args: np.ndarray, **kwargs: np.ndarray) -> np.ndarray:
"""
Get prediction from model.
If the keys in `kwargs` agree with the model input names, they
        will be used instead of `args`
:param args: the input vectors
:param kwargs: the named input vectors
:return: the vector of the output layer
"""
url = self._model_url + ":predict"
res = self._handle_rest_api_request(
"POST", url, json=self._make_payload(*args, **kwargs)
)
return np.asarray(res["outputs"])
def _get_sig_def(self) -> dict:
res = self._handle_rest_api_request("GET", self._model_url + "/metadata")
return res["metadata"]["signature_def"]["signature_def"]["serving_default"]
# pylint: disable=no-self-use
@_log_and_reraise_exceptions
def _handle_rest_api_request(
self, method: str, url: str, *args: Any, **kwargs: Any
) -> dict:
res = requests.request(method, url, *args, **kwargs)
if res.status_code != 200 or (
res.headers["Content-Type"] != "application/json"
):
raise ExternalModelAPIError(
f"Unexpected response from REST API: {res.status_code}\n{res.text}"
)
return res.json()
def _make_payload(self, *args: np.ndarray, **kwargs: np.ndarray) -> dict:
if all(key in kwargs for key in self._sig_def["inputs"].keys()):
data = {key: kwargs[key].tolist() for key in self._sig_def["inputs"].keys()}
else:
data = {
name: fp.tolist()
for name, fp in zip(self._sig_def["inputs"].keys(), args)
}
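        # the payload follows TF Serving's REST "inputs" format, keyed by input name,
        # e.g. (hypothetical input name): {"inputs": {"input_1": [[0.0, 1.0, 0.0]]}}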
return {"inputs": data}
@staticmethod
def _get_model_url(name: str) -> str:
warning = f"Failed to get url of REST service for external model {name}"
if not TF_SERVING_HOST:
_logger.warning(warning)
raise ExternalModelAPIError("Host not set for model {name}")
if not TF_SERVING_REST_PORT:
_logger.warning(warning)
raise ExternalModelAPIError("REST port not set for model {name}")
return f"http://{TF_SERVING_HOST}:{TF_SERVING_REST_PORT}/v1/models/{name}"
class ExternalModelViaGRPC:
"""
A neural network model implementation using TF Serving via gRPC.
:param name: the name of model
"""
def __init__(self, name: str) -> None:
self._server = self._get_server(name)
self._model_name = name
self._sig_def = self._get_sig_def()
def __len__(self) -> int:
first_input_name = list(self._sig_def["inputs"].keys())[0]
return int(
self._sig_def["inputs"][first_input_name]["tensorShape"]["dim"][1]["size"]
)
@_log_and_reraise_exceptions
def predict(self, *args: np.ndarray, **kwargs: np.ndarray) -> np.ndarray:
"""
Get prediction from model.
If the keys in `kwargs` agree with the model input names, they
        will be used instead of `args`
:param args: the input vectors
:param kwargs: the named input vectors
:return: the vector of the output layer
"""
input_tensors = self._make_payload(*args, **kwargs)
channel = grpc.insecure_channel(self._server)
service = prediction_service_pb2_grpc.PredictionServiceStub(channel)
request = predict_pb2.PredictRequest()
request.model_spec.name = self._model_name
for name, tensor in input_tensors.items():
request.inputs[name].CopyFrom(tensor)
key = list(self._sig_def["outputs"].keys())[0]
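        # 10.0 is the gRPC call timeout in seconds; the returned TensorProto is
        # converted back into a numpy array below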
return tf.make_ndarray(service.Predict(request, 10.0).outputs[key])
@_log_and_reraise_exceptions
def _get_sig_def(self) -> dict:
channel = grpc.insecure_channel(self._server)
service = prediction_service_pb2_grpc.PredictionServiceStub(channel)
request = get_model_metadata_pb2.GetModelMetadataRequest()
request.model_spec.name = self._model_name
request.metadata_field.append("signature_def")
result = MessageToDict(service.GetModelMetadata(request, 10.0))
# close the channel so that it won't be reused after fork and fail
channel.close()
return result["metadata"]["signature_def"]["signatureDef"]["serving_default"]
def _make_payload(self, *args: np.ndarray, **kwargs: np.ndarray) -> dict:
if all(key in kwargs for key in self._sig_def["inputs"].keys()):
inputs = kwargs
else:
inputs = dict(zip(self._sig_def["inputs"].keys(), args))
tensors = {}
for name, vec in inputs.items():
size = int(self._sig_def["inputs"][name]["tensorShape"]["dim"][1]["size"])
tensors[name] = tf.make_tensor_proto(vec, dtype=np.float32, shape=(1, size))
return tensors
@staticmethod
def _get_server(name: str) -> str:
warning = f"Failed to get gRPC server for external model {name}"
if not TF_SERVING_HOST:
_logger.warning(warning)
raise ExternalModelAPIError(f"Host not set for model {name}")
if not TF_SERVING_GRPC_PORT:
_logger.warning(warning)
raise ExternalModelAPIError(f"GRPC port not set for model {name}")
return f"{TF_SERVING_HOST}:{TF_SERVING_GRPC_PORT}"
|
import ply.lex as lex
from .errors import LexicographicError
def find_column(code, token):
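    # returns the 1-based column of `token`, measured from the last newline before token.lexpos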
line_start = code.rfind('\n', 0, token.lexpos) + 1
return token.lexpos - line_start + 1
###### TOKEN LISTS ######
reserved = [
'CLASS',
'INHERITS',
'IF',
'THEN',
'ELSE',
'FI',
'WHILE',
'LOOP',
'POOL',
'LET',
'IN',
'CASE',
'OF',
'ESAC',
'NEW',
'ISVOID',
]
tokens = [
# Literals
'LIT',
# Identifiers
'TYPE', 'ID',
# Primitive data types
'INTEGER', 'STRING', 'BOOL',
# Special keywords
'ACTION',
# Operators
'ASSIGN', 'LESS', 'LESSEQUAL', 'EQUAL', 'INT_COMPLEMENT', 'NOT',
] + reserved
# Ignore rule for single line comments
# t_ignore_SINGLE_LINE_COMMENT = r"\-\-[^\n]*"
def t_SINGLE_LINE_COMMENT(token):
    r"\-\-[^\n]*"
    # discard the comment; the newline that follows (if any) is handled by t_newline
    pass
###### TOKEN RULES ######
# Operators
t_LIT = r"[\+\-\*\/\:\;\(\)\{\}\@\.\,]"
t_ASSIGN = r'<-'
t_LESS = r'<'
t_LESSEQUAL = r'<='
t_EQUAL = r'='
t_INT_COMPLEMENT = r'~'
# Special keywords
t_ACTION = r'=>'
# Primitive data types
def t_INTEGER(t):
r"[0-9]+"
t.value = int(t.value)
return t
def t_BOOL(t):
r"t[rR][uU][eE]|f[aA][lL][sS][eE]"
    t.value = t.value.lower() == 'true'
return t
# Other tokens with precedence before TYPE and ID
def t_NOT(t):
r"[nN][oO][tT]"
return t
# Identifiers
def check_RESERVED(t):
tupper = t.value.upper()
if tupper in reserved:
t.type = tupper
def t_TYPE(t):
r"[A-Z][A-Za-z0-9_]*"
check_RESERVED(t)
return t
def t_ID(t):
r"[a-z][A-Za-z0-9_]*"
check_RESERVED(t)
return t
def t_newline(token):
r"\n+"
token.lexer.lineno += len(token.value)
t_ignore = ' \t\r\f'
# LEXER STATES
states = (
("STRING", "exclusive"),
("COMMENT", "exclusive")
)
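# in an exclusive state only the rules prefixed with that state's name apply
# (t_STRING_* / t_COMMENT_*, plus their own t_*_ignore and t_*_error handlers)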
###
# THE STRING STATE
def t_start_string(token):
r"\""
token.lexer.push_state("STRING")
token.lexer.string_backslashed = False
token.lexer.stringbuf = ""
def t_STRING_newline(token):
r"\n"
token.lexer.lineno += 1
if not token.lexer.string_backslashed:
errors.append(LexicographicError((token.lineno, find_column(token.lexer.lexdata, token)), 'Unterminated string constant'))
token.lexer.pop_state()
else:
token.lexer.string_backslashed = False
def t_STRING_null(token):
r"\0"
errors.append(LexicographicError((token.lineno, find_column(token.lexer.lexdata, token)), 'Null character in string'))
token.lexer.skip(1)
def t_STRING_end(token):
r"\""
if not token.lexer.string_backslashed:
token.lexer.pop_state()
token.value = token.lexer.stringbuf
token.type = "STRING"
return token
else:
token.lexer.stringbuf += '"'
token.lexer.string_backslashed = False
def t_STRING_anything(token):
r"[^\n]"
if token.lexer.string_backslashed:
if token.value == 'b':
token.lexer.stringbuf += '\b'
elif token.value == 't':
token.lexer.stringbuf += '\t'
elif token.value == 'n':
token.lexer.stringbuf += '\n'
elif token.value == 'f':
token.lexer.stringbuf += '\f'
elif token.value == '\\':
token.lexer.stringbuf += '\\'
else:
token.lexer.stringbuf += token.value
token.lexer.string_backslashed = False
else:
if token.value != '\\':
token.lexer.stringbuf += token.value
else:
token.lexer.string_backslashed = True
# STRING ignored characters
t_STRING_ignore = ''
# STRING error handler
def t_STRING_error(token):
errors.append(LexicographicError((token.lineno, find_column(token.lexer.lexdata, token)), 'ERROR at or near ' + token.value[:10]))
token.lexer.skip(1)
def t_STRING_eof(token):
errors.append(LexicographicError((token.lineno, find_column(token.lexer.lexdata, token)), 'EOF in string'))
token.lexer.pop_state()
###
# THE COMMENT STATE
def t_start_comment(token):
r"\(\*"
token.lexer.push_state("COMMENT")
token.lexer.comment_count = 0
def t_COMMENT_newline(token):
r"\n+"
token.lexer.lineno += len(token.value)
def t_COMMENT_startanother(t):
r"\(\*"
t.lexer.comment_count += 1
def t_COMMENT_end(token):
r"\*\)"
if token.lexer.comment_count == 0:
token.lexer.pop_state()
else:
token.lexer.comment_count -= 1
# COMMENT ignored characters
t_COMMENT_ignore = ''
# COMMENT error handler
def t_COMMENT_error(token):
token.lexer.skip(1)
def t_COMMENT_eof(token):
errors.append(LexicographicError((token.lineno, find_column(token.lexer.lexdata, token)), 'EOF in comment'))
token.lexer.pop_state()
###### SPECIAL RULES ######
errors = []
def t_error(token):
    errors.append(LexicographicError((token.lineno, find_column(token.lexer.lexdata, token)), 'ERROR at or near ' + token.value[:10]))
token.lexer.skip(1)
###### CREATE LEXER ######
lex.lex()
###### TOKENIZER ######
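# A minimal usage sketch (hypothetical COOL source string):
#
#   lexer_errors, tokens = tokenizer('class Main { main() : Int { 1 + 2 }; };')
#   for tok in tokens:
#       print(tok.type, tok.value, tok.lineno, tok.lexpos)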
def tokenizer(code):
lex.input(code)
token_list = []
while True:
token = lex.token()
if token is None:
break
token.lexpos = find_column(code, token)
token_list.append(token)
return errors, token_list |
#!/usr/bin/python3
import os
import pickle
from sklearn.cluster import KMeans
from scipy import sparse
import pandas as pd
import matplotlib.pyplot as plt
from yellowbrick.cluster import KElbowVisualizer
def main():
"""
Using k-means for some data exploration and a potential solution for the license prediction problem
"""
os.chdir('../../../all_files_generated')
current_dir = os.getcwd()
data_pickles_dir = os.path.join(current_dir, 'data_pickles')
elbow_method_files_dir = os.path.join(current_dir, 'elbow_method_files')
x_train_path = os.path.join(data_pickles_dir, 'x_train.pickle')
x_validation_path = os.path.join(data_pickles_dir, 'x_validation.pickle')
x_test_path = os.path.join(data_pickles_dir, 'x_test.pickle')
y_train_path = os.path.join(data_pickles_dir, 'y_train.pickle')
y_validation_path = os.path.join(data_pickles_dir, 'y_validation.pickle')
y_test_path = os.path.join(data_pickles_dir, 'y_test.pickle')
# read in all pickle files that may be required
with open(x_train_path, 'rb') as data:
x_train = pickle.load(data)
with open(x_validation_path, 'rb') as data:
x_validation = pickle.load(data)
with open(x_test_path, 'rb') as data:
x_test = pickle.load(data)
with open(y_train_path, 'rb') as data:
y_train = pickle.load(data)
with open(y_validation_path, 'rb') as data:
y_validation = pickle.load(data)
with open(y_test_path, 'rb') as data:
y_test = pickle.load(data)
# combine all datasets
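    # (clustering is unsupervised, so the train/validation/test splits can be pooled;
    #  the combined labels are not needed for fitting and are presumably kept for later inspection)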
x_train = sparse.vstack((x_train, x_validation, x_test)) # scipy.sparse.csr matrix
    # pandas Series.append is deprecated; pd.concat achieves the same stacking
    y_train = pd.concat([y_train, pd.Series(y_validation), pd.Series(y_test)])  # pandas series
use_yellowbrick = False
if use_yellowbrick:
license_classifier = KMeans()
visualizer = KElbowVisualizer(license_classifier, k=(2, 100))
visualizer.fit(x_train)
visualizer.show()
else:
inertia = []
k = range(2, 100)
for i in k:
license_classifier = KMeans(n_clusters=i)
license_classifier.fit(x_train)
inertia.append(license_classifier.inertia_)
plt.plot(k, inertia)
plt.xlabel('K')
plt.ylabel('Inertia')
plt.title('Elbow Method')
elbow_method_path = os.path.join(elbow_method_files_dir, 'k_means_clustering_elbow_method.png')
plt.savefig(elbow_method_path)
plt.show()
if __name__ == '__main__':
main()
|
import collections
import itertools
import os
import sqlite3
import typing
from hydrus.core import HydrusConstants as HC
from hydrus.core import HydrusData
from hydrus.core import HydrusDBBase
from hydrus.core import HydrusExceptions
from hydrus.core.networking import HydrusNetwork
from hydrus.client import ClientFiles
from hydrus.client.db import ClientDBDefinitionsCache
from hydrus.client.db import ClientDBFilesMaintenanceQueue
from hydrus.client.db import ClientDBFilesMetadataBasic
from hydrus.client.db import ClientDBFilesStorage
from hydrus.client.db import ClientDBModule
from hydrus.client.db import ClientDBServices
REPOSITORY_HASH_ID_MAP_PREFIX = 'repository_hash_id_map_'
REPOSITORY_TAG_ID_MAP_PREFIX = 'repository_tag_id_map_'
REPOSITORY_UPDATES_PREFIX = 'repository_updates_'
REPOSITORY_UNREGISTERED_UPDATES_PREFIX = 'repository_unregistered_updates_'
REPOSITORY_UPDATES_PROCESSED_PREFIX = 'repository_updates_processed_'
def GenerateRepositoryDefinitionTableNames( service_id: int ):
suffix = str( service_id )
hash_id_map_table_name = 'external_master.{}{}'.format( REPOSITORY_HASH_ID_MAP_PREFIX, suffix )
tag_id_map_table_name = 'external_master.{}{}'.format( REPOSITORY_TAG_ID_MAP_PREFIX, suffix )
return ( hash_id_map_table_name, tag_id_map_table_name )
def GenerateRepositoryFileDefinitionTableName( service_id: int ):
( hash_id_map_table_name, tag_id_map_table_name ) = GenerateRepositoryDefinitionTableNames( service_id )
return hash_id_map_table_name
def GenerateRepositoryTagDefinitionTableName( service_id: int ):
( hash_id_map_table_name, tag_id_map_table_name ) = GenerateRepositoryDefinitionTableNames( service_id )
return tag_id_map_table_name
def GenerateRepositoryUpdatesTableNames( service_id: int ):
repository_updates_table_name = '{}{}'.format( REPOSITORY_UPDATES_PREFIX, service_id )
repository_unregistered_updates_table_name = '{}{}'.format( REPOSITORY_UNREGISTERED_UPDATES_PREFIX, service_id )
repository_updates_processed_table_name = '{}{}'.format( REPOSITORY_UPDATES_PROCESSED_PREFIX, service_id )
return ( repository_updates_table_name, repository_unregistered_updates_table_name, repository_updates_processed_table_name )
class ClientDBRepositories( ClientDBModule.ClientDBModule ):
def __init__(
self,
cursor: sqlite3.Cursor,
cursor_transaction_wrapper: HydrusDBBase.DBCursorTransactionWrapper,
modules_services: ClientDBServices.ClientDBMasterServices,
modules_files_storage: ClientDBFilesStorage.ClientDBFilesStorage,
modules_files_metadata_basic: ClientDBFilesMetadataBasic.ClientDBFilesMetadataBasic,
modules_hashes_local_cache: ClientDBDefinitionsCache.ClientDBCacheLocalHashes,
modules_tags_local_cache: ClientDBDefinitionsCache.ClientDBCacheLocalTags,
modules_files_maintenance_queue: ClientDBFilesMaintenanceQueue.ClientDBFilesMaintenanceQueue
):
# since we'll mostly be talking about hashes and tags we don't have locally, I think we shouldn't use the local caches
ClientDBModule.ClientDBModule.__init__( self, 'client repositories', cursor )
self._cursor_transaction_wrapper = cursor_transaction_wrapper
self.modules_services = modules_services
self.modules_files_storage = modules_files_storage
self.modules_files_metadata_basic = modules_files_metadata_basic
self.modules_files_maintenance_queue = modules_files_maintenance_queue
self.modules_hashes_local_cache = modules_hashes_local_cache
self.modules_tags_local_cache = modules_tags_local_cache
self._service_ids_to_content_types_to_outstanding_local_processing = collections.defaultdict( dict )
def _ClearOutstandingWorkCache( self, service_id, content_type = None ):
if service_id not in self._service_ids_to_content_types_to_outstanding_local_processing:
return
if content_type is None:
del self._service_ids_to_content_types_to_outstanding_local_processing[ service_id ]
else:
if content_type in self._service_ids_to_content_types_to_outstanding_local_processing[ service_id ]:
del self._service_ids_to_content_types_to_outstanding_local_processing[ service_id ][ content_type ]
def _GetServiceIndexGenerationDict( self, service_id ) -> dict:
( repository_updates_table_name, repository_unregistered_updates_table_name, repository_updates_processed_table_name ) = GenerateRepositoryUpdatesTableNames( service_id )
index_generation_dict = {}
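        # each entry is ( [ indexed_columns ], unique, version_added ); 449 appears to be the
        # client db version in which these indices were introduced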
index_generation_dict[ repository_updates_table_name ] = [
( [ 'hash_id' ], True, 449 )
]
index_generation_dict[ repository_updates_processed_table_name ] = [
( [ 'content_type' ], False, 449 )
]
return index_generation_dict
def _GetServiceTableGenerationDict( self, service_id ) -> dict:
( repository_updates_table_name, repository_unregistered_updates_table_name, repository_updates_processed_table_name ) = GenerateRepositoryUpdatesTableNames( service_id )
( hash_id_map_table_name, tag_id_map_table_name ) = GenerateRepositoryDefinitionTableNames( service_id )
return {
repository_updates_table_name : ( 'CREATE TABLE IF NOT EXISTS {} ( update_index INTEGER, hash_id INTEGER, PRIMARY KEY ( update_index, hash_id ) );', 449 ),
repository_unregistered_updates_table_name : ( 'CREATE TABLE IF NOT EXISTS {} ( hash_id INTEGER PRIMARY KEY );', 449 ),
repository_updates_processed_table_name : ( 'CREATE TABLE IF NOT EXISTS {} ( hash_id INTEGER, content_type INTEGER, processed INTEGER_BOOLEAN, PRIMARY KEY ( hash_id, content_type ) );', 449 ),
hash_id_map_table_name : ( 'CREATE TABLE IF NOT EXISTS {} ( service_hash_id INTEGER PRIMARY KEY, hash_id INTEGER );', 400 ),
tag_id_map_table_name : ( 'CREATE TABLE IF NOT EXISTS {} ( service_tag_id INTEGER PRIMARY KEY, tag_id INTEGER );', 400 )
}
def _GetServiceTablePrefixes( self ):
return {
REPOSITORY_HASH_ID_MAP_PREFIX,
REPOSITORY_TAG_ID_MAP_PREFIX,
REPOSITORY_UPDATES_PREFIX,
REPOSITORY_UNREGISTERED_UPDATES_PREFIX,
REPOSITORY_UPDATES_PROCESSED_PREFIX
}
def _GetServiceIdsWeGenerateDynamicTablesFor( self ):
return self.modules_services.GetServiceIds( HC.REPOSITORIES )
def _HandleCriticalRepositoryDefinitionError( self, service_id, name, bad_ids ):
self._ReprocessRepository( service_id, ( HC.CONTENT_TYPE_DEFINITIONS, ) )
self._ScheduleRepositoryUpdateFileMaintenance( service_id, ClientFiles.REGENERATE_FILE_DATA_JOB_FILE_INTEGRITY_DATA )
self._ScheduleRepositoryUpdateFileMaintenance( service_id, ClientFiles.REGENERATE_FILE_DATA_JOB_FILE_METADATA )
self._cursor_transaction_wrapper.CommitAndBegin()
message = 'A critical error was discovered with one of your repositories: its definition reference is in an invalid state. Your repository should now be paused, and all update files have been scheduled for an integrity and metadata check. Please permit file maintenance to check them, or tell it to do so manually, before unpausing your repository. Once unpaused, it will reprocess your definition files and attempt to fill the missing entries. If this error occurs again once that is complete, please inform hydrus dev.'
message += os.linesep * 2
message += 'Error: {}: {}'.format( name, bad_ids )
raise Exception( message )
def _RegisterUpdates( self, service_id, hash_ids = None ):
# it is ok if this guy gets hash ids that are already in the 'processed' table--it'll now resync them and correct if needed
( repository_updates_table_name, repository_unregistered_updates_table_name, repository_updates_processed_table_name ) = GenerateRepositoryUpdatesTableNames( service_id )
if hash_ids is None:
hash_ids = self._STS( self._Execute( 'SELECT hash_id FROM {};'.format( repository_unregistered_updates_table_name ) ) )
else:
with self._MakeTemporaryIntegerTable( hash_ids, 'hash_id' ) as temp_hash_ids_table_name:
hash_ids = self._STS( self._Execute( 'SELECT hash_id FROM {} CROSS JOIN {} USING ( hash_id );'.format( temp_hash_ids_table_name, repository_unregistered_updates_table_name ) ) )
if len( hash_ids ) > 0:
service_type = self.modules_services.GetService( service_id ).GetServiceType()
with self._MakeTemporaryIntegerTable( hash_ids, 'hash_id' ) as temp_hash_ids_table_name:
hash_ids_to_mimes = { hash_id : mime for ( hash_id, mime ) in self._Execute( 'SELECT hash_id, mime FROM {} CROSS JOIN files_info USING ( hash_id );'.format( temp_hash_ids_table_name ) ) }
current_rows = set( self._Execute( 'SELECT hash_id, content_type FROM {} CROSS JOIN {} USING ( hash_id );'.format( temp_hash_ids_table_name, repository_updates_processed_table_name ) ) )
correct_rows = set()
for ( hash_id, mime ) in hash_ids_to_mimes.items():
if mime == HC.APPLICATION_HYDRUS_UPDATE_DEFINITIONS:
content_types = ( HC.CONTENT_TYPE_DEFINITIONS, )
else:
content_types = tuple( HC.SERVICE_TYPES_TO_CONTENT_TYPES[ service_type ] )
correct_rows.update( ( ( hash_id, content_type ) for content_type in content_types ) )
deletee_rows = current_rows.difference( correct_rows )
if len( deletee_rows ) > 0:
# these were registered wrong at some point
self._ExecuteMany( 'DELETE FROM {} WHERE hash_id = ? AND content_type = ?;'.format( repository_updates_processed_table_name ), deletee_rows )
insert_rows = correct_rows.difference( current_rows )
if len( insert_rows ) > 0:
processed = False
self._ExecuteMany( 'INSERT OR IGNORE INTO {} ( hash_id, content_type, processed ) VALUES ( ?, ?, ? );'.format( repository_updates_processed_table_name ), ( ( hash_id, content_type, processed ) for ( hash_id, content_type ) in insert_rows ) )
if len( hash_ids_to_mimes ) > 0:
self._ExecuteMany( 'DELETE FROM {} WHERE hash_id = ?;'.format( repository_unregistered_updates_table_name ), ( ( hash_id, ) for hash_id in hash_ids_to_mimes.keys() ) )
if len( deletee_rows ) + len( insert_rows ) > 0:
content_types_that_changed = { content_type for ( hash_id, content_type ) in deletee_rows.union( insert_rows ) }
for content_type in content_types_that_changed:
self._ClearOutstandingWorkCache( service_id, content_type = content_type )
def _ReprocessRepository( self, service_id, content_types ):
( repository_updates_table_name, repository_unregistered_updates_table_name, repository_updates_processed_table_name ) = GenerateRepositoryUpdatesTableNames( service_id )
self._ExecuteMany( 'UPDATE {} SET processed = ? WHERE content_type = ?;'.format( repository_updates_processed_table_name ), ( ( False, content_type ) for content_type in content_types ) )
self._ClearOutstandingWorkCache( service_id )
def _ScheduleRepositoryUpdateFileMaintenance( self, service_id, job_type ):
( repository_updates_table_name, repository_unregistered_updates_table_name, repository_updates_processed_table_name ) = GenerateRepositoryUpdatesTableNames( service_id )
table_join = self.modules_files_storage.GetTableJoinLimitedByFileDomain( self.modules_services.local_update_service_id, repository_updates_table_name, HC.CONTENT_STATUS_CURRENT )
update_hash_ids = self._STL( self._Execute( 'SELECT hash_id FROM {};'.format( table_join ) ) )
self.modules_files_maintenance_queue.AddJobs( update_hash_ids, job_type )
def AssociateRepositoryUpdateHashes( self, service_key: bytes, metadata_slice: HydrusNetwork.Metadata ):
service_id = self.modules_services.GetServiceId( service_key )
inserts = []
for ( update_index, update_hashes ) in metadata_slice.GetUpdateIndicesAndHashes():
hash_ids = self.modules_hashes_local_cache.GetHashIds( update_hashes )
inserts.extend( ( ( update_index, hash_id ) for hash_id in hash_ids ) )
if len( inserts ) > 0:
( repository_updates_table_name, repository_unregistered_updates_table_name, repository_updates_processed_table_name ) = GenerateRepositoryUpdatesTableNames( service_id )
self._ExecuteMany( 'INSERT OR IGNORE INTO {} ( update_index, hash_id ) VALUES ( ?, ? );'.format( repository_updates_table_name ), inserts )
self._ExecuteMany( 'INSERT OR IGNORE INTO {} ( hash_id ) VALUES ( ? );'.format( repository_unregistered_updates_table_name ), ( ( hash_id, ) for ( update_index, hash_id ) in inserts ) )
self._RegisterUpdates( service_id )
def DropRepositoryTables( self, service_id: int ):
( repository_updates_table_name, repository_unregistered_updates_table_name, repository_updates_processed_table_name ) = GenerateRepositoryUpdatesTableNames( service_id )
self._Execute( 'DROP TABLE IF EXISTS {};'.format( repository_updates_table_name ) )
self._Execute( 'DROP TABLE IF EXISTS {};'.format( repository_unregistered_updates_table_name ) )
self._Execute( 'DROP TABLE IF EXISTS {};'.format( repository_updates_processed_table_name ) )
( hash_id_map_table_name, tag_id_map_table_name ) = GenerateRepositoryDefinitionTableNames( service_id )
self._Execute( 'DROP TABLE IF EXISTS {};'.format( hash_id_map_table_name ) )
self._Execute( 'DROP TABLE IF EXISTS {};'.format( tag_id_map_table_name ) )
self._ClearOutstandingWorkCache( service_id )
def DoOutstandingUpdateRegistration( self ):
for service_id in self.modules_services.GetServiceIds( HC.REPOSITORIES ):
self._RegisterUpdates( service_id )
def GenerateRepositoryTables( self, service_id: int ):
table_generation_dict = self._GetServiceTableGenerationDict( service_id )
for ( table_name, ( create_query_without_name, version_added ) ) in table_generation_dict.items():
self._Execute( create_query_without_name.format( table_name ) )
index_generation_dict = self._GetServiceIndexGenerationDict( service_id )
for ( table_name, columns, unique, version_added ) in self._FlattenIndexGenerationDict( index_generation_dict ):
self._CreateIndex( table_name, columns, unique = unique )
def GetRepositoryProgress( self, service_key: bytes ):
service_id = self.modules_services.GetServiceId( service_key )
( repository_updates_table_name, repository_unregistered_updates_table_name, repository_updates_processed_table_name ) = GenerateRepositoryUpdatesTableNames( service_id )
( num_updates, ) = self._Execute( 'SELECT COUNT( * ) FROM {}'.format( repository_updates_table_name ) ).fetchone()
table_join = self.modules_files_storage.GetTableJoinLimitedByFileDomain( self.modules_services.local_update_service_id, repository_updates_table_name, HC.CONTENT_STATUS_CURRENT )
( num_local_updates, ) = self._Execute( 'SELECT COUNT( * ) FROM {};'.format( table_join ) ).fetchone()
content_types_to_num_updates = collections.Counter( dict( self._Execute( 'SELECT content_type, COUNT( * ) FROM {} GROUP BY content_type;'.format( repository_updates_processed_table_name ) ) ) )
content_types_to_num_processed_updates = collections.Counter( dict( self._Execute( 'SELECT content_type, COUNT( * ) FROM {} WHERE processed = ? GROUP BY content_type;'.format( repository_updates_processed_table_name ), ( True, ) ) ) )
# little helpful thing that pays off later
for content_type in content_types_to_num_updates:
if content_type not in content_types_to_num_processed_updates:
content_types_to_num_processed_updates[ content_type ] = 0
return ( num_local_updates, num_updates, content_types_to_num_processed_updates, content_types_to_num_updates )
def GetRepositoryUpdateHashesICanProcess( self, service_key: bytes, content_types_to_process ):
# it is important that we use lists and sort by update index!
# otherwise add/delete actions can occur in the wrong order
service_id = self.modules_services.GetServiceId( service_key )
( repository_updates_table_name, repository_unregistered_updates_table_name, repository_updates_processed_table_name ) = GenerateRepositoryUpdatesTableNames( service_id )
result = self._Execute( 'SELECT 1 FROM {} WHERE content_type = ? AND processed = ?;'.format( repository_updates_processed_table_name ), ( HC.CONTENT_TYPE_DEFINITIONS, True ) ).fetchone()
this_is_first_definitions_work = result is None
result = self._Execute( 'SELECT 1 FROM {} WHERE content_type != ? AND processed = ?;'.format( repository_updates_processed_table_name ), ( HC.CONTENT_TYPE_DEFINITIONS, True ) ).fetchone()
this_is_first_content_work = result is None
min_unregistered_update_index = None
result = self._Execute( 'SELECT MIN( update_index ) FROM {} CROSS JOIN {} USING ( hash_id );'.format( repository_unregistered_updates_table_name, repository_updates_table_name ) ).fetchone()
if result is not None:
( min_unregistered_update_index, ) = result
predicate_phrase = 'processed = ? AND content_type IN {}'.format( HydrusData.SplayListForDB( content_types_to_process ) )
if min_unregistered_update_index is not None:
# can't process an update if any of its files are as yet unregistered (these are both unprocessed and unavailable)
# also, we mustn't skip any update indices, so if there is an invalid one, we won't do any after that!
predicate_phrase = '{} AND update_index < {}'.format( predicate_phrase, min_unregistered_update_index )
query = 'SELECT update_index, hash_id, content_type FROM {} CROSS JOIN {} USING ( hash_id ) WHERE {};'.format( repository_updates_processed_table_name, repository_updates_table_name, predicate_phrase )
rows = self._Execute( query, ( False, ) ).fetchall()
update_indices_to_unprocessed_hash_ids = HydrusData.BuildKeyToSetDict( ( ( update_index, hash_id ) for ( update_index, hash_id, content_type ) in rows ) )
hash_ids_to_content_types_to_process = HydrusData.BuildKeyToSetDict( ( ( hash_id, content_type ) for ( update_index, hash_id, content_type ) in rows ) )
all_hash_ids = set( hash_ids_to_content_types_to_process.keys() )
all_local_hash_ids = self.modules_files_storage.FilterCurrentHashIds( self.modules_services.local_update_service_id, all_hash_ids )
for sorted_update_index in sorted( update_indices_to_unprocessed_hash_ids.keys() ):
unprocessed_hash_ids = update_indices_to_unprocessed_hash_ids[ sorted_update_index ]
if not unprocessed_hash_ids.issubset( all_local_hash_ids ):
# can't process an update if any of its unprocessed files are not local
# normally they'll always be available if registered, but just in case a user deletes one manually etc...
# also, we mustn't skip any update indices, so if there is an invalid one, we won't do any after that!
update_indices_to_unprocessed_hash_ids = { update_index : unprocessed_hash_ids for ( update_index, unprocessed_hash_ids ) in update_indices_to_unprocessed_hash_ids.items() if update_index < sorted_update_index }
break
# all the hashes are now good to go
all_hash_ids = set( itertools.chain.from_iterable( update_indices_to_unprocessed_hash_ids.values() ) )
hash_ids_to_hashes = self.modules_hashes_local_cache.GetHashIdsToHashes( hash_ids = all_hash_ids )
definition_hashes_and_content_types = []
content_hashes_and_content_types = []
if len( update_indices_to_unprocessed_hash_ids ) > 0:
for update_index in sorted( update_indices_to_unprocessed_hash_ids.keys() ):
unprocessed_hash_ids = update_indices_to_unprocessed_hash_ids[ update_index ]
definition_hash_ids = { hash_id for hash_id in unprocessed_hash_ids if HC.CONTENT_TYPE_DEFINITIONS in hash_ids_to_content_types_to_process[ hash_id ] }
content_hash_ids = { hash_id for hash_id in unprocessed_hash_ids if hash_id not in definition_hash_ids }
for ( hash_ids, hashes_and_content_types ) in [
( definition_hash_ids, definition_hashes_and_content_types ),
( content_hash_ids, content_hashes_and_content_types )
]:
hashes_and_content_types.extend( ( ( hash_ids_to_hashes[ hash_id ], hash_ids_to_content_types_to_process[ hash_id ] ) for hash_id in hash_ids ) )
return ( this_is_first_definitions_work, definition_hashes_and_content_types, this_is_first_content_work, content_hashes_and_content_types )
def GetRepositoryUpdateHashesIDoNotHave( self, service_key: bytes ):
service_id = self.modules_services.GetServiceId( service_key )
( repository_updates_table_name, repository_unregistered_updates_table_name, repository_updates_processed_table_name ) = GenerateRepositoryUpdatesTableNames( service_id )
all_hash_ids = self._STL( self._Execute( 'SELECT hash_id FROM {} ORDER BY update_index ASC;'.format( repository_updates_table_name ) ) )
table_join = self.modules_files_storage.GetTableJoinLimitedByFileDomain( self.modules_services.local_update_service_id, repository_updates_table_name, HC.CONTENT_STATUS_CURRENT )
existing_hash_ids = self._STS( self._Execute( 'SELECT hash_id FROM {};'.format( table_join ) ) )
needed_hash_ids = [ hash_id for hash_id in all_hash_ids if hash_id not in existing_hash_ids ]
needed_hashes = self.modules_hashes_local_cache.GetHashes( needed_hash_ids )
return needed_hashes
def GetTablesAndColumnsThatUseDefinitions( self, content_type: int ) -> typing.List[ typing.Tuple[ str, str ] ]:
tables_and_columns = []
        if content_type == HC.CONTENT_TYPE_HASH:
for service_id in self.modules_services.GetServiceIds( HC.REPOSITORIES ):
( repository_updates_table_name, repository_unregistered_updates_table_name, repository_updates_processed_table_name ) = GenerateRepositoryUpdatesTableNames( service_id )
hash_id_map_table_name = GenerateRepositoryFileDefinitionTableName( service_id )
tables_and_columns.extend( [
( repository_updates_table_name, 'hash_id' ),
( hash_id_map_table_name, 'hash_id' )
] )
        elif content_type == HC.CONTENT_TYPE_TAG:
for service_id in self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES ):
tag_id_map_table_name = GenerateRepositoryTagDefinitionTableName( service_id )
tables_and_columns.extend( [
( tag_id_map_table_name, 'tag_id' )
] )
return tables_and_columns
def HasLotsOfOutstandingLocalProcessing( self, service_id, content_types ):
( repository_updates_table_name, repository_unregistered_updates_table_name, repository_updates_processed_table_name ) = GenerateRepositoryUpdatesTableNames( service_id )
content_types_to_outstanding_local_processing = self._service_ids_to_content_types_to_outstanding_local_processing[ service_id ]
for content_type in content_types:
if content_type not in content_types_to_outstanding_local_processing:
result = self._STL( self._Execute( 'SELECT 1 FROM {} WHERE content_type = ? AND processed = ?;'.format( repository_updates_processed_table_name ), ( content_type, False ) ).fetchmany( 20 ) )
content_types_to_outstanding_local_processing[ content_type ] = len( result ) >= 20
if content_types_to_outstanding_local_processing[ content_type ]:
return True
return False
def NormaliseServiceHashId( self, service_id: int, service_hash_id: int ) -> int:
hash_id_map_table_name = GenerateRepositoryFileDefinitionTableName( service_id )
result = self._Execute( 'SELECT hash_id FROM {} WHERE service_hash_id = ?;'.format( hash_id_map_table_name ), ( service_hash_id, ) ).fetchone()
if result is None:
self._HandleCriticalRepositoryDefinitionError( service_id, 'hash_id', service_hash_id )
( hash_id, ) = result
return hash_id
def NormaliseServiceHashIds( self, service_id: int, service_hash_ids: typing.Collection[ int ] ) -> typing.Set[ int ]:
hash_id_map_table_name = GenerateRepositoryFileDefinitionTableName( service_id )
with self._MakeTemporaryIntegerTable( service_hash_ids, 'service_hash_id' ) as temp_table_name:
# temp service hashes to lookup
hash_ids_potentially_dupes = self._STL( self._Execute( 'SELECT hash_id FROM {} CROSS JOIN {} USING ( service_hash_id );'.format( temp_table_name, hash_id_map_table_name ) ) )
# every service_id can only exist once, but technically a hash_id could be mapped to two service_ids
if len( hash_ids_potentially_dupes ) != len( service_hash_ids ):
bad_service_hash_ids = []
for service_hash_id in service_hash_ids:
result = self._Execute( 'SELECT hash_id FROM {} WHERE service_hash_id = ?;'.format( hash_id_map_table_name ), ( service_hash_id, ) ).fetchone()
if result is None:
bad_service_hash_ids.append( service_hash_id )
self._HandleCriticalRepositoryDefinitionError( service_id, 'hash_ids', bad_service_hash_ids )
hash_ids = set( hash_ids_potentially_dupes )
return hash_ids
def NormaliseServiceTagId( self, service_id: int, service_tag_id: int ) -> int:
tag_id_map_table_name = GenerateRepositoryTagDefinitionTableName( service_id )
result = self._Execute( 'SELECT tag_id FROM {} WHERE service_tag_id = ?;'.format( tag_id_map_table_name ), ( service_tag_id, ) ).fetchone()
if result is None:
self._HandleCriticalRepositoryDefinitionError( service_id, 'tag_id', service_tag_id )
( tag_id, ) = result
return tag_id
def NotifyUpdatesChanged( self, hash_ids ):
for service_id in self.modules_services.GetServiceIds( HC.REPOSITORIES ):
( repository_updates_table_name, repository_unregistered_updates_table_name, repository_updates_processed_table_name ) = GenerateRepositoryUpdatesTableNames( service_id )
self._ExecuteMany( 'INSERT OR IGNORE INTO {} ( hash_id ) VALUES ( ? );'.format( repository_unregistered_updates_table_name ), ( ( hash_id, ) for hash_id in hash_ids ) )
self._RegisterUpdates( service_id, hash_ids )
def NotifyUpdatesImported( self, hash_ids ):
for service_id in self.modules_services.GetServiceIds( HC.REPOSITORIES ):
self._RegisterUpdates( service_id, hash_ids )
def ProcessRepositoryDefinitions( self, service_key: bytes, definition_hash: bytes, definition_iterator_dict, content_types, job_key, work_time ):
# ignore content_types for now
service_id = self.modules_services.GetServiceId( service_key )
precise_time_to_stop = HydrusData.GetNowPrecise() + work_time
( hash_id_map_table_name, tag_id_map_table_name ) = GenerateRepositoryDefinitionTableNames( service_id )
num_rows_processed = 0
if 'service_hash_ids_to_hashes' in definition_iterator_dict:
i = definition_iterator_dict[ 'service_hash_ids_to_hashes' ]
for chunk in HydrusData.SplitIteratorIntoAutothrottledChunks( i, 50, precise_time_to_stop ):
inserts = []
for ( service_hash_id, hash ) in chunk:
hash_id = self.modules_hashes_local_cache.GetHashId( hash )
inserts.append( ( service_hash_id, hash_id ) )
self._ExecuteMany( 'REPLACE INTO {} ( service_hash_id, hash_id ) VALUES ( ?, ? );'.format( hash_id_map_table_name ), inserts )
num_rows_processed += len( inserts )
if HydrusData.TimeHasPassedPrecise( precise_time_to_stop ) or job_key.IsCancelled():
return num_rows_processed
del definition_iterator_dict[ 'service_hash_ids_to_hashes' ]
if 'service_tag_ids_to_tags' in definition_iterator_dict:
i = definition_iterator_dict[ 'service_tag_ids_to_tags' ]
for chunk in HydrusData.SplitIteratorIntoAutothrottledChunks( i, 50, precise_time_to_stop ):
inserts = []
for ( service_tag_id, tag ) in chunk:
try:
tag_id = self.modules_tags_local_cache.GetTagId( tag )
except HydrusExceptions.TagSizeException:
# in future what we'll do here is assign this id to the 'do not show' table, so we know it exists, but it is knowingly filtered out
# _or something_. maybe a small 'invalid' table, so it isn't mixed up with potentially re-addable tags
tag_id = self.modules_tags_local_cache.GetTagId( 'invalid repository tag' )
inserts.append( ( service_tag_id, tag_id ) )
self._ExecuteMany( 'REPLACE INTO {} ( service_tag_id, tag_id ) VALUES ( ?, ? );'.format( tag_id_map_table_name ), inserts )
num_rows_processed += len( inserts )
if HydrusData.TimeHasPassedPrecise( precise_time_to_stop ) or job_key.IsCancelled():
return num_rows_processed
del definition_iterator_dict[ 'service_tag_ids_to_tags' ]
self.SetUpdateProcessed( service_id, definition_hash, ( HC.CONTENT_TYPE_DEFINITIONS, ) )
return num_rows_processed
def ReprocessRepository( self, service_key: bytes, content_types: typing.Collection[ int ] ):
service_id = self.modules_services.GetServiceId( service_key )
self._ReprocessRepository( service_id, content_types )
def ScheduleRepositoryUpdateFileMaintenance( self, service_key, job_type ):
service_id = self.modules_services.GetServiceId( service_key )
self._ScheduleRepositoryUpdateFileMaintenance( service_id, job_type )
def SetRepositoryUpdateHashes( self, service_key: bytes, metadata: HydrusNetwork.Metadata ):
# this is a full metadata resync
service_id = self.modules_services.GetServiceId( service_key )
all_future_update_hash_ids = self.modules_hashes_local_cache.GetHashIds( metadata.GetUpdateHashes() )
( repository_updates_table_name, repository_unregistered_updates_table_name, repository_updates_processed_table_name ) = GenerateRepositoryUpdatesTableNames( service_id )
#
current_update_hash_ids = self._STS( self._Execute( 'SELECT hash_id FROM {};'.format( repository_updates_table_name ) ) )
deletee_hash_ids = current_update_hash_ids.difference( all_future_update_hash_ids )
self._ExecuteMany( 'DELETE FROM {} WHERE hash_id = ?;'.format( repository_updates_table_name ), ( ( hash_id, ) for hash_id in deletee_hash_ids ) )
#
self._Execute( 'DELETE FROM {};'.format( repository_unregistered_updates_table_name ) )
#
good_current_hash_ids = current_update_hash_ids.intersection( all_future_update_hash_ids )
current_processed_table_update_hash_ids = self._STS( self._Execute( 'SELECT hash_id FROM {};'.format( repository_updates_processed_table_name ) ) )
deletee_processed_table_update_hash_ids = current_processed_table_update_hash_ids.difference( good_current_hash_ids )
self._ExecuteMany( 'DELETE FROM {} WHERE hash_id = ?;'.format( repository_updates_processed_table_name ), ( ( hash_id, ) for hash_id in deletee_processed_table_update_hash_ids ) )
#
inserts = []
for ( update_index, update_hashes ) in metadata.GetUpdateIndicesAndHashes():
for update_hash in update_hashes:
hash_id = self.modules_hashes_local_cache.GetHashId( update_hash )
if hash_id in current_update_hash_ids:
self._Execute( 'UPDATE {} SET update_index = ? WHERE hash_id = ?;'.format( repository_updates_table_name ), ( update_index, hash_id ) )
else:
inserts.append( ( update_index, hash_id ) )
self._ExecuteMany( 'INSERT OR IGNORE INTO {} ( update_index, hash_id ) VALUES ( ?, ? );'.format( repository_updates_table_name ), inserts )
self._ExecuteMany( 'INSERT OR IGNORE INTO {} ( hash_id ) VALUES ( ? );'.format( repository_unregistered_updates_table_name ), ( ( hash_id, ) for hash_id in all_future_update_hash_ids ) )
self._RegisterUpdates( service_id )
def SetUpdateProcessed( self, service_id: int, update_hash: bytes, content_types: typing.Collection[ int ] ):
( repository_updates_table_name, repository_unregistered_updates_table_name, repository_updates_processed_table_name ) = GenerateRepositoryUpdatesTableNames( service_id )
update_hash_id = self.modules_hashes_local_cache.GetHashId( update_hash )
self._ExecuteMany( 'UPDATE {} SET processed = ? WHERE hash_id = ? AND content_type = ?;'.format( repository_updates_processed_table_name ), ( ( True, update_hash_id, content_type ) for content_type in content_types ) )
for content_type in content_types:
self._ClearOutstandingWorkCache( service_id, content_type )
|
from csbuilder.session.session import Session
from csbuilder.session.manager import SessionManager
from csbuilder.session.result import SessionResult |
from ramp_utils.password import hash_password
from ramp_utils.password import check_password
def test_check_password():
password = "hjst3789ep;ocikaqjw"
hashed_password = hash_password(password)
assert check_password(password, hashed_password)
assert not check_password("hjst3789ep;ocikaqji", hashed_password)
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021, omar jaber and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.utils import getdate, add_to_date, cstr
class RosterDailyReport(Document):
def autoname(self):
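        # name format: "<date + 1 day>|<date + 14 days>"; add_to_date appears to return a
        # string here since it is handed the string form of the date (cstr(self.date))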
self.name = add_to_date(cstr(self.date), days=1) + "|" + add_to_date(cstr(self.date), days=14) |
from models.Batch_CNN_Model import Batch_CNN_Model
from models.Basic_CNN_Model import Basic_CNN_Model
from models.Improved_CNN_Model import Improved_CNN_Model
from models.Transfer_ResNet_CNN_Model import Transfer_ResNet_CNN_Model
from models.Transfer_InceptionV3_CNN_Model import Transfer_InceptionV3_CNN_Model
class Model:
""" Model factory class for deep learning network types. """
def __init__(self, type_name=None, name=None, metric='accuracy'):
'''
Initialisation of the network model
Params:
- type_name: general network type
- name: specific network that shall be created
- metric: metric type depends on the task and the used optimiser,
e.g. for classifiction the simplest one is 'accuracy'
'''
self.model_class = None
self.model = None
self.name = name
self.metric = metric
self.type_name = type_name
if self.type_name in ["Basic"]:
self.model_class = Basic_CNN_Model(name=name, metric=metric)
elif type_name in ["Batch"]:
self.model_class = Batch_CNN_Model(name=name, metric=metric)
elif type_name in ["Improved"]:
self.model_class = Improved_CNN_Model(name=name, metric=metric)
elif type_name in ["Transfer"]:
if self.name in ["Transfer_ResNet_CNN_Model"]:
self.model_class = Transfer_ResNet_CNN_Model(name=name, metric=metric)
elif self.name in ["Transfer_InceptionV3_CNN_Model"]:
self.model_class = Transfer_InceptionV3_CNN_Model(name=name, metric=metric)
else:
print("Wrong Model type - {} -, does not exist, therefore no model building possible.".format(type_name))
def get_model(self):
        # Can return None, if the model type does not exist yet.
        if self.model_class is None:
            return None
        self.model = self.model_class.get_model()
return self.model
def get_class(self):
# Can return None, if model type does not exist yet.
return self.model_class
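# A minimal usage sketch (hypothetical type/name combination):
#
#   factory = Model(type_name='Transfer', name='Transfer_ResNet_CNN_Model', metric='accuracy')
#   network = factory.get_model()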
|
# Copyright (c) 2018 Serguei Kalentchouk et al. All rights reserved.
# Use of this source code is governed by an MIT license that can be found in the LICENSE file.
from node_test_case import NodeTestCase
class TestAdd(NodeTestCase):
def test_add(self):
self.create_node('Add', {'input1': 5.0, 'input2': -3.0}, 2.0)
def test_add_int(self):
self.create_node('AddInt', {'input1': 5, 'input2': -3}, 2)
def test_add_angle(self):
self.create_node('AddAngle', {'input1': 5.0, 'input2': -3.0}, 2.0)
def test_add_vector(self):
self.create_node('AddVector', {'input1': [1.0, 1.0, 1.0], 'input2': [1.0, -1.0, 0.5]}, [2.0, 0.0, 1.5])
|
import unittest
from unittest.mock import patch
from unittest.mock import mock_open
from acscore import metrics
class FunctionNameCaseTest(unittest.TestCase):
def setUp(self):
self.data = {'underscore': 100, 'camelcase': 10, 'other': 15}
self.function_name_case = metrics.FunctionNameCase()
self.files = [
'def __no__Name__(): pass\ndef __mis(): pass\ndef testOk(): pass\n',
'def ke__(): pass\ndef under_score(): pass\ndef Very__Bad(): pass\n',
            # TODO: extract camel case with capital first letter to separate group
'def Very(): pass\ndef VeryVery(): pass\ndef not_camel_case(): pass\n',
]
def test_count(self):
with patch('acscore.metric.function_name_case.open', mock_open(read_data=self.files[0])):
result1 = self.function_name_case.count('')
expected1 = {
'other': 1,
'camelcase': 1,
'underscore': 0,
}
self.assertEqual(expected1, result1)
with patch('acscore.metric.function_name_case.open', mock_open(read_data=self.files[1])):
result2 = self.function_name_case.count('')
expected2 = {
'other': 2,
'camelcase': 0,
'underscore': 1,
}
self.assertEqual(expected2, result2)
with patch('acscore.metric.function_name_case.open', mock_open(read_data=self.files[2])):
result3 = self.function_name_case.count('')
expected3 = {
'other': 0,
'camelcase': 2,
'underscore': 1,
}
self.assertEqual(expected3, result3)
def test_count_verbose(self):
with patch('acscore.metric.function_name_case.open', mock_open(read_data=self.files[0])):
result1 = self.function_name_case.count('', True)
expected1 = {
'other': {
'count': 1,
'lines': [2],
},
'camelcase': {
'count': 1,
'lines': [3],
},
'underscore': {
'count': 0,
'lines': [],
},
}
self.assertEqual(expected1, result1)
with patch('acscore.metric.function_name_case.open', mock_open(read_data=self.files[1])):
result2 = self.function_name_case.count('', True)
expected2 = {
'other': {
'count': 2,
'lines': [1, 3],
},
'camelcase': {
'count': 0,
'lines': [],
},
'underscore': {
'count': 1,
'lines': [2],
},
}
self.assertEqual(expected2, result2)
with patch('acscore.metric.function_name_case.open', mock_open(read_data=self.files[2])):
result3 = self.function_name_case.count('', True)
expected3 = {
'other': {
'count': 0,
'lines': [],
},
'camelcase': {
'count': 2,
'lines': [1, 2],
},
'underscore': {
'count': 1,
'lines': [3],
},
}
self.assertEqual(expected3, result3)
def test_discretize(self):
result = self.function_name_case.discretize(self.data)
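        # 100 + 10 + 15 = 125 names in total: 100/125 = 0.8, 10/125 = 0.08, 15/125 = 0.12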
self.assertEqual({'other': 0.12, 'underscore': 0.8, 'camelcase': 0.08}, result)
def test_inspect(self):
discrete = self.function_name_case.discretize(self.data)
values = {
'underscore': {
'count': 3,
'lines': [1, 2, 3],
},
'camelcase': {
'count': 2,
'lines': [4, 5],
},
'other': {
'count': 1,
'lines': [6],
},
}
inspections = self.function_name_case.inspect(discrete, values)
expected = {
metrics.FunctionNameCase.NEED_TO_USE_UNDERSCORE: {
'message': metrics.FunctionNameCase.inspections[metrics.FunctionNameCase.NEED_TO_USE_UNDERSCORE],
'lines': [4, 5],
},
metrics.FunctionNameCase.NO_STYLE: {
'message': metrics.FunctionNameCase.inspections[metrics.FunctionNameCase.NO_STYLE],
'lines': [6],
}
}
self.assertEqual(expected, inspections)
|
from panda3d.core import *
from direct.showbase import Pool
from direct.showbase.DirectObject import DirectObject
import re
class PooledEffect(DirectObject, NodePath):
pool = None
poolLimit = 124
@classmethod
def getEffect(cls, context=''):
if cls.pool is None:
cls.pool = Pool.Pool()
if cls.pool.hasFree():
return cls.pool.checkout()
else:
free, used = cls.pool.getNumItems()
if free + used < cls.poolLimit:
cls.pool.add(cls())
return cls.pool.checkout()
return
@classmethod
def cleanup(cls):
if cls.pool:
cls.pool.cleanup(cls.destroy)
cls.pool = None
return
def __init__(self):
NodePath.__init__(self, self.__class__.__name__)
self.accept('clientLogout', self.__class__.cleanup)
def destroy(self, item=None):
if item:
self.pool.remove(item)
self.ignore('clientLogout')
self.removeNode() |
import numpy as np
from .basic import BasicProducer
class NewFromFunction(BasicProducer):
'''
Generic producer to calculate inputs from functions.
The passed function needs to return a sequence.
Can either be used directly, e.g.
met_ht_ratio = NewFromFunction(
'met_ht_ratio',
inputs=['MET', 'HT'],
function=np.divide,
)
or to implement specialisations as classes, see
- alphatwirl_interface.functions.Divide
- alphatwirl_interface.functions.TransverseMomentum
'''
def __init__(self, outputName, inputs, function):
super(NewFromFunction, self).__init__(outputName)
self.inputs = inputs
self.function = function
def _value(self, obj):
return self.function(
*[np.array(getattr(obj, n)) for n in self.inputs]
)
def __repr__(self):
attributes = ['outputName', 'inputs', 'function']
formattedAttrs = super(
NewFromFunction, self)._format_attributes(attributes)
return '{}({})'.format(
self.__class__.__name__,
formattedAttrs,
)
class Divide(NewFromFunction):
'''
Divides two inputs by each other (can be vectors)
'''
def __init__(self, outputName, inputs):
super(self.__class__, self).__init__(outputName, inputs, np.divide)
class TransverseMomentum(NewFromFunction):
'''
Calculates the transverse momentum of particles given their
momenta in the x- and y-plane, e.g.
muon_pt = TransverseMomentum(
outputName='muon_pt',
inputs=['Muon_Px', 'Muon_Py'],
)
'''
def __init__(self, outputName, inputs):
super(self.__class__, self).__init__(
outputName,
inputs,
function=lambda x, y: np.sqrt(np.square(x) + np.square(y))
)
|
from javax import swing
class test189c(swing.ListCellRenderer):
def getListCellRendererComponent(self):
return "test189c"
class test189c2(test189c):
def getListCellRendererComponent(self):
return "test189c2"
print test189c().getListCellRendererComponent()
print test189c2().getListCellRendererComponent() |
from tensorflow.keras.layers import BatchNormalization, Conv2D, AveragePooling2D, MaxPooling2D
from tensorflow.keras.layers import Activation, Dropout, Dense, Flatten, Input, concatenate
from tensorflow.keras.models import Model
from tensorflow.keras.regularizers import l2
from tensorflow.keras import backend as K
class GoogLeNet:
@staticmethod
def conv_module(x, K, kX, kY, stride, chanDim, padding='same', reg=0.0002, name=None):
(convName, bnName, actName) = (None, None, None)
if name is not None:
convName = name + "_conv"
bnName = name + "_bn"
actName = name + "_act"
x = Conv2D(K, (kX, kY), strides=stride, padding=padding,
kernel_regularizer=l2(reg), name=convName)(x)
x = BatchNormalization(axis=chanDim, name=bnName)(x)
x = Activation("elu", name=actName)(x)
return x
@staticmethod
def inception_module(x, num1x1, num3x3Reduce, num3x3, num5x5Reduce, num5x5, num1x1Proj, chanDim, stage, reg=0.0005):
# First Branch -> 1x1 CONV
        first = GoogLeNet.conv_module(
            x, num1x1, 1, 1, (1, 1), chanDim, reg=reg, name=stage + "_first")
        # Second Branch -> 1x1 CONV -> 3x3 CONV
        second = GoogLeNet.conv_module(
            x, num3x3Reduce, 1, 1, (1, 1), chanDim, reg=reg, name=stage + "_second1")
        second = GoogLeNet.conv_module(
            second, num3x3, 3, 3, (1, 1), chanDim, reg=reg, name=stage + "_second2")
        # Third Branch -> 1x1 CONV -> 5x5 CONV
        third = GoogLeNet.conv_module(
            x, num5x5Reduce, 1, 1, (1, 1), chanDim, reg=reg, name=stage + "_third1")
        third = GoogLeNet.conv_module(
            third, num5x5, 5, 5, (1, 1), chanDim, reg=reg, name=stage + "_third2")
        # Fourth Branch -> POOL -> 1x1 CONV
        fourth = MaxPooling2D((3, 3), strides=(1, 1), padding='same', name=stage + "_pool")(x)
        fourth = GoogLeNet.conv_module(
            fourth, num1x1Proj, 1, 1, (1, 1), chanDim, reg=reg, name=stage + "_fourth")
        # Concatenate
x = concatenate([first, second, third, fourth],
axis=chanDim, name=stage + "_mixed")
return x
@staticmethod
def build(width, height, depth, classes, reg=0.0005):
input_shape = (height, width, depth)
chanDim = -1
if K.image_data_format() == "channels_first":
input_shape = (depth, height, width)
chanDim = 1
# CONV => POOL => (CONV * 2) => POOL
inputs = Input(shape=input_shape)
        x = GoogLeNet.conv_module(
            inputs, 64, 5, 5, (1, 1), chanDim, reg=reg, name="block1")
        x = MaxPooling2D((3, 3), strides=(
            2, 2), padding='same', name="pool1")(x)
        x = GoogLeNet.conv_module(
            x, 64, 1, 1, (1, 1), chanDim, reg=reg, name="block2")
        x = GoogLeNet.conv_module(
            x, 192, 3, 3, (1, 1), chanDim, reg=reg, name="block3")
        x = MaxPooling2D((3, 3), strides=(
            2, 2), padding='same', name="pool2")(x)
        # (INCEP * 2) => POOL
        x = GoogLeNet.inception_module(
            x, 64, 96, 128, 16, 32, 32, chanDim, "3a", reg=reg)
        x = GoogLeNet.inception_module(
            x, 128, 128, 192, 32, 96, 64, chanDim, "3b", reg=reg)
        x = MaxPooling2D((3, 3), strides=(
            2, 2), padding='same', name="pool3")(x)
        # (INCEP * 5) => POOL
        x = GoogLeNet.inception_module(
            x, 192, 96, 208, 16, 48, 64, chanDim, "4a", reg=reg)
        x = GoogLeNet.inception_module(
            x, 160, 112, 224, 24, 64, 64, chanDim, "4b", reg=reg)
        x = GoogLeNet.inception_module(
            x, 128, 128, 256, 24, 64, 64, chanDim, "4c", reg=reg)
        x = GoogLeNet.inception_module(
            x, 112, 144, 288, 32, 64, 64, chanDim, "4d", reg=reg)
        x = GoogLeNet.inception_module(
            x, 256, 160, 320, 32, 128, 128, chanDim, "4e", reg=reg)
x = MaxPooling2D((3, 3), strides=(
2, 2), padding='same', name="pool4")(x)
# POOL => Dropout
x = AveragePooling2D((4, 4), name="pool5")(x)
x = Dropout(0.4, name="do")(x)
# Softmax
x = Flatten(name="flatten")(x)
x = Dense(classes, kernel_regularizer=l2(reg), name="labels")(x)
x = Activation("softmax", name="softmax")(x)
# create the model
model = Model(inputs, x, name="googlenet")
return model
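# A minimal usage sketch (hypothetical input size and class count):
#
#   model = GoogLeNet.build(width=64, height=64, depth=3, classes=10)
#   model.summary()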
|
# Generated by Django 2.1.15 on 2020-04-12 17:41
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('users', '0011_auto_20200411_2030'),
]
operations = [
migrations.AddField(
model_name='customuser',
name='user_Main_Img',
field=models.ImageField(default=django.utils.timezone.now, upload_to='images/'),
preserve_default=False,
),
migrations.AlterField(
model_name='customuser',
name='description',
field=models.CharField(blank=True, help_text='The detailed description of your post (will be shown on map)', max_length=255, null=True),
),
]
|
print('\033[1;31mPalindrome Detector\033[m')
frase = input('Type the phrase without accents: ').strip().replace(' ', '')
palindromo = frase == frase[::-1]
print(frase)
if palindromo:
    print('\033[1;32mThe phrase is a palindrome\033[m')
else:
    print('\033[1;31mThe phrase is not a palindrome\033[m')
|
import unittest
import json
import sys, os.path
from apiV1 import *
class TestApiV1(unittest.TestCase):
def setUp(self):
app.testing = True
self.app = app.test_client()
def test_24h(self):
response = self.app.get('/servicio/v1/prediccion/24horas')
self.assertEqual(response.status_code, 200)
def test_48h(self):
response = self.app.get('/servicio/v1/prediccion/48horas')
self.assertEqual(response.status_code, 200)
def test_72h(self):
response = self.app.get('/servicio/v1/prediccion/72horas')
self.assertEqual(response.status_code, 200)
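
# Hedged addition (not in the original file): allows running this module
# directly with the standard unittest runner.
if __name__ == '__main__':
    unittest.main()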
|
'''
Generate graph
Copyright (C) 2020 Yuto Watanabe
'''
import datetime
import os
import matplotlib.pyplot as plt
try:
from database import read_today
except ImportError:
from .database import read_today
def graph(data_file_path: str) -> str:
'''
    Plot the graph and save it to an image file.
Args:
data_file_path (str): database file path.
Returns:
str: generated image file path.
'''
today_data = read_today(data_file_path)
temps = []
hums = []
jst = datetime.timezone(datetime.timedelta(hours=+9), 'JST')
now = datetime.datetime.now(jst)
    yesterday = now - datetime.timedelta(days=1)
    directory = 'graph_image'
    # If the directory does not exist, create it.
if not os.path.isdir(directory):
os.makedirs(directory)
image_file_path = os.path.join(directory, 'graph.png')
# Sort by date.
sorted_data = sorted(today_data, key=lambda x: x['date'])
for element in sorted_data:
temps.append(element['temp'])
hums.append(element['hum'])
# graph plot
fig = plt.figure(figsize=[7, 4.8])
ax1 = fig.add_subplot(111)
ax2 = ax1.twinx()
# line color
ax1.plot(temps, color='blue')
ax2.plot(hums, color='orange')
# label of y
ax1.set_ylabel('Temperature ($^\\circ$C)')
ax2.set_ylabel('Humidity (%)')
ax1.grid(True)
ax1.set_xlabel('date')
ax1.tick_params(labelbottom=False, bottom=False)
ax2.tick_params(labelbottom=False, bottom=False)
    plt.title(f'Graph of changes in room temperature and humidity on {yesterday.strftime(r"%m/%d")}')
plt.savefig(image_file_path)
return image_file_path
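
# Illustrative usage sketch (added; 'sensor_data.db' is a hypothetical
# database path, not one from the original project):
if __name__ == '__main__':
    print(graph('sensor_data.db'))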
|
import sys
testCases = int(sys.stdin.readline().strip())
for i in range(testCases):
n, index = tuple(map(int, sys.stdin.readline().strip().split()))
queue = list(map(int, sys.stdin.readline().strip().split()))
cnt = 0
while True:
max_num = max(queue)
item = queue[0]
queue = queue[1:]
if item == max_num and index == 0:
cnt += 1
break
        elif item == max_num:
            cnt += 1
            # an item ahead of ours was printed, so our position shifts left
            index -= 1
else:
queue.append(item)
index -= 1
if index < 0:
index += len(queue)
sys.stdout.write(str(cnt) + '\n')
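
# Worked example (illustrative; not from the original problem statement):
# for a single test case "4 0" with priorities "2 1 4 3", the tracked
# document (priority 2 at position 0) is printed third (4 first, then 3,
# then 2), so the program outputs 3.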
|
# -*- coding: utf-8 -*-
from plugin import plugin, require
from azapi import AZlyrics
@require(network=True)
@plugin('lyrics')
class lyrics():
"""
finds lyrics
the format is song-artist
song and artist are separated by a -
-- Example:
lyrics wonderful tonight-eric clapton
"""
def __call__(self, jarvis, s):
jarvis.say(self.find(s))
def find(self, s):
info = s.split('-')
artist = None
song = None
if info:
song = info[0]
info.pop(0)
if info:
artist = info[0]
info.pop(0)
if not song:
# error if song does not exist
return "The song name parameter was incomplete"
if not artist:
# error if the artist does not exist
return "The artist name parameter was incomplete"
response = get_lyric(artist, song)
if response:
return response
else:
return "Song or Singer does not exist or the API does not have lyrics"
# makes api call to AZ lyrics and returns song result
def get_lyric(singer, song):
API = AZlyrics('google', accuracy=0.5)
API.artist = singer
API.title = song
API.getLyrics()
return API.lyrics |
#
# Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren,
# Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor,
# Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan,
# Jonas Koenemann, Yutao Chen, Tobias Schöls, Jonas Schlagenhauf, Moritz Diehl
#
# This file is part of acados.
#
# The 2-Clause BSD License
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.;
#
from acados_template import AcadosModel
import numpy as np
from casadi import SX, vertcat
import matplotlib
import matplotlib.pyplot as plt
def export_linear_mass_model():
model_name = 'linear_mass'
# set up states & controls
qx = SX.sym('qx')
qy = SX.sym('qy')
vx = SX.sym('vx')
vy = SX.sym('vy')
x = vertcat(qx, qy, vx, vy)
# u = SX.sym('u', 2)
ux = SX.sym('ux')
uy = SX.sym('uy')
u = vertcat(ux, uy)
f_expl = vertcat(vx, vy, u)
model = AcadosModel()
# model.f_impl_expr = f_impl
model.f_expl_expr = f_expl
# model.disc_dyn_expr
model.x = x
model.u = u
# model.xdot = xdot
# model.z = z
model.p = []
model.name = model_name
return model
def plot_linear_mass_system_X_state_space(simX, latexify=False, circle=None, x_goal=None):
"""
Params:
simX: x trajectory
latexify: latex style plots
"""
# latexify plot
if latexify:
params = {'backend': 'ps',
'text.latex.preamble': r"\usepackage{gensymb} \usepackage{amsmath}",
'axes.labelsize': 10,
'axes.titlesize': 10,
'legend.fontsize': 10,
'xtick.labelsize': 10,
'ytick.labelsize': 10,
'text.usetex': True,
'font.family': 'serif'
}
matplotlib.rcParams.update(params)
fig, axs = plt.subplots(1, 1)
if x_goal is not None:
plt.plot(x_goal[0], x_goal[1], 'rx')
if circle is not None:
obs_x, obs_y, obs_rad = circle
ts = np.linspace(0,2*np.pi,100)
        plt.plot(obs_rad * np.cos(ts) + obs_x, obs_rad * np.sin(ts) + obs_y, 'r')
plt.grid()
plt.plot(simX[:,0], simX[:,1], '*-b')
plt.title('state space plot')
axs.axis('equal')
plt.show()
return
def plot_linear_mass_system_U(shooting_nodes, simU, latexify=False):
"""
Params:
simU: u trajectory
latexify: latex style plots
"""
nu = simU.shape[1]
for i in range(nu):
plt.subplot(nu, 1, i+1)
line, = plt.step(shooting_nodes, np.append([simU[0,i]], simU[:,i]))
plt.grid()
plt.show()
return
def plot_linear_mass_system_X(shooting_nodes, simX, latexify=False):
"""
Params:
simX: x trajectory
latexify: latex style plots
"""
nx = simX.shape[1]
for i in range(nx):
plt.subplot(nx, 1, i+1)
line, = plt.plot(shooting_nodes, simX[:,i])
plt.grid()
plt.show()
return
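
# Hedged usage sketch (illustrative addition, not part of the original
# example): export the model and print its state and input dimensions.
if __name__ == '__main__':
    model = export_linear_mass_model()
    print(model.name, 'nx =', model.x.size()[0], 'nu =', model.u.size()[0])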
|
import mock
import requests
from st2tests.base import BaseActionTestCase
from lib.slack import SlackPoster, SlackNotifier
class TestSlackPoster(BaseActionTestCase):
action_cls = SlackPoster
@mock.patch("lib.slack.SlackNotifier", autospec=True)
def test_successful_post(self, notifier_mock):
action = self.get_action_instance()
action.config["slack_webhook_url"] = "this-is-the-webhook-url"
exit_code, out = action.run("channel", "user", "message")
self.assertTrue(exit_code)
@mock.patch("lib.slack.SlackNotifier", autospec=True)
def test_unsuccessful_post(self, notifier_mock):
action = self.get_action_instance()
action.config["slack_webhook_url"] = "this-is-the-webhook-url"
notifier = notifier_mock.return_value
notifier.post_message.side_effect = requests.exceptions.HTTPError("a mock HTTPError")
exit_code, out = action.run("channel", "user", "message")
self.assertFalse(exit_code)
|
'''
Definition of the layer between the UI frontend and the database.
2019 Benjamin Kellenberger
'''
from uuid import UUID
from datetime import datetime
import pytz
import dateutil.parser
import json
from modules.Database.app import Database
from .sql_string_builder import SQLStringBuilder
from .annotation_sql_tokens import QueryStrings_annotation, AnnotationParser
class DBMiddleware():
def __init__(self, config):
self.config = config
self.dbConnector = Database(config)
self._fetchProjectSettings()
self.sqlBuilder = SQLStringBuilder(config)
self.annoParser = AnnotationParser(config)
def _fetchProjectSettings(self):
# AI controller URI
aiControllerURI = self.config.getProperty('Server', 'aiController_uri')
if aiControllerURI is None or aiControllerURI.strip() == '':
# no AI backend configured
aiControllerURI = None
# LabelUI drawing styles
with open(self.config.getProperty('LabelUI', 'styles_file', type=str, fallback='modules/LabelUI/static/json/styles.json'), 'r') as f:
styles = json.load(f)
# Image backdrops for index screen
with open(self.config.getProperty('Project', 'backdrops_file', type=str, fallback='modules/LabelUI/static/json/backdrops.json'), 'r') as f:
backdrops = json.load(f)
# Welcome message for UI tutorial
with open(self.config.getProperty('Project', 'welcome_message_file', type=str, fallback='modules/LabelUI/static/templates/welcome_message.html'), 'r') as f:
welcomeMessage = f.readlines()
self.projectSettings = {
'projectName': self.config.getProperty('Project', 'projectName'),
'projectDescription': self.config.getProperty('Project', 'projectDescription'),
'indexURI': self.config.getProperty('Server', 'index_uri', type=str, fallback='/'),
'dataServerURI': self.config.getProperty('Server', 'dataServer_uri'),
'aiControllerURI': aiControllerURI,
'dataType': self.config.getProperty('Project', 'dataType', fallback='images'),
'classes': self.getClassDefinitions(),
'enableEmptyClass': self.config.getProperty('Project', 'enableEmptyClass', fallback='no'),
'annotationType': self.config.getProperty('Project', 'annotationType'),
'predictionType': self.config.getProperty('Project', 'predictionType'),
'showPredictions': self.config.getProperty('LabelUI', 'showPredictions', fallback='yes'),
'showPredictions_minConf': self.config.getProperty('LabelUI', 'showPredictions_minConf', type=float, fallback=0.5),
'carryOverPredictions': self.config.getProperty('LabelUI', 'carryOverPredictions', fallback='no'),
'carryOverRule': self.config.getProperty('LabelUI', 'carryOverRule', fallback='maxConfidence'),
'carryOverPredictions_minConf': self.config.getProperty('LabelUI', 'carryOverPredictions_minConf', type=float, fallback=0.75),
'defaultBoxSize_w': self.config.getProperty('LabelUI', 'defaultBoxSize_w', type=int, fallback=10),
'defaultBoxSize_h': self.config.getProperty('LabelUI', 'defaultBoxSize_h', type=int, fallback=10),
'minBoxSize_w': self.config.getProperty('Project', 'box_minWidth', type=int, fallback=1),
'minBoxSize_h': self.config.getProperty('Project', 'box_minHeight', type=int, fallback=1),
'numImagesPerBatch': self.config.getProperty('LabelUI', 'numImagesPerBatch', type=int, fallback=1),
'minImageWidth': self.config.getProperty('LabelUI', 'minImageWidth', type=int, fallback=300),
'numImageColumns_max': self.config.getProperty('LabelUI', 'numImageColumns_max', type=int, fallback=1),
'defaultImage_w': self.config.getProperty('LabelUI', 'defaultImage_w', type=int, fallback=800),
'defaultImage_h': self.config.getProperty('LabelUI', 'defaultImage_h', type=int, fallback=600),
'styles': styles['styles'],
'backdrops': backdrops,
'welcomeMessage': welcomeMessage,
'demoMode': self.config.getProperty('Project', 'demoMode', type=bool, fallback=False)
}
def _assemble_annotations(self, cursor):
response = {}
while True:
b = cursor.fetchone()
if b is None:
break
imgID = str(b['image'])
if not imgID in response:
response[imgID] = {
'fileName': b['filename'],
'predictions': {},
'annotations': {},
'last_checked': None
}
viewcount = b['viewcount']
if viewcount is not None:
response[imgID]['viewcount'] = viewcount
last_checked = b['last_checked']
if last_checked is not None:
if response[imgID]['last_checked'] is None:
response[imgID]['last_checked'] = last_checked
else:
response[imgID]['last_checked'] = max(response[imgID]['last_checked'], last_checked)
# parse annotations and predictions
entryID = str(b['id'])
if b['ctype'] is not None:
colnames = self.sqlBuilder.getColnames(b['ctype'])
entry = {}
for c in colnames:
value = b[c]
if isinstance(value, datetime):
value = value.timestamp()
elif isinstance(value, UUID):
value = str(value)
entry[c] = value
if b['ctype'] == 'annotation':
response[imgID]['annotations'][entryID] = entry
elif b['ctype'] == 'prediction':
response[imgID]['predictions'][entryID] = entry
return response
def getProjectSettings(self):
'''
Queries the database for general project-specific metadata, such as:
- Classes: names, indices, default colors
- Annotation type: one of {class labels, positions, bboxes}
'''
return self.projectSettings
def getProjectInfo(self):
'''
Returns safe, shareable information about the project.
'''
return {
'projectName' : self.projectSettings['projectName'],
'projectDescription' : self.projectSettings['projectDescription'],
'demoMode': self.config.getProperty('Project', 'demoMode', type=bool, fallback=False),
'backdrops': self.projectSettings['backdrops']['images']
}
def getClassDefinitions(self):
'''
Returns a dictionary with entries for all classes in the project.
'''
classdef = {
'entries': {
'default': {} # default group for ungrouped label classes
}
}
schema = self.config.getProperty('Database', 'schema')
# query data
sql = '''
SELECT 'group' AS type, id, NULL as idx, name, color, parent, NULL AS keystroke FROM {schema}.labelclassgroup
UNION ALL
SELECT 'class' AS type, id, idx, name, color, labelclassgroup, keystroke FROM {schema}.labelclass;
'''.format(schema=schema)
classData = self.dbConnector.execute(sql, None, 'all')
# assemble entries first
allEntries = {}
numClasses = 0
for cl in classData:
id = str(cl['id'])
entry = {
'id': id,
'name': cl['name'],
'color': cl['color'],
'parent': str(cl['parent']) if cl['parent'] is not None else None,
}
if cl['type'] == 'group':
entry['entries'] = {}
else:
entry['index'] = cl['idx']
entry['keystroke'] = cl['keystroke']
numClasses += 1
allEntries[id] = entry
# transform into tree
def _find_parent(tree, parentID):
if parentID is None:
return tree['entries']['default']
elif 'id' in tree and tree['id'] == parentID:
return tree
elif 'entries' in tree:
for ek in tree['entries'].keys():
rv = _find_parent(tree['entries'][ek], parentID)
if rv is not None:
return rv
return None
else:
return None
allEntries['default'] = {
'name': '(other)',
'entries': {}
}
allEntries = {
'entries': allEntries
}
for key in list(allEntries['entries'].keys()):
if key == 'default':
continue
if key in allEntries['entries']:
entry = allEntries['entries'][key]
parentID = entry['parent']
del entry['parent']
if 'entries' in entry and parentID is None:
# group, but no parent: append to root directly
allEntries['entries'][key] = entry
else:
# move item
parent = _find_parent(allEntries, parentID)
parent['entries'][key] = entry
del allEntries['entries'][key]
classdef = allEntries
classdef['numClasses'] = numClasses
return classdef
def getBatch_fixed(self, username, data):
'''
Returns entries from the database based on the list of data entry identifiers specified.
'''
# query
sql = self.sqlBuilder.getFixedImagesQueryString(self.projectSettings['demoMode'])
# parse results
queryVals = (tuple(UUID(d) for d in data), username, username,)
if self.projectSettings['demoMode']:
queryVals = (tuple(UUID(d) for d in data),)
with self.dbConnector.execute_cursor(sql, queryVals) as cursor:
try:
response = self._assemble_annotations(cursor)
# self.dbConnector.conn.commit()
except Exception as e:
print(e)
# self.dbConnector.conn.rollback()
finally:
pass
# cursor.close()
return { 'entries': response }
def getBatch_auto(self, username, order='unlabeled', subset='default', limit=None):
'''
            Returns the next batch of images for the given user, selected
            automatically according to the given ordering criterion (e.g.
            'unlabeled') and subset, up to the specified limit.
'''
# query
sql = self.sqlBuilder.getNextBatchQueryString(order, subset, self.projectSettings['demoMode'])
# limit (TODO: make 128 a hyperparameter)
if limit is None:
limit = 128
else:
limit = min(int(limit), 128)
# parse results
queryVals = (username,limit,username,)
if self.projectSettings['demoMode']:
queryVals = (limit,)
with self.dbConnector.execute_cursor(sql, queryVals) as cursor:
response = self._assemble_annotations(cursor)
return { 'entries': response }
def getBatch_timeRange(self, minTimestamp, maxTimestamp, userList, skipEmptyImages=False, limit=None):
'''
Returns images that have been annotated within the given time range and/or
by the given user(s). All arguments are optional.
Useful for reviewing existing annotations.
'''
# query string
sql = self.sqlBuilder.getDateQueryString(minTimestamp, maxTimestamp, userList, skipEmptyImages)
# check validity and provide arguments
queryVals = []
if userList is not None:
queryVals.append(tuple(userList))
if minTimestamp is not None:
queryVals.append(minTimestamp)
if maxTimestamp is not None:
queryVals.append(maxTimestamp)
if skipEmptyImages and userList is not None:
queryVals.append(tuple(userList))
# limit (TODO: make 128 a hyperparameter)
if limit is None:
limit = 128
else:
limit = min(int(limit), 128)
queryVals.append(limit)
if userList is not None:
queryVals.append(tuple(userList))
# query and parse results
with self.dbConnector.execute_cursor(sql, tuple(queryVals)) as cursor:
try:
response = self._assemble_annotations(cursor)
# self.dbConnector.conn.commit()
except Exception as e:
print(e)
# self.dbConnector.conn.rollback()
finally:
pass
# cursor.close()
return { 'entries': response }
def get_timeRange(self, userList, skipEmptyImages=False):
'''
Returns two timestamps denoting the temporal limits within which
images have been viewed by the users provided in the userList.
Arguments:
- userList: string (single user name) or list of strings (multiple).
Can also be None; in this case all annotations will be
checked.
- skipEmptyImages: if True, only images that contain at least one
annotation will be considered.
'''
# query string
sql = self.sqlBuilder.getTimeRangeQueryString(userList, skipEmptyImages)
arguments = (None if userList is None else tuple(userList))
result = self.dbConnector.execute(sql, (arguments,), numReturn=1)
if result is not None and len(result):
return {
'minTimestamp': result[0]['mintimestamp'],
'maxTimestamp': result[0]['maxtimestamp'],
}
else:
return {
'error': 'no annotations made'
}
def submitAnnotations(self, username, submissions):
'''
Sends user-provided annotations to the database.
'''
if self.projectSettings['demoMode']:
return 0
# assemble values
colnames = getattr(QueryStrings_annotation, self.projectSettings['annotationType']).value
values_insert = []
values_update = []
meta = (None if not 'meta' in submissions else json.dumps(submissions['meta']))
# for deletion: remove all annotations whose image ID matches but whose annotation ID is not among the submitted ones
ids = []
viewcountValues = []
for imageKey in submissions['entries']:
entry = submissions['entries'][imageKey]
try:
lastChecked = entry['timeCreated']
lastTimeRequired = entry['timeRequired']
if lastTimeRequired is None: lastTimeRequired = 0
except:
lastChecked = datetime.now(tz=pytz.utc)
lastTimeRequired = 0
if 'annotations' in entry and len(entry['annotations']):
for annotation in entry['annotations']:
# assemble annotation values
annotationTokens = self.annoParser.parseAnnotation(annotation)
annoValues = []
for cname in colnames:
if cname == 'id':
if cname in annotationTokens:
# cast and only append id if the annotation is an existing one
annoValues.append(UUID(annotationTokens[cname]))
ids.append(UUID(annotationTokens[cname]))
elif cname == 'image':
annoValues.append(UUID(imageKey))
elif cname == 'label' and annotationTokens[cname] is not None:
annoValues.append(UUID(annotationTokens[cname]))
elif cname == 'timeCreated':
try:
annoValues.append(dateutil.parser.parse(annotationTokens[cname]))
except:
annoValues.append(datetime.now(tz=pytz.utc))
elif cname == 'timeRequired':
timeReq = annotationTokens[cname]
if timeReq is None: timeReq = 0
annoValues.append(timeReq)
elif cname == 'username':
annoValues.append(username)
elif cname in annotationTokens:
annoValues.append(annotationTokens[cname])
elif cname == 'unsure':
if 'unsure' in annotationTokens and annotationTokens['unsure'] is not None:
annoValues.append(annotationTokens[cname])
else:
annoValues.append(False)
elif cname == 'meta':
annoValues.append(meta)
else:
annoValues.append(None)
if 'id' in annotationTokens:
# existing annotation; update
values_update.append(tuple(annoValues))
else:
# new annotation
values_insert.append(tuple(annoValues))
viewcountValues.append((username, imageKey, 1, lastChecked, lastTimeRequired, meta))
schema = self.config.getProperty('Database', 'schema')
# delete all annotations that are not in submitted batch
imageKeys = list(UUID(k) for k in submissions['entries'])
if len(imageKeys):
if len(ids):
sql = '''
DELETE FROM {schema}.annotation WHERE username = %s AND id IN (
SELECT idQuery.id FROM (
SELECT * FROM {schema}.annotation WHERE id NOT IN %s
) AS idQuery
JOIN (
SELECT * FROM {schema}.annotation WHERE image IN %s
) AS imageQuery ON idQuery.id = imageQuery.id);
'''.format(schema=schema)
self.dbConnector.execute(sql, (username, tuple(ids), tuple(imageKeys),))
else:
# no annotations submitted; delete all annotations submitted before
sql = '''
DELETE FROM {schema}.annotation WHERE username = %s AND image IN %s;
'''.format(schema=schema)
self.dbConnector.execute(sql, (username, tuple(imageKeys),))
# insert new annotations
if len(values_insert):
sql = '''
INSERT INTO {}.annotation ({})
VALUES %s ;
'''.format(
schema,
', '.join(colnames[1:]) # skip 'id' column
)
self.dbConnector.insert(sql, values_insert)
# update existing annotations
if len(values_update):
updateCols = ''
for col in colnames:
if col == 'label':
updateCols += '{col} = UUID(e.{col}),'.format(col=col)
elif col == 'timeRequired':
# we sum the required times together
updateCols += '{col} = COALESCE(a.{col},0) + COALESCE(e.{col},0),'.format(col=col)
else:
updateCols += '{col} = e.{col},'.format(col=col)
sql = '''
UPDATE {schema}.annotation AS a
SET {updateCols}
FROM (VALUES %s) AS e({colnames})
WHERE e.id = a.id;
'''.format(
schema=schema,
updateCols=updateCols.strip(','),
colnames=', '.join(colnames)
)
self.dbConnector.insert(sql, values_update)
# viewcount table
sql = '''
INSERT INTO {}.image_user (username, image, viewcount, last_checked, last_time_required, meta)
VALUES %s
ON CONFLICT (username, image) DO UPDATE SET viewcount = image_user.viewcount + 1, last_checked = EXCLUDED.last_checked, last_time_required = EXCLUDED.last_time_required, meta = EXCLUDED.meta;
'''.format(schema)
self.dbConnector.insert(sql, viewcountValues)
return 0 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
from PyQt5.QtWidgets import QLabel
from PyQt5.QtGui import QPainter, QFontMetrics
from PyQt5.QtCore import Qt
class ElidedLabel(QLabel):
def paintEvent(self, event):
painter = QPainter(self)
metrics = QFontMetrics(self.font())
elided_text = metrics.elidedText(self.text(), Qt.ElideRight, self.width())
painter.drawText(self.rect(), self.alignment(), elided_text)
if __name__ == '__main__':
from PyQt5.QtWidgets import QApplication
from PyQt5.QtGui import QFont
app = QApplication([])
mw = ElidedLabel('abc123' * 100)
mw.setFont(QFont('Arial', 20))
mw.resize(100, 100)
mw.show()
app.exec()
|
from .dressing import DressingEnv
from .agents import pr2, tiago_dualhand, baxter, sawyer, jaco, stretch, panda, human
from .agents.pr2 import PR2
from .agents.tiago_dualhand import tiago_dualhand
from .agents.baxter import Baxter
from .agents.sawyer import Sawyer
from .agents.jaco import Jaco
from .agents.stretch import Stretch
from .agents.panda import Panda
from .agents.human import Human
from ray.rllib.env.multi_agent_env import MultiAgentEnv
from ray.tune.registry import register_env
robot_arm = 'left'
human_controllable_joint_indices = human.left_arm_joints
class DressingPR2Env(DressingEnv):
def __init__(self):
super(DressingPR2Env, self).__init__(robot=PR2(robot_arm), human=Human(human_controllable_joint_indices, controllable=False))
class DressingTiagoDualhandEnv(DressingEnv):
def __init__(self):
super(DressingTiagoDualhandEnv, self).__init__(robot=tiago_dualhand(robot_arm), human=Human(human_controllable_joint_indices, controllable=False))
class DressingBaxterEnv(DressingEnv):
def __init__(self):
super(DressingBaxterEnv, self).__init__(robot=Baxter(robot_arm), human=Human(human_controllable_joint_indices, controllable=False))
class DressingSawyerEnv(DressingEnv):
def __init__(self):
super(DressingSawyerEnv, self).__init__(robot=Sawyer(robot_arm), human=Human(human_controllable_joint_indices, controllable=False))
class DressingJacoEnv(DressingEnv):
def __init__(self):
super(DressingJacoEnv, self).__init__(robot=Jaco(robot_arm), human=Human(human_controllable_joint_indices, controllable=False))
class DressingStretchEnv(DressingEnv):
def __init__(self):
super(DressingStretchEnv, self).__init__(robot=Stretch('wheel_'+robot_arm), human=Human(human_controllable_joint_indices, controllable=False))
class DressingPandaEnv(DressingEnv):
def __init__(self):
super(DressingPandaEnv, self).__init__(robot=Panda(robot_arm), human=Human(human_controllable_joint_indices, controllable=False))
class DressingPR2HumanEnv(DressingEnv, MultiAgentEnv):
def __init__(self):
super(DressingPR2HumanEnv, self).__init__(robot=PR2(robot_arm), human=Human(human_controllable_joint_indices, controllable=True))
register_env('assistive_gym:DressingPR2Human-v1', lambda config: DressingPR2HumanEnv())
class DressingTiagoDualhandHumanEnv(DressingEnv, MultiAgentEnv):
def __init__(self):
super(DressingTiagoDualhandHumanEnv, self).__init__(robot=tiago_dualhand(robot_arm), human=Human(human_controllable_joint_indices, controllable=True))
register_env('assistive_gym:DressingTiagoDualhandHuman-v1', lambda config: DressingTiagoDualhandHumanEnv())
class DressingBaxterHumanEnv(DressingEnv, MultiAgentEnv):
def __init__(self):
super(DressingBaxterHumanEnv, self).__init__(robot=Baxter(robot_arm), human=Human(human_controllable_joint_indices, controllable=True))
register_env('assistive_gym:DressingBaxterHuman-v1', lambda config: DressingBaxterHumanEnv())
class DressingSawyerHumanEnv(DressingEnv, MultiAgentEnv):
def __init__(self):
super(DressingSawyerHumanEnv, self).__init__(robot=Sawyer(robot_arm), human=Human(human_controllable_joint_indices, controllable=True))
register_env('assistive_gym:DressingSawyerHuman-v1', lambda config: DressingSawyerHumanEnv())
class DressingJacoHumanEnv(DressingEnv, MultiAgentEnv):
def __init__(self):
super(DressingJacoHumanEnv, self).__init__(robot=Jaco(robot_arm), human=Human(human_controllable_joint_indices, controllable=True))
register_env('assistive_gym:DressingJacoHuman-v1', lambda config: DressingJacoHumanEnv())
class DressingStretchHumanEnv(DressingEnv, MultiAgentEnv):
def __init__(self):
super(DressingStretchHumanEnv, self).__init__(robot=Stretch('wheel_'+robot_arm), human=Human(human_controllable_joint_indices, controllable=True))
register_env('assistive_gym:DressingStretchHuman-v1', lambda config: DressingStretchHumanEnv())
class DressingPandaHumanEnv(DressingEnv, MultiAgentEnv):
def __init__(self):
super(DressingPandaHumanEnv, self).__init__(robot=Panda(robot_arm), human=Human(human_controllable_joint_indices, controllable=True))
register_env('assistive_gym:DressingPandaHuman-v1', lambda config: DressingPandaHumanEnv())
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "nagracks"
__date__ = "18-07-2016"
__license__ = "MIT"
__copyright__ = "Copyright © 2016 nagracks"
import argparse
import json
import os
import sys
import praw
import requests
import tqdm
from bs4 import BeautifulSoup
def get_top_submissions(subreddit, limit, period):
"""Get top submissions from selection time period with limit
Parameters
----------
subreddit : str
Name of the subreddit
limit : int
Max limit of getting submissions
period : str
A single character. Possible values are `[h]our`, `[d]ay`,
`[w]eek`, `[m]onth`, `[y]ear` and `[a]ll`.
Yields
-------
str
Subreddit submissions
Examples
--------
::
for submission in get_top_submissions('getmotivated', 15, 'h'):
# This is submission title
print(submission)
# This is submission url
print(submission.url)
"""
r = praw.Reddit(user_agent='nagracks')
all_submissions = r.get_subreddit(subreddit, fetch=True)
timeframe = {
'h': all_submissions.get_top_from_hour,
'd': all_submissions.get_top_from_day,
'w': all_submissions.get_top_from_week,
'm': all_submissions.get_top_from_month,
'y': all_submissions.get_top_from_year,
'a': all_submissions.get_top_from_all
}
return timeframe.get(period)(limit=limit)
def image_urls(submissions):
"""Provides downloadable image urls. This works like this, if url
simply ends with image extensions then it will yield it otherwise it
will go on with other conditions. Other conditions are if url
contain ``imgur`` and ``/a`` or ``/gallery/`` then it will yield
urls with the help of ``BeautifulSoup``. And at the last if none of
these two methods works it will try to get downloadable image url by
making raw url by adding `.jpg` at the end of url and then checks
its headers' content-type if it is compatible image then generate
that url.
Parameters
----------
submissions : generator
Subreddit submissions
Yields
-------
str
Downloadable image urls
Examples
--------
::
submissions = get_top_submissions(
subreddit, args.limit, args.period
)
for image_url in image_urls(submissions):
# Downloadble image url
print(image_url)
"""
for submission in submissions:
url = submission.url
img_ext = ('jpg', 'jpeg', 'png', 'gif')
if url.endswith(img_ext):
yield url
elif 'imgur' in url and ('/a/' in url or '/gallery/' in url):
r = requests.get(url).text
soup_ob = BeautifulSoup(r, 'html.parser')
for link in soup_ob.find_all('div', {'class': 'post-image'}):
try:
partial_url = link.img.get('src')
# img_link comes as //imgur.com/id make it
# https://imgur.com/id
url = 'https:' + partial_url
yield url
except:
pass
else:
raw_url = url + '.jpg'
try:
r = requests.get(raw_url)
r.raise_for_status()
extension = r.headers['content-type'].split('/')[-1]
except Exception as e:
extension = ''
if extension in img_ext:
link = '{url}.{ext}'.format(url=url, ext=extension)
yield link
def download_images(url, subreddit_name, destination):
"""Download images
Parameters
----------
url : str
Image url to download
subreddit_name : str
Subreddit name
destination : str
Destination path to save image
Returns
-------
Write data to file
"""
    # `?` and `&` should not be present in the filename, so replace them
    # with `X`
    table = str.maketrans('?&', 'XX')
    # Take the last 10 characters of the in-url filename; joined with the
    # subreddit name below, this gives a reasonably unique filename
url_chars = (url.split('/')[-1][-10:]).translate(table)
# Make filename
filename = "{name}_{chars}".format(name=subreddit_name, chars=url_chars)
# Make full path to save destination of images
if destination:
path = os.path.expanduser(destination)
else:
path = os.path.expanduser('~')
path = os.path.join(path, 'reddit_pics')
os.makedirs(path, exist_ok=True)
file_path = os.path.join(path, filename)
if os.path.exists(file_path):
print("{} is already downloaded".format(filename))
else:
print("Download to {}".format(file_path))
r = requests.get(url, stream=True)
with open(file_path, 'wb') as outfile:
for chunk in (
tqdm.tqdm(r.iter_content(chunk_size=1024),
total=(int(r.headers.get('content-length', 0)) // 1024),
unit='KB')
):
if chunk:
outfile.write(chunk)
else:
return
class ArgumentConfig(object):
"""ArgumentConfig class
Parameters
----------
parser: argparse.ArgumentParser
All the arguments
Methods
-------
parse_args
"""
def __init__(self, parser: argparse.ArgumentParser):
self.parser = parser
self.parser.add_argument(
'--config', '-c',
nargs='?',
const='stdout',
help= """
Read arguments from config file. The config can be
generated with --write-config option
"""
)
self.parser.add_argument(
'--write-config', '-wc',
nargs='?',
metavar='FILENAME',
const='stdout',
help= """
Print script options to screen in JSON format and if
FILENAME is specified then write to that file
"""
)
def parse_args(self, *args, **kwargs):
"""Parse commandline args args
Parameters
----------
*args
**kwargs
Returns
-------
Argparse namespace
"""
# Parse an empty list to get the defaults
defaults = vars(self.parser.parse_args([]))
passed_args = vars(self.parser.parse_args(*args, **kwargs))
# Only keep the args that aren't the default
passed_args = {
key: value for (key, value) in passed_args.items()
if (key in defaults and defaults[key] != value)
}
config_path = passed_args.pop('config', None)
if config_path:
with open(config_path, 'r') as config_file:
config_args = json.load(config_file)
else:
config_args = dict()
        # Override defaults with config values, then with passed args
options = {**defaults, **config_args, **passed_args}
# Remove the config options from options. They're not needed any
# more and we don't want them serialized
options.pop('config', None)
options.pop('write_config', None)
# Print the options (to file) if needed
config_dst = passed_args.pop('write_config', None)
if config_dst:
print(json.dumps(options, sort_keys=True, indent=4))
if config_dst != 'stdout':
with open(config_dst, 'w', encoding='utf-8') as config_file:
print(json.dumps(options, sort_keys=True, indent=4), file=config_file)
print("Current options saved to: {}".format(config_dst))
sys.exit(0)
return argparse.Namespace(**options)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'-s', '--subreddit',
default=['earthporn', 'cityporn'],
nargs='+',
help="""
        Name of the subreddits. Provide as many subreddits as you
        want to download images from.
"""
)
parser.add_argument(
'-p', '--period',
default='w',
choices=['h', 'd', 'w', 'm', 'y', 'a'],
help="""
[h]our, [d]ay, [w]eek, [m]onth, [y]ear, or [a]ll. Period
        of time from which you want images. Defaults to
'get_top_from_[w]eek'
"""
)
parser.add_argument(
'-l', '--limit',
metavar='N',
type=int,
default=15,
help="""
Maximum URL limit per subreddit. Defaults to 15
"""
)
parser.add_argument(
'--destination', '-d',
help="""
Destination path. By default it saves to $HOME/reddit_pics
"""
)
argconfig = ArgumentConfig(parser)
args = vars(argconfig.parse_args())
# Handle control+c nicely
import signal
def exit_(signum, frame):
os.sys.exit(1)
signal.signal(signal.SIGINT, exit_)
# Download images
for subreddit in args['subreddit']:
submissions = get_top_submissions(
subreddit, args['limit'], args['period']
)
for image_url in image_urls(submissions):
download_images(image_url, subreddit, args['destination'])
|
'''
Created on 7 June 2016
@author: saldenisov
'''
from configobj import ConfigObj
from validate import Validator
class Configuration(object):
"""
Creates main configuration
"""
def __init__(self, path='C:\\Users\\saldenisov\\Dropbox\\Python\\QY\\Settings\\'):
__configspec = ConfigObj(path + 'configspec.ini', encoding='UTF8',
list_values=False, _inspec=True)
self.__config = ConfigObj(
path + 'config_main.ini', configspec=__configspec)
val = Validator()
if self.config.validate(val) == False:
raise ValueError(
'Configuration file is corrupted. Check in settings config_main.ini and configspec.ini.')
@property
def config(self):
return self.__config
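
# Hedged usage sketch (illustrative; assumes the default settings directory
# above exists and contains config_main.ini and configspec.ini):
if __name__ == '__main__':
    conf = Configuration()
    print(list(conf.config.keys()))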
|
import requests
import xmltodict
def buscar_cds():
response = requests.get('http://www.w3schools.com/xml/cd_catalog.xml')
xml = response.text
dct = xmltodict.parse(xml)
cds = dct['CATALOG']['CD']
return [c['TITLE'] for c in cds]
if __name__ == '__main__':
print(buscar_cds())
|
descriptor=[
{ "apis": ["GL", "GLES", "GLES2", "GL_CORE"], "params": [
[ "ALPHA_BITS", "BUFFER_INT(Visual.alphaBits), extra_new_buffers" ],
[ "BLEND", "CONTEXT_BIT0(Color.BlendEnabled), NO_EXTRA" ],
[ "BLEND_SRC", "CONTEXT_ENUM(Color.Blend[0].SrcRGB), NO_EXTRA" ],
[ "BLUE_BITS", "BUFFER_INT(Visual.blueBits), extra_new_buffers" ],
[ "COLOR_CLEAR_VALUE", "LOC_CUSTOM, TYPE_FLOATN_4, 0, extra_new_frag_clamp" ],
[ "COLOR_WRITEMASK", "LOC_CUSTOM, TYPE_INT_4, 0, NO_EXTRA" ],
[ "CULL_FACE", "CONTEXT_BOOL(Polygon.CullFlag), NO_EXTRA" ],
[ "CULL_FACE_MODE", "CONTEXT_ENUM(Polygon.CullFaceMode), NO_EXTRA" ],
[ "DEPTH_BITS", "BUFFER_INT(Visual.depthBits), extra_new_buffers" ],
[ "DEPTH_CLEAR_VALUE", "CONTEXT_FIELD(Depth.Clear, TYPE_DOUBLEN), NO_EXTRA" ],
[ "DEPTH_FUNC", "CONTEXT_ENUM(Depth.Func), NO_EXTRA" ],
[ "DEPTH_RANGE", "LOC_CUSTOM, TYPE_DOUBLEN_2, 0, NO_EXTRA" ],
[ "DEPTH_TEST", "CONTEXT_BOOL(Depth.Test), NO_EXTRA" ],
[ "DEPTH_WRITEMASK", "CONTEXT_BOOL(Depth.Mask), NO_EXTRA" ],
[ "DITHER", "CONTEXT_BOOL(Color.DitherFlag), NO_EXTRA" ],
[ "FRONT_FACE", "CONTEXT_ENUM(Polygon.FrontFace), NO_EXTRA" ],
[ "GREEN_BITS", "BUFFER_INT(Visual.greenBits), extra_new_buffers" ],
[ "LINE_WIDTH", "CONTEXT_FLOAT(Line.Width), NO_EXTRA" ],
[ "ALIASED_LINE_WIDTH_RANGE", "CONTEXT_FLOAT2(Const.MinLineWidth), NO_EXTRA" ],
[ "MAX_ELEMENTS_VERTICES", "CONTEXT_INT(Const.MaxArrayLockSize), NO_EXTRA" ],
[ "MAX_ELEMENTS_INDICES", "CONTEXT_INT(Const.MaxArrayLockSize), NO_EXTRA" ],
[ "MAX_TEXTURE_SIZE", "LOC_CUSTOM, TYPE_INT, offsetof(struct gl_context, Const.MaxTextureLevels), NO_EXTRA" ],
[ "MAX_VIEWPORT_DIMS", "CONTEXT_INT2(Const.MaxViewportWidth), NO_EXTRA" ],
[ "PACK_ALIGNMENT", "CONTEXT_INT(Pack.Alignment), NO_EXTRA" ],
[ "ALIASED_POINT_SIZE_RANGE", "CONTEXT_FLOAT2(Const.MinPointSize), NO_EXTRA" ],
[ "POLYGON_OFFSET_FACTOR", "CONTEXT_FLOAT(Polygon.OffsetFactor ), NO_EXTRA" ],
[ "POLYGON_OFFSET_UNITS", "CONTEXT_FLOAT(Polygon.OffsetUnits ), NO_EXTRA" ],
[ "POLYGON_OFFSET_FILL", "CONTEXT_BOOL(Polygon.OffsetFill), NO_EXTRA" ],
[ "RED_BITS", "BUFFER_INT(Visual.redBits), extra_new_buffers" ],
[ "SCISSOR_BOX", "LOC_CUSTOM, TYPE_INT_4, 0, NO_EXTRA" ],
[ "SCISSOR_TEST", "LOC_CUSTOM, TYPE_BOOLEAN, NO_OFFSET, NO_EXTRA" ],
[ "STENCIL_BITS", "BUFFER_INT(Visual.stencilBits), extra_new_buffers" ],
[ "STENCIL_CLEAR_VALUE", "CONTEXT_INT(Stencil.Clear), NO_EXTRA" ],
[ "STENCIL_FAIL", "LOC_CUSTOM, TYPE_ENUM, NO_OFFSET, NO_EXTRA" ],
[ "STENCIL_FUNC", "LOC_CUSTOM, TYPE_ENUM, NO_OFFSET, NO_EXTRA" ],
[ "STENCIL_PASS_DEPTH_FAIL", "LOC_CUSTOM, TYPE_ENUM, NO_OFFSET, NO_EXTRA" ],
[ "STENCIL_PASS_DEPTH_PASS", "LOC_CUSTOM, TYPE_ENUM, NO_OFFSET, NO_EXTRA" ],
[ "STENCIL_REF", "LOC_CUSTOM, TYPE_UINT, NO_OFFSET, NO_EXTRA" ],
[ "STENCIL_TEST", "CONTEXT_BOOL(Stencil.Enabled), NO_EXTRA" ],
[ "STENCIL_VALUE_MASK", "LOC_CUSTOM, TYPE_UINT, NO_OFFSET, NO_EXTRA" ],
[ "STENCIL_WRITEMASK", "LOC_CUSTOM, TYPE_UINT, NO_OFFSET, NO_EXTRA" ],
[ "SUBPIXEL_BITS", "CONTEXT_INT(Const.SubPixelBits), NO_EXTRA" ],
[ "TEXTURE_BINDING_2D", "LOC_CUSTOM, TYPE_INT, TEXTURE_2D_INDEX, NO_EXTRA" ],
[ "UNPACK_ALIGNMENT", "CONTEXT_INT(Unpack.Alignment), NO_EXTRA" ],
[ "VIEWPORT", "LOC_CUSTOM, TYPE_FLOAT_4, 0, NO_EXTRA" ],
# GL_ARB_multitexture
[ "ACTIVE_TEXTURE", "LOC_CUSTOM, TYPE_INT, 0, NO_EXTRA" ],
# Note that all the OES_* extensions require that the Mesa "struct
# gl_extensions" include a member with the name of the extension.
# That structure does not yet include OES extensions (and we're
# not sure whether it will). If it does, all the OES_*
# extensions below should mark the dependency.
# GL_ARB_texture_cube_map
[ "TEXTURE_BINDING_CUBE_MAP_ARB", "LOC_CUSTOM, TYPE_INT, TEXTURE_CUBE_INDEX, extra_ARB_texture_cube_map" ],
# XXX: OES_texture_cube_map
[ "MAX_CUBE_MAP_TEXTURE_SIZE_ARB", "LOC_CUSTOM, TYPE_INT, offsetof(struct gl_context, Const.MaxCubeTextureLevels), extra_ARB_texture_cube_map" ],
# XXX: OES_blend_subtract
[ "BLEND_SRC_RGB", "CONTEXT_ENUM(Color.Blend[0].SrcRGB), NO_EXTRA" ],
[ "BLEND_DST_RGB", "CONTEXT_ENUM(Color.Blend[0].DstRGB), NO_EXTRA" ],
[ "BLEND_SRC_ALPHA", "CONTEXT_ENUM(Color.Blend[0].SrcA), NO_EXTRA" ],
[ "BLEND_DST_ALPHA", "CONTEXT_ENUM(Color.Blend[0].DstA), NO_EXTRA" ],
# GL_BLEND_EQUATION_RGB, which is what we're really after, is
# defined identically to GL_BLEND_EQUATION.
[ "BLEND_EQUATION", "CONTEXT_ENUM(Color.Blend[0].EquationRGB), NO_EXTRA" ],
[ "BLEND_EQUATION_ALPHA_EXT", "CONTEXT_ENUM(Color.Blend[0].EquationA), NO_EXTRA" ],
# GL_ARB_texture_compression
[ "NUM_COMPRESSED_TEXTURE_FORMATS_ARB", "LOC_CUSTOM, TYPE_INT, 0, NO_EXTRA" ],
[ "COMPRESSED_TEXTURE_FORMATS", "LOC_CUSTOM, TYPE_INT_N, 0, NO_EXTRA" ],
# GL_ARB_multisample
[ "SAMPLE_ALPHA_TO_COVERAGE_ARB", "CONTEXT_BOOL(Multisample.SampleAlphaToCoverage), NO_EXTRA" ],
[ "SAMPLE_COVERAGE_ARB", "CONTEXT_BOOL(Multisample.SampleCoverage), NO_EXTRA" ],
[ "SAMPLE_COVERAGE_VALUE_ARB", "CONTEXT_FLOAT(Multisample.SampleCoverageValue), NO_EXTRA" ],
[ "SAMPLE_COVERAGE_INVERT_ARB", "CONTEXT_BOOL(Multisample.SampleCoverageInvert), NO_EXTRA" ],
[ "SAMPLE_BUFFERS_ARB", "LOC_CUSTOM, TYPE_INT, 0, extra_new_buffers" ],
[ "SAMPLES_ARB", "LOC_CUSTOM, TYPE_INT, 0, extra_new_buffers" ],
# GL_ARB_sample_shading
[ "SAMPLE_SHADING_ARB", "CONTEXT_BOOL(Multisample.SampleShading), extra_gl40_ARB_sample_shading" ],
[ "MIN_SAMPLE_SHADING_VALUE_ARB", "CONTEXT_FLOAT(Multisample.MinSampleShadingValue), extra_gl40_ARB_sample_shading" ],
# GL_SGIS_generate_mipmap
[ "GENERATE_MIPMAP_HINT_SGIS", "CONTEXT_ENUM(Hint.GenerateMipmap), NO_EXTRA" ],
# GL_ARB_vertex_buffer_object
[ "ARRAY_BUFFER_BINDING_ARB", "LOC_CUSTOM, TYPE_INT, 0, NO_EXTRA" ],
# GL_ARB_vertex_buffer_object
# GL_WEIGHT_ARRAY_BUFFER_BINDING_ARB - not supported
[ "ELEMENT_ARRAY_BUFFER_BINDING_ARB", "LOC_CUSTOM, TYPE_INT, 0, NO_EXTRA" ],
# GL_ARB_color_buffer_float
[ "CLAMP_VERTEX_COLOR", "CONTEXT_ENUM(Light.ClampVertexColor), extra_ARB_color_buffer_float" ],
[ "CLAMP_FRAGMENT_COLOR", "CONTEXT_ENUM(Color.ClampFragmentColor), extra_ARB_color_buffer_float" ],
[ "CLAMP_READ_COLOR", "CONTEXT_ENUM(Color.ClampReadColor), extra_ARB_color_buffer_float_or_glcore" ],
# GL_ARB_copy_buffer
[ "COPY_READ_BUFFER", "LOC_CUSTOM, TYPE_INT, 0, NO_EXTRA" ],
[ "COPY_WRITE_BUFFER", "LOC_CUSTOM, TYPE_INT, 0, NO_EXTRA" ],
# GL_OES_read_format
[ "IMPLEMENTATION_COLOR_READ_TYPE_OES", "LOC_CUSTOM, TYPE_INT, 0, extra_new_buffers" ],
[ "IMPLEMENTATION_COLOR_READ_FORMAT_OES", "LOC_CUSTOM, TYPE_INT, 0, extra_new_buffers" ],
# GL_EXT_framebuffer_object
[ "FRAMEBUFFER_BINDING_EXT", "BUFFER_INT(Name), NO_EXTRA" ],
[ "RENDERBUFFER_BINDING_EXT", "LOC_CUSTOM, TYPE_INT, 0, NO_EXTRA" ],
[ "MAX_RENDERBUFFER_SIZE_EXT", "CONTEXT_INT(Const.MaxRenderbufferSize), NO_EXTRA" ],
# This entry isn't spec'ed for GLES 2, but is needed for Mesa's
# GLSL:
[ "MAX_CLIP_PLANES", "CONTEXT_INT(Const.MaxClipPlanes), NO_EXTRA" ],
# GL_{ARB,OES}_vertex_array_object
[ "VERTEX_ARRAY_BINDING", "ARRAY_INT(Name), NO_EXTRA" ],
# GL_EXT_texture_filter_anisotropic
[ "MAX_TEXTURE_MAX_ANISOTROPY_EXT", "CONTEXT_FLOAT(Const.MaxTextureMaxAnisotropy), extra_EXT_texture_filter_anisotropic" ],
# GL_KHR_debug (GL 4.3)/ GL_ARB_debug_output
[ "DEBUG_OUTPUT", "LOC_CUSTOM, TYPE_BOOLEAN, 0, NO_EXTRA" ],
[ "DEBUG_OUTPUT_SYNCHRONOUS", "LOC_CUSTOM, TYPE_BOOLEAN, 0, NO_EXTRA" ],
[ "DEBUG_LOGGED_MESSAGES", "LOC_CUSTOM, TYPE_INT, 0, NO_EXTRA" ],
[ "DEBUG_NEXT_LOGGED_MESSAGE_LENGTH", "LOC_CUSTOM, TYPE_INT, 0, NO_EXTRA" ],
[ "MAX_DEBUG_LOGGED_MESSAGES", "CONST(MAX_DEBUG_LOGGED_MESSAGES), NO_EXTRA" ],
[ "MAX_DEBUG_MESSAGE_LENGTH", "CONST(MAX_DEBUG_MESSAGE_LENGTH), NO_EXTRA" ],
[ "MAX_LABEL_LENGTH", "CONST(MAX_LABEL_LENGTH), NO_EXTRA" ],
[ "MAX_DEBUG_GROUP_STACK_DEPTH", "CONST(MAX_DEBUG_GROUP_STACK_DEPTH), NO_EXTRA" ],
[ "DEBUG_GROUP_STACK_DEPTH", "LOC_CUSTOM, TYPE_INT, 0, NO_EXTRA" ],
# GL_ARB_polygon_offset_clamp / GL_EXT_polygon_offset_clamp
[ "POLYGON_OFFSET_CLAMP_EXT", "CONTEXT_FLOAT(Polygon.OffsetClamp), extra_ARB_polygon_offset_clamp" ],
]},
# Enums in OpenGL and GLES1
{ "apis": ["GL", "GLES", "GL_CORE"], "params": [
[ "MAX_LIGHTS", "CONTEXT_INT(Const.MaxLights), NO_EXTRA" ],
[ "LIGHT0", "CONTEXT_BOOL(Light.Light[0].Enabled), NO_EXTRA" ],
[ "LIGHT1", "CONTEXT_BOOL(Light.Light[1].Enabled), NO_EXTRA" ],
[ "LIGHT2", "CONTEXT_BOOL(Light.Light[2].Enabled), NO_EXTRA" ],
[ "LIGHT3", "CONTEXT_BOOL(Light.Light[3].Enabled), NO_EXTRA" ],
[ "LIGHT4", "CONTEXT_BOOL(Light.Light[4].Enabled), NO_EXTRA" ],
[ "LIGHT5", "CONTEXT_BOOL(Light.Light[5].Enabled), NO_EXTRA" ],
[ "LIGHT6", "CONTEXT_BOOL(Light.Light[6].Enabled), NO_EXTRA" ],
[ "LIGHT7", "CONTEXT_BOOL(Light.Light[7].Enabled), NO_EXTRA" ],
[ "LIGHTING", "CONTEXT_BOOL(Light.Enabled), NO_EXTRA" ],
[ "LIGHT_MODEL_AMBIENT", "CONTEXT_FIELD(Light.Model.Ambient[0], TYPE_FLOATN_4), NO_EXTRA" ],
[ "LIGHT_MODEL_TWO_SIDE", "CONTEXT_BOOL(Light.Model.TwoSide), NO_EXTRA" ],
[ "ALPHA_TEST", "CONTEXT_BOOL(Color.AlphaEnabled), NO_EXTRA" ],
[ "ALPHA_TEST_FUNC", "CONTEXT_ENUM(Color.AlphaFunc), NO_EXTRA" ],
[ "ALPHA_TEST_REF", "LOC_CUSTOM, TYPE_FLOATN, 0, extra_new_frag_clamp" ],
[ "BLEND_DST", "CONTEXT_ENUM(Color.Blend[0].DstRGB), NO_EXTRA" ],
[ "CLIP_DISTANCE0", "CONTEXT_BIT0(Transform.ClipPlanesEnabled), extra_valid_clip_distance" ],
[ "CLIP_DISTANCE1", "CONTEXT_BIT1(Transform.ClipPlanesEnabled), extra_valid_clip_distance" ],
[ "CLIP_DISTANCE2", "CONTEXT_BIT2(Transform.ClipPlanesEnabled), extra_valid_clip_distance" ],
[ "CLIP_DISTANCE3", "CONTEXT_BIT3(Transform.ClipPlanesEnabled), extra_valid_clip_distance" ],
[ "CLIP_DISTANCE4", "CONTEXT_BIT4(Transform.ClipPlanesEnabled), extra_valid_clip_distance" ],
[ "CLIP_DISTANCE5", "CONTEXT_BIT5(Transform.ClipPlanesEnabled), extra_valid_clip_distance" ],
[ "CLIP_DISTANCE6", "CONTEXT_BIT6(Transform.ClipPlanesEnabled), extra_valid_clip_distance" ],
[ "CLIP_DISTANCE7", "CONTEXT_BIT7(Transform.ClipPlanesEnabled), extra_valid_clip_distance" ],
[ "COLOR_MATERIAL", "CONTEXT_BOOL(Light.ColorMaterialEnabled), NO_EXTRA" ],
[ "CURRENT_COLOR", "CONTEXT_FIELD(Current.Attrib[VERT_ATTRIB_COLOR0][0], TYPE_FLOATN_4), extra_flush_current" ],
[ "CURRENT_NORMAL", "CONTEXT_FIELD(Current.Attrib[VERT_ATTRIB_NORMAL][0], TYPE_FLOATN_3), extra_flush_current" ],
[ "CURRENT_TEXTURE_COORDS", "LOC_CUSTOM, TYPE_FLOAT_4, 0, extra_flush_current_valid_texture_unit" ],
[ "POINT_DISTANCE_ATTENUATION", "CONTEXT_FLOAT3(Point.Params[0]), NO_EXTRA" ],
[ "FOG", "CONTEXT_BOOL(Fog.Enabled), NO_EXTRA" ],
[ "FOG_COLOR", "LOC_CUSTOM, TYPE_FLOATN_4, 0, extra_new_frag_clamp" ],
[ "FOG_DENSITY", "CONTEXT_FLOAT(Fog.Density), NO_EXTRA" ],
[ "FOG_END", "CONTEXT_FLOAT(Fog.End), NO_EXTRA" ],
[ "FOG_HINT", "CONTEXT_ENUM(Hint.Fog), NO_EXTRA" ],
[ "FOG_MODE", "CONTEXT_ENUM(Fog.Mode), NO_EXTRA" ],
[ "FOG_START", "CONTEXT_FLOAT(Fog.Start), NO_EXTRA" ],
[ "LINE_SMOOTH", "CONTEXT_BOOL(Line.SmoothFlag), NO_EXTRA" ],
[ "LINE_SMOOTH_HINT", "CONTEXT_ENUM(Hint.LineSmooth), NO_EXTRA" ],
[ "LINE_WIDTH_RANGE", "CONTEXT_FLOAT2(Const.MinLineWidthAA), NO_EXTRA" ],
[ "COLOR_LOGIC_OP", "CONTEXT_BOOL(Color.ColorLogicOpEnabled), NO_EXTRA" ],
[ "LOGIC_OP_MODE", "CONTEXT_ENUM(Color.LogicOp), NO_EXTRA" ],
[ "MATRIX_MODE", "CONTEXT_ENUM(Transform.MatrixMode), NO_EXTRA" ],
[ "MAX_MODELVIEW_STACK_DEPTH", "CONST(MAX_MODELVIEW_STACK_DEPTH), NO_EXTRA" ],
[ "MAX_PROJECTION_STACK_DEPTH", "CONST(MAX_PROJECTION_STACK_DEPTH), NO_EXTRA" ],
[ "MAX_TEXTURE_STACK_DEPTH", "CONST(MAX_TEXTURE_STACK_DEPTH), NO_EXTRA" ],
[ "MODELVIEW_MATRIX", "CONTEXT_MATRIX(ModelviewMatrixStack.Top), NO_EXTRA" ],
[ "MODELVIEW_STACK_DEPTH", "LOC_CUSTOM, TYPE_INT, offsetof(struct gl_context, ModelviewMatrixStack.Depth), NO_EXTRA" ],
[ "NORMALIZE", "CONTEXT_BOOL(Transform.Normalize), NO_EXTRA" ],
[ "PACK_SKIP_IMAGES", "CONTEXT_INT(Pack.SkipImages), NO_EXTRA" ],
[ "PERSPECTIVE_CORRECTION_HINT", "CONTEXT_ENUM(Hint.PerspectiveCorrection), NO_EXTRA" ],
[ "POINT_SIZE", "CONTEXT_FLOAT(Point.Size), NO_EXTRA" ],
[ "POINT_SIZE_RANGE", "CONTEXT_FLOAT2(Const.MinPointSizeAA), NO_EXTRA" ],
[ "POINT_SMOOTH", "CONTEXT_BOOL(Point.SmoothFlag), NO_EXTRA" ],
[ "POINT_SMOOTH_HINT", "CONTEXT_ENUM(Hint.PointSmooth), NO_EXTRA" ],
[ "POINT_SIZE_MIN_EXT", "CONTEXT_FLOAT(Point.MinSize), NO_EXTRA" ],
[ "POINT_SIZE_MAX_EXT", "CONTEXT_FLOAT(Point.MaxSize), NO_EXTRA" ],
[ "POINT_FADE_THRESHOLD_SIZE_EXT", "CONTEXT_FLOAT(Point.Threshold), NO_EXTRA" ],
[ "PROJECTION_MATRIX", "CONTEXT_MATRIX(ProjectionMatrixStack.Top), NO_EXTRA" ],
[ "PROJECTION_STACK_DEPTH", "LOC_CUSTOM, TYPE_INT, offsetof(struct gl_context, ProjectionMatrixStack.Depth), NO_EXTRA" ],
[ "RESCALE_NORMAL", "CONTEXT_BOOL(Transform.RescaleNormals), NO_EXTRA" ],
[ "SHADE_MODEL", "CONTEXT_ENUM(Light.ShadeModel), NO_EXTRA" ],
[ "TEXTURE_2D", "LOC_CUSTOM, TYPE_BOOLEAN, 0, NO_EXTRA" ],
[ "TEXTURE_MATRIX", "LOC_CUSTOM, TYPE_MATRIX, 0, extra_valid_texture_unit" ],
[ "TEXTURE_STACK_DEPTH", "LOC_CUSTOM, TYPE_INT, 0, extra_valid_texture_unit" ],
[ "VERTEX_ARRAY", "ARRAY_BOOL(VertexAttrib[VERT_ATTRIB_POS].Enabled), NO_EXTRA" ],
[ "VERTEX_ARRAY_SIZE", "ARRAY_INT(VertexAttrib[VERT_ATTRIB_POS].Size), NO_EXTRA" ],
[ "VERTEX_ARRAY_TYPE", "ARRAY_ENUM(VertexAttrib[VERT_ATTRIB_POS].Type), NO_EXTRA" ],
[ "VERTEX_ARRAY_STRIDE", "ARRAY_INT(VertexAttrib[VERT_ATTRIB_POS].Stride), NO_EXTRA" ],
[ "NORMAL_ARRAY", "ARRAY_BOOL(VertexAttrib[VERT_ATTRIB_NORMAL].Enabled), NO_EXTRA" ],
[ "NORMAL_ARRAY_TYPE", "ARRAY_ENUM(VertexAttrib[VERT_ATTRIB_NORMAL].Type), NO_EXTRA" ],
[ "NORMAL_ARRAY_STRIDE", "ARRAY_INT(VertexAttrib[VERT_ATTRIB_NORMAL].Stride), NO_EXTRA" ],
[ "COLOR_ARRAY", "ARRAY_BOOL(VertexAttrib[VERT_ATTRIB_COLOR0].Enabled), NO_EXTRA" ],
[ "COLOR_ARRAY_SIZE", "LOC_CUSTOM, TYPE_INT, 0, NO_EXTRA" ],
[ "COLOR_ARRAY_TYPE", "ARRAY_ENUM(VertexAttrib[VERT_ATTRIB_COLOR0].Type), NO_EXTRA" ],
[ "COLOR_ARRAY_STRIDE", "ARRAY_INT(VertexAttrib[VERT_ATTRIB_COLOR0].Stride), NO_EXTRA" ],
[ "TEXTURE_COORD_ARRAY", "LOC_CUSTOM, TYPE_BOOLEAN, offsetof(struct gl_array_attributes, Enabled), NO_EXTRA" ],
[ "TEXTURE_COORD_ARRAY_SIZE", "LOC_CUSTOM, TYPE_INT, offsetof(struct gl_array_attributes, Size), NO_EXTRA" ],
[ "TEXTURE_COORD_ARRAY_TYPE", "LOC_CUSTOM, TYPE_ENUM, offsetof(struct gl_array_attributes, Type), NO_EXTRA" ],
[ "TEXTURE_COORD_ARRAY_STRIDE", "LOC_CUSTOM, TYPE_INT, offsetof(struct gl_array_attributes, Stride), NO_EXTRA" ],
# GL_ARB_multitexture
[ "MAX_TEXTURE_UNITS", "CONTEXT_INT(Const.MaxTextureUnits), NO_EXTRA" ],
[ "CLIENT_ACTIVE_TEXTURE", "LOC_CUSTOM, TYPE_INT, 0, NO_EXTRA" ],
# GL_ARB_texture_cube_map
[ "TEXTURE_CUBE_MAP_ARB", "LOC_CUSTOM, TYPE_BOOLEAN, 0, NO_EXTRA" ],
# S, T, and R are always set at the same time
[ "TEXTURE_GEN_STR_OES", "LOC_TEXUNIT, TYPE_BIT_0, offsetof(struct gl_texture_unit, TexGenEnabled), NO_EXTRA" ],
# GL_ARB_multisample
[ "MULTISAMPLE_ARB", "CONTEXT_BOOL(Multisample.Enabled), NO_EXTRA" ],
[ "SAMPLE_ALPHA_TO_ONE_ARB", "CONTEXT_BOOL(Multisample.SampleAlphaToOne), NO_EXTRA" ],
# GL_ARB_vertex_buffer_object
[ "VERTEX_ARRAY_BUFFER_BINDING_ARB", "LOC_CUSTOM, TYPE_INT, offsetof(struct gl_vertex_array_object, BufferBinding[VERT_ATTRIB_POS].BufferObj), NO_EXTRA" ],
[ "NORMAL_ARRAY_BUFFER_BINDING_ARB", "LOC_CUSTOM, TYPE_INT, offsetof(struct gl_vertex_array_object, BufferBinding[VERT_ATTRIB_NORMAL].BufferObj), NO_EXTRA" ],
[ "COLOR_ARRAY_BUFFER_BINDING_ARB", "LOC_CUSTOM, TYPE_INT, offsetof(struct gl_vertex_array_object, BufferBinding[VERT_ATTRIB_COLOR0].BufferObj), NO_EXTRA" ],
[ "TEXTURE_COORD_ARRAY_BUFFER_BINDING_ARB", "LOC_CUSTOM, TYPE_INT, NO_OFFSET, NO_EXTRA" ],
# GL_OES_point_sprite
[ "POINT_SPRITE_NV", "CONTEXT_BOOL(Point.PointSprite), extra_NV_point_sprite_ARB_point_sprite" ],
]},
{ "apis": ["GLES"], "params": [
# OES_point_size_array
[ "POINT_SIZE_ARRAY_OES", "ARRAY_FIELD(VertexAttrib[VERT_ATTRIB_POINT_SIZE].Enabled, TYPE_BOOLEAN), NO_EXTRA" ],
[ "POINT_SIZE_ARRAY_TYPE_OES", "ARRAY_FIELD(VertexAttrib[VERT_ATTRIB_POINT_SIZE].Type, TYPE_ENUM), NO_EXTRA" ],
[ "POINT_SIZE_ARRAY_STRIDE_OES", "ARRAY_FIELD(VertexAttrib[VERT_ATTRIB_POINT_SIZE].Stride, TYPE_INT), NO_EXTRA" ],
[ "POINT_SIZE_ARRAY_BUFFER_BINDING_OES", "LOC_CUSTOM, TYPE_INT, 0, NO_EXTRA" ],
]},
# Enums in GLES2, GLES3
{ "apis": ["GLES2", "GLES3"], "params": [
[ "GPU_DISJOINT_EXT", "LOC_CUSTOM, TYPE_INT, 0, extra_EXT_disjoint_timer_query" ],
]},
{ "apis": ["GL", "GL_CORE", "GLES2"], "params": [
# == GL_MAX_TEXTURE_COORDS_NV
[ "MAX_TEXTURE_COORDS_ARB", "CONTEXT_INT(Const.MaxTextureCoordUnits), extra_ARB_fragment_program" ],
[ "PACK_IMAGE_HEIGHT", "CONTEXT_INT(Pack.ImageHeight), NO_EXTRA" ],
[ "PACK_ROW_LENGTH", "CONTEXT_INT(Pack.RowLength), NO_EXTRA" ],
[ "PACK_SKIP_PIXELS", "CONTEXT_INT(Pack.SkipPixels), NO_EXTRA" ],
[ "PACK_SKIP_ROWS", "CONTEXT_INT(Pack.SkipRows), NO_EXTRA" ],
[ "UNPACK_ROW_LENGTH", "CONTEXT_INT(Unpack.RowLength), NO_EXTRA" ],
[ "UNPACK_SKIP_PIXELS", "CONTEXT_INT(Unpack.SkipPixels), NO_EXTRA" ],
[ "UNPACK_SKIP_ROWS", "CONTEXT_INT(Unpack.SkipRows), NO_EXTRA" ],
[ "UNPACK_SKIP_IMAGES", "CONTEXT_INT(Unpack.SkipImages), NO_EXTRA" ],
[ "UNPACK_IMAGE_HEIGHT", "CONTEXT_INT(Unpack.ImageHeight), NO_EXTRA" ],
# GL_ARB_draw_buffers
[ "MAX_DRAW_BUFFERS_ARB", "CONTEXT_INT(Const.MaxDrawBuffers), NO_EXTRA" ],
# GL_EXT_framebuffer_object / GL_NV_fbo_color_attachments
[ "MAX_COLOR_ATTACHMENTS", "CONTEXT_INT(Const.MaxColorAttachments), NO_EXTRA" ],
# GL_ARB_draw_buffers / GL_NV_draw_buffers (for ES 2.0)
[ "DRAW_BUFFER0_ARB", "BUFFER_ENUM(ColorDrawBuffer[0]), NO_EXTRA" ],
[ "DRAW_BUFFER1_ARB", "BUFFER_ENUM(ColorDrawBuffer[1]), extra_valid_draw_buffer" ],
[ "DRAW_BUFFER2_ARB", "BUFFER_ENUM(ColorDrawBuffer[2]), extra_valid_draw_buffer" ],
[ "DRAW_BUFFER3_ARB", "BUFFER_ENUM(ColorDrawBuffer[3]), extra_valid_draw_buffer" ],
[ "DRAW_BUFFER4_ARB", "BUFFER_ENUM(ColorDrawBuffer[4]), extra_valid_draw_buffer" ],
[ "DRAW_BUFFER5_ARB", "BUFFER_ENUM(ColorDrawBuffer[5]), extra_valid_draw_buffer" ],
[ "DRAW_BUFFER6_ARB", "BUFFER_ENUM(ColorDrawBuffer[6]), extra_valid_draw_buffer" ],
[ "DRAW_BUFFER7_ARB", "BUFFER_ENUM(ColorDrawBuffer[7]), extra_valid_draw_buffer" ],
[ "BLEND_COLOR_EXT", "LOC_CUSTOM, TYPE_FLOATN_4, 0, extra_new_frag_clamp" ],
# GL_ARB_fragment_program
# == GL_MAX_TEXTURE_IMAGE_UNITS_NV
[ "MAX_TEXTURE_IMAGE_UNITS_ARB", "CONTEXT_INT(Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits), extra_ARB_fragment_program" ],
[ "MAX_VERTEX_TEXTURE_IMAGE_UNITS_ARB", "CONTEXT_INT(Const.Program[MESA_SHADER_VERTEX].MaxTextureImageUnits), extra_ARB_vertex_shader" ],
[ "MAX_COMBINED_TEXTURE_IMAGE_UNITS_ARB", "CONTEXT_INT(Const.MaxCombinedTextureImageUnits), extra_ARB_vertex_shader" ],
# GL_ARB_shader_objects
# Actually, this token isn't part of GL_ARB_shader_objects, but is
# close enough for now.
[ "CURRENT_PROGRAM", "LOC_CUSTOM, TYPE_INT, 0, NO_EXTRA" ],
# OpenGL 2.0
[ "STENCIL_BACK_FUNC", "CONTEXT_ENUM(Stencil.Function[1]), NO_EXTRA" ],
[ "STENCIL_BACK_VALUE_MASK", "CONTEXT_UINT(Stencil.ValueMask[1]), NO_EXTRA" ],
[ "STENCIL_BACK_WRITEMASK", "CONTEXT_UINT(Stencil.WriteMask[1]), NO_EXTRA" ],
[ "STENCIL_BACK_REF", "LOC_CUSTOM, TYPE_UINT, NO_OFFSET, NO_EXTRA" ],
[ "STENCIL_BACK_FAIL", "CONTEXT_ENUM(Stencil.FailFunc[1]), NO_EXTRA" ],
[ "STENCIL_BACK_PASS_DEPTH_FAIL", "CONTEXT_ENUM(Stencil.ZFailFunc[1]), NO_EXTRA" ],
[ "STENCIL_BACK_PASS_DEPTH_PASS", "CONTEXT_ENUM(Stencil.ZPassFunc[1]), NO_EXTRA" ],
[ "MAX_VERTEX_ATTRIBS_ARB", "CONTEXT_INT(Const.Program[MESA_SHADER_VERTEX].MaxAttribs), extra_ARB_vertex_program_api_es2" ],
# OES_texture_3D
[ "TEXTURE_BINDING_3D", "LOC_CUSTOM, TYPE_INT, TEXTURE_3D_INDEX, NO_EXTRA" ],
[ "MAX_3D_TEXTURE_SIZE", "LOC_CUSTOM, TYPE_INT, offsetof(struct gl_context, Const.Max3DTextureLevels), NO_EXTRA" ],
# GL_ARB_fragment_program/OES_standard_derivatives
[ "FRAGMENT_SHADER_DERIVATIVE_HINT", "CONTEXT_ENUM(Hint.FragmentShaderDerivative), extra_ARB_fragment_shader" ],
# GL_NV_read_buffer
[ "READ_BUFFER", "LOC_CUSTOM, TYPE_ENUM, NO_OFFSET, extra_NV_read_buffer_api_gl" ],
# GL_ARB_ES2_compatibility
[ "SHADER_COMPILER", "CONST(1), extra_ARB_ES2_compatibility_api_es2" ],
[ "MAX_VARYING_VECTORS", "CONTEXT_INT(Const.MaxVarying), extra_ARB_ES2_compatibility_api_es2" ],
[ "MAX_VERTEX_UNIFORM_VECTORS", "LOC_CUSTOM, TYPE_INT, 0, extra_ARB_ES2_compatibility_api_es2" ],
[ "MAX_FRAGMENT_UNIFORM_VECTORS", "LOC_CUSTOM, TYPE_INT, 0, extra_ARB_ES2_compatibility_api_es2" ],
[ "NUM_SHADER_BINARY_FORMATS", "CONST(0), extra_ARB_ES2_compatibility_api_es2" ],
[ "SHADER_BINARY_FORMATS", "LOC_CUSTOM, TYPE_INVALID, 0, extra_ARB_ES2_compatibility_api_es2" ],
# GL_ARB_get_program_binary / GL_OES_get_program_binary
[ "NUM_PROGRAM_BINARY_FORMATS", "CONTEXT_UINT(Const.NumProgramBinaryFormats), NO_EXTRA" ],
[ "PROGRAM_BINARY_FORMATS", "LOC_CUSTOM, TYPE_INT_N, 0, NO_EXTRA" ],
# GL_INTEL_performance_query
[ "PERFQUERY_QUERY_NAME_LENGTH_MAX_INTEL", "CONST(MAX_PERFQUERY_QUERY_NAME_LENGTH), extra_INTEL_performance_query" ],
[ "PERFQUERY_COUNTER_NAME_LENGTH_MAX_INTEL", "CONST(MAX_PERFQUERY_COUNTER_NAME_LENGTH), extra_INTEL_performance_query" ],
[ "PERFQUERY_COUNTER_DESC_LENGTH_MAX_INTEL", "CONST(MAX_PERFQUERY_COUNTER_DESC_LENGTH), extra_INTEL_performance_query" ],
[ "PERFQUERY_GPA_EXTENDED_COUNTERS_INTEL", "CONST(PERFQUERY_HAVE_GPA_EXTENDED_COUNTERS), extra_INTEL_performance_query" ],
# GL_KHR_context_flush_control
[ "CONTEXT_RELEASE_BEHAVIOR", "CONTEXT_ENUM(Const.ContextReleaseBehavior), NO_EXTRA" ],
# blend_func_extended
[ "MAX_DUAL_SOURCE_DRAW_BUFFERS", "CONTEXT_INT(Const.MaxDualSourceDrawBuffers), extra_ARB_blend_func_extended" ],
# GL_KHR_blend_equation_advanced_coherent
[ "BLEND_ADVANCED_COHERENT_KHR", "CONTEXT_BOOL(Color.BlendCoherent), extra_KHR_blend_equation_advanced_coherent" ],
# GL_ARB_robustness / GL_KHR_robustness
[ "CONTEXT_ROBUST_ACCESS", "CONTEXT_ENUM(Const.RobustAccess), extra_KHR_robustness" ],
[ "RESET_NOTIFICATION_STRATEGY_ARB", "CONTEXT_ENUM(Const.ResetStrategy), extra_KHR_robustness_or_GL" ],
]},
# GLES3 is not a typo.
{ "apis": ["GL", "GLES", "GLES3", "GL_CORE"], "params": [
# GL_EXT_texture_lod_bias
[ "MAX_TEXTURE_LOD_BIAS_EXT", "CONTEXT_FLOAT(Const.MaxTextureLodBias), NO_EXTRA" ],
]},
# Enums in OpenGL and ES 3.0
{ "apis": ["GL", "GL_CORE", "GLES3"], "params": [
# GL 3.0 / GLES3
[ "NUM_EXTENSIONS", "LOC_CUSTOM, TYPE_INT, 0, extra_gl30_es3" ],
[ "MAJOR_VERSION", "LOC_CUSTOM, TYPE_INT, 0, extra_gl30_es3" ],
[ "MINOR_VERSION", "LOC_CUSTOM, TYPE_INT, 0, extra_gl30_es3" ],
# GL 3.2 / GLES3
[ "MAX_VERTEX_OUTPUT_COMPONENTS", "CONTEXT_INT(Const.Program[MESA_SHADER_VERTEX].MaxOutputComponents), extra_gl32_es3" ],
[ "MAX_FRAGMENT_INPUT_COMPONENTS", "CONTEXT_INT(Const.Program[MESA_SHADER_FRAGMENT].MaxInputComponents), extra_gl32_es3" ],
# GL_ARB_ES3_compatibility
[ "MAX_ELEMENT_INDEX", "CONTEXT_INT64(Const.MaxElementIndex), extra_ARB_ES3_compatibility_api_es3"],
[ "PRIMITIVE_RESTART_FIXED_INDEX", "CONTEXT_BOOL(Array.PrimitiveRestartFixedIndex), extra_ARB_ES3_compatibility_api_es3" ],
# GL_ARB_fragment_shader
[ "MAX_FRAGMENT_UNIFORM_COMPONENTS_ARB", "CONTEXT_INT(Const.Program[MESA_SHADER_FRAGMENT].MaxUniformComponents), extra_ARB_fragment_shader" ],
# GL_ARB_framebuffer_object
[ "MAX_SAMPLES", "CONTEXT_INT(Const.MaxSamples), extra_ARB_framebuffer_object_EXT_framebuffer_multisample" ],
# GL_ARB_sampler_objects / GL 3.3 / GLES 3.0
[ "SAMPLER_BINDING", "LOC_CUSTOM, TYPE_INT, GL_SAMPLER_BINDING, NO_EXTRA" ],
# GL_ARB_sync
[ "MAX_SERVER_WAIT_TIMEOUT", "CONTEXT_INT64(Const.MaxServerWaitTimeout), extra_ARB_sync" ],
# GL_ARB_transform_feedback2
[ "TRANSFORM_FEEDBACK_BUFFER_PAUSED", "LOC_CUSTOM, TYPE_BOOLEAN, 0, extra_ARB_transform_feedback2_api_es3" ],
[ "TRANSFORM_FEEDBACK_BUFFER_ACTIVE", "LOC_CUSTOM, TYPE_BOOLEAN, 0, extra_ARB_transform_feedback2_api_es3" ],
[ "TRANSFORM_FEEDBACK_BINDING", "LOC_CUSTOM, TYPE_INT, 0, extra_ARB_transform_feedback2_api_es3" ],
# GL_ARB_uniform_buffer_object
[ "MAX_VERTEX_UNIFORM_BLOCKS", "CONTEXT_INT(Const.Program[MESA_SHADER_VERTEX].MaxUniformBlocks), extra_ARB_uniform_buffer_object" ],
[ "MAX_FRAGMENT_UNIFORM_BLOCKS", "CONTEXT_INT(Const.Program[MESA_SHADER_FRAGMENT].MaxUniformBlocks), extra_ARB_uniform_buffer_object" ],
[ "MAX_COMBINED_UNIFORM_BLOCKS", "CONTEXT_INT(Const.MaxCombinedUniformBlocks), extra_ARB_uniform_buffer_object" ],
[ "MAX_UNIFORM_BLOCK_SIZE", "CONTEXT_INT(Const.MaxUniformBlockSize), extra_ARB_uniform_buffer_object" ],
[ "MAX_UNIFORM_BUFFER_BINDINGS", "CONTEXT_INT(Const.MaxUniformBufferBindings), extra_ARB_uniform_buffer_object" ],
[ "MAX_COMBINED_VERTEX_UNIFORM_COMPONENTS", "CONTEXT_INT(Const.Program[MESA_SHADER_VERTEX].MaxCombinedUniformComponents), extra_ARB_uniform_buffer_object" ],
[ "MAX_COMBINED_FRAGMENT_UNIFORM_COMPONENTS", "CONTEXT_INT(Const.Program[MESA_SHADER_FRAGMENT].MaxCombinedUniformComponents), extra_ARB_uniform_buffer_object" ],
[ "UNIFORM_BUFFER_OFFSET_ALIGNMENT", "CONTEXT_INT(Const.UniformBufferOffsetAlignment), extra_ARB_uniform_buffer_object" ],
[ "UNIFORM_BUFFER_BINDING", "LOC_CUSTOM, TYPE_INT, 0, extra_ARB_uniform_buffer_object" ],
# GL_ARB_vertex_shader
[ "MAX_VERTEX_UNIFORM_COMPONENTS_ARB", "CONTEXT_INT(Const.Program[MESA_SHADER_VERTEX].MaxUniformComponents), extra_ARB_vertex_shader" ],
[ "MAX_VARYING_FLOATS_ARB", "LOC_CUSTOM, TYPE_INT, 0, extra_ARB_vertex_shader" ],
# GL_EXT_framebuffer_blit
# NOTE: GL_DRAW_FRAMEBUFFER_BINDING_EXT == GL_FRAMEBUFFER_BINDING_EXT
[ "READ_FRAMEBUFFER_BINDING_EXT", "LOC_CUSTOM, TYPE_INT, 0, NO_EXTRA" ],
# GL_EXT_gpu_shader4 / GLSL 1.30
[ "MIN_PROGRAM_TEXEL_OFFSET", "CONTEXT_INT(Const.MinProgramTexelOffset), extra_GLSL_130_es3" ],
[ "MAX_PROGRAM_TEXEL_OFFSET", "CONTEXT_INT(Const.MaxProgramTexelOffset), extra_GLSL_130_es3" ],
# GL_EXT_pixel_buffer_object
[ "PIXEL_PACK_BUFFER_BINDING_EXT", "LOC_CUSTOM, TYPE_INT, 0, extra_EXT_pixel_buffer_object" ],
[ "PIXEL_UNPACK_BUFFER_BINDING_EXT", "LOC_CUSTOM, TYPE_INT, 0, extra_EXT_pixel_buffer_object" ],
# GL_EXT_texture_array
[ "TEXTURE_BINDING_2D_ARRAY", "LOC_CUSTOM, TYPE_INT, TEXTURE_2D_ARRAY_INDEX, extra_EXT_texture_array_es3" ],
[ "MAX_ARRAY_TEXTURE_LAYERS_EXT", "CONTEXT_INT(Const.MaxArrayTextureLayers), extra_EXT_texture_array_es3" ],
# GL_EXT_transform_feedback
[ "TRANSFORM_FEEDBACK_BUFFER_BINDING", "LOC_CUSTOM, TYPE_INT, 0, extra_EXT_transform_feedback" ],
[ "RASTERIZER_DISCARD", "CONTEXT_BOOL(RasterDiscard), extra_EXT_transform_feedback" ],
[ "MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS", "CONTEXT_INT(Const.MaxTransformFeedbackInterleavedComponents), extra_EXT_transform_feedback" ],
[ "MAX_TRANSFORM_FEEDBACK_SEPARATE_ATTRIBS", "CONTEXT_INT(Const.MaxTransformFeedbackBuffers), extra_EXT_transform_feedback" ],
[ "MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS", "CONTEXT_INT(Const.MaxTransformFeedbackSeparateComponents), extra_EXT_transform_feedback" ],
# GL_EXT_window_rectangles
[ "MAX_WINDOW_RECTANGLES_EXT", "CONTEXT_INT(Const.MaxWindowRectangles), extra_EXT_window_rectangles" ],
[ "NUM_WINDOW_RECTANGLES_EXT", "CONTEXT_INT(Scissor.NumWindowRects), extra_EXT_window_rectangles" ],
[ "WINDOW_RECTANGLE_MODE_EXT", "CONTEXT_ENUM(Scissor.WindowRectMode), extra_EXT_window_rectangles" ],
]},
{ "apis": ["GLES", "GLES2"], "params": [
# GL_EXT_shader_framebuffer_fetch. Should be true if the MESA framebuffer
# fetch extension is supported since the latter imposes no restrictions on
# non-uniform per-sample discard.
[ "FRAGMENT_SHADER_DISCARDS_SAMPLES_EXT", "CONTEXT_BOOL(Extensions.MESA_shader_framebuffer_fetch), extra_EXT_shader_framebuffer_fetch" ],
# GL_OES_EGL_image_external
[ "TEXTURE_BINDING_EXTERNAL_OES", "LOC_CUSTOM, TYPE_INT, TEXTURE_EXTERNAL_INDEX, extra_OES_EGL_image_external" ],
[ "TEXTURE_EXTERNAL_OES", "LOC_CUSTOM, TYPE_BOOLEAN, 0, extra_OES_EGL_image_external" ],
]},
# Enums in OpenGL and ES 3.1
{ "apis": ["GL", "GL_CORE", "GLES31"], "params": [
# GL_ARB_texture_buffer_object / GL_OES_texture_buffer
[ "MAX_TEXTURE_BUFFER_SIZE_ARB", "CONTEXT_INT(Const.MaxTextureBufferSize), extra_texture_buffer_object" ],
[ "TEXTURE_BINDING_BUFFER_ARB", "LOC_CUSTOM, TYPE_INT, 0, extra_texture_buffer_object" ],
[ "TEXTURE_BUFFER_DATA_STORE_BINDING_ARB", "LOC_CUSTOM, TYPE_INT, TEXTURE_BUFFER_INDEX, extra_texture_buffer_object" ],
[ "TEXTURE_BUFFER_FORMAT_ARB", "LOC_CUSTOM, TYPE_INT, 0, extra_texture_buffer_object" ],
[ "TEXTURE_BUFFER_ARB", "LOC_CUSTOM, TYPE_INT, 0, extra_texture_buffer_object" ],
# GL_ARB_texture_buffer_range
[ "TEXTURE_BUFFER_OFFSET_ALIGNMENT", "CONTEXT_INT(Const.TextureBufferOffsetAlignment), extra_ARB_texture_buffer_range" ],
# GL_ARB_shader_image_load_store / GLES 3.1
[ "MAX_IMAGE_UNITS", "CONTEXT_INT(Const.MaxImageUnits), extra_ARB_shader_image_load_store" ],
[ "MAX_VERTEX_IMAGE_UNIFORMS", "CONTEXT_INT(Const.Program[MESA_SHADER_VERTEX].MaxImageUniforms), extra_ARB_shader_image_load_store" ],
[ "MAX_FRAGMENT_IMAGE_UNIFORMS", "CONTEXT_INT(Const.Program[MESA_SHADER_FRAGMENT].MaxImageUniforms), extra_ARB_shader_image_load_store" ],
[ "MAX_COMBINED_IMAGE_UNIFORMS", "CONTEXT_INT(Const.MaxCombinedImageUniforms), extra_ARB_shader_image_load_store" ],
# GL_ARB_shader_atomic_counters / GLES 3.1
[ "ATOMIC_COUNTER_BUFFER_BINDING", "LOC_CUSTOM, TYPE_INT, 0, extra_ARB_shader_atomic_counters" ],
[ "MAX_ATOMIC_COUNTER_BUFFER_BINDINGS", "CONTEXT_INT(Const.MaxAtomicBufferBindings), extra_ARB_shader_atomic_counters" ],
[ "MAX_ATOMIC_COUNTER_BUFFER_SIZE", "CONTEXT_INT(Const.MaxAtomicBufferSize), extra_ARB_shader_atomic_counters" ],
[ "MAX_VERTEX_ATOMIC_COUNTER_BUFFERS", "CONTEXT_INT(Const.Program[MESA_SHADER_VERTEX].MaxAtomicBuffers), extra_ARB_shader_atomic_counters" ],
[ "MAX_VERTEX_ATOMIC_COUNTERS", "CONTEXT_INT(Const.Program[MESA_SHADER_VERTEX].MaxAtomicCounters), extra_ARB_shader_atomic_counters" ],
[ "MAX_FRAGMENT_ATOMIC_COUNTER_BUFFERS", "CONTEXT_INT(Const.Program[MESA_SHADER_FRAGMENT].MaxAtomicBuffers), extra_ARB_shader_atomic_counters" ],
[ "MAX_FRAGMENT_ATOMIC_COUNTERS", "CONTEXT_INT(Const.Program[MESA_SHADER_FRAGMENT].MaxAtomicCounters), extra_ARB_shader_atomic_counters" ],
[ "MAX_COMBINED_ATOMIC_COUNTER_BUFFERS", "CONTEXT_INT(Const.MaxCombinedAtomicBuffers), extra_ARB_shader_atomic_counters" ],
[ "MAX_COMBINED_ATOMIC_COUNTERS", "CONTEXT_INT(Const.MaxCombinedAtomicCounters), extra_ARB_shader_atomic_counters" ],
# GL_ARB_texture_multisample / GLES 3.1
[ "TEXTURE_BINDING_2D_MULTISAMPLE", "LOC_CUSTOM, TYPE_INT, TEXTURE_2D_MULTISAMPLE_INDEX, extra_ARB_texture_multisample" ],
[ "MAX_COLOR_TEXTURE_SAMPLES", "CONTEXT_INT(Const.MaxColorTextureSamples), extra_ARB_texture_multisample" ],
[ "MAX_DEPTH_TEXTURE_SAMPLES", "CONTEXT_INT(Const.MaxDepthTextureSamples), extra_ARB_texture_multisample" ],
[ "MAX_INTEGER_SAMPLES", "CONTEXT_INT(Const.MaxIntegerSamples), extra_ARB_texture_multisample" ],
[ "SAMPLE_MASK", "CONTEXT_BOOL(Multisample.SampleMask), extra_ARB_texture_multisample" ],
[ "MAX_SAMPLE_MASK_WORDS", "CONST(1), extra_ARB_texture_multisample" ],
# GL_ARB_texture_multisample / ES 3.1 with GL_OES_texture_storage_multisample_2d_array
[ "TEXTURE_BINDING_2D_MULTISAMPLE_ARRAY", "LOC_CUSTOM, TYPE_INT, TEXTURE_2D_MULTISAMPLE_ARRAY_INDEX, extra_ARB_texture_multisample" ],
# GL_ARB_texture_gather / GLES 3.1
[ "MIN_PROGRAM_TEXTURE_GATHER_OFFSET", "CONTEXT_INT(Const.MinProgramTextureGatherOffset), extra_ARB_texture_gather"],
[ "MAX_PROGRAM_TEXTURE_GATHER_OFFSET", "CONTEXT_INT(Const.MaxProgramTextureGatherOffset), extra_ARB_texture_gather"],
# GL_ARB_compute_shader / GLES 3.1
[ "MAX_COMPUTE_WORK_GROUP_INVOCATIONS", "CONTEXT_INT(Const.MaxComputeWorkGroupInvocations), extra_ARB_compute_shader_es31" ],
[ "MAX_COMPUTE_UNIFORM_BLOCKS", "CONTEXT_INT(Const.Program[MESA_SHADER_COMPUTE].MaxUniformBlocks), extra_ARB_compute_shader_es31" ],
[ "MAX_COMPUTE_TEXTURE_IMAGE_UNITS", "CONTEXT_INT(Const.Program[MESA_SHADER_COMPUTE].MaxTextureImageUnits), extra_ARB_compute_shader_es31" ],
[ "MAX_COMPUTE_ATOMIC_COUNTER_BUFFERS", "CONTEXT_INT(Const.Program[MESA_SHADER_COMPUTE].MaxAtomicBuffers), extra_ARB_compute_shader_es31" ],
[ "MAX_COMPUTE_ATOMIC_COUNTERS", "CONTEXT_INT(Const.Program[MESA_SHADER_COMPUTE].MaxAtomicCounters), extra_ARB_compute_shader_es31" ],
[ "MAX_COMPUTE_SHARED_MEMORY_SIZE", "CONTEXT_INT(Const.MaxComputeSharedMemorySize), extra_ARB_compute_shader_es31" ],
[ "MAX_COMPUTE_UNIFORM_COMPONENTS", "CONTEXT_INT(Const.Program[MESA_SHADER_COMPUTE].MaxUniformComponents), extra_ARB_compute_shader_es31" ],
[ "MAX_COMPUTE_IMAGE_UNIFORMS", "CONTEXT_INT(Const.Program[MESA_SHADER_COMPUTE].MaxImageUniforms), extra_ARB_compute_shader_es31" ],
[ "DISPATCH_INDIRECT_BUFFER_BINDING", "LOC_CUSTOM, TYPE_INT, 0, extra_ARB_compute_shader_es31" ],
[ "MAX_COMBINED_COMPUTE_UNIFORM_COMPONENTS", "CONTEXT_INT(Const.Program[MESA_SHADER_COMPUTE].MaxCombinedUniformComponents), extra_ARB_compute_shader_es31" ],
# GL_ARB_framebuffer_no_attachments / GLES 3.1
["MAX_FRAMEBUFFER_WIDTH", "CONTEXT_INT(Const.MaxFramebufferWidth), extra_ARB_framebuffer_no_attachments"],
["MAX_FRAMEBUFFER_HEIGHT", "CONTEXT_INT(Const.MaxFramebufferHeight), extra_ARB_framebuffer_no_attachments"],
["MAX_FRAMEBUFFER_SAMPLES", "CONTEXT_INT(Const.MaxFramebufferSamples), extra_ARB_framebuffer_no_attachments"],
# GL_ARB_framebuffer_no_attachments / geometry shader
[ "MAX_FRAMEBUFFER_LAYERS", "CONTEXT_INT(Const.MaxFramebufferLayers), extra_ARB_framebuffer_no_attachments_and_geometry_shader" ],
# GL_ARB_explicit_uniform_location / GLES 3.1
[ "MAX_UNIFORM_LOCATIONS", "CONTEXT_INT(Const.MaxUserAssignableUniformLocations), extra_ARB_explicit_uniform_location" ],
# GL_ARB_separate_shader_objects / GLES 3.1
[ "PROGRAM_PIPELINE_BINDING", "LOC_CUSTOM, TYPE_INT, GL_PROGRAM_PIPELINE_BINDING, NO_EXTRA" ],
# GL_ARB_vertex_attrib_binding / GLES 3.1
[ "MAX_VERTEX_ATTRIB_RELATIVE_OFFSET", "CONTEXT_ENUM(Const.MaxVertexAttribRelativeOffset), NO_EXTRA" ],
[ "MAX_VERTEX_ATTRIB_BINDINGS", "CONTEXT_ENUM(Const.MaxVertexAttribBindings), NO_EXTRA" ],
# GL 4.4 / GLES 3.1
[ "MAX_VERTEX_ATTRIB_STRIDE", "CONTEXT_ENUM(Const.MaxVertexAttribStride), NO_EXTRA" ],
# GL_ARB_shader_storage_buffer_object / GLES 3.1
[ "MAX_VERTEX_SHADER_STORAGE_BLOCKS", "CONTEXT_INT(Const.Program[MESA_SHADER_VERTEX].MaxShaderStorageBlocks), extra_ARB_shader_storage_buffer_object_es31" ],
[ "MAX_FRAGMENT_SHADER_STORAGE_BLOCKS", "CONTEXT_INT(Const.Program[MESA_SHADER_FRAGMENT].MaxShaderStorageBlocks), extra_ARB_shader_storage_buffer_object_es31" ],
[ "MAX_COMPUTE_SHADER_STORAGE_BLOCKS", "CONTEXT_INT(Const.Program[MESA_SHADER_COMPUTE].MaxShaderStorageBlocks), extra_ARB_shader_storage_buffer_object_es31" ],
[ "MAX_COMBINED_SHADER_STORAGE_BLOCKS", "CONTEXT_INT(Const.MaxCombinedShaderStorageBlocks), extra_ARB_shader_storage_buffer_object_es31" ],
[ "MAX_SHADER_STORAGE_BLOCK_SIZE", "CONTEXT_INT(Const.MaxShaderStorageBlockSize), extra_ARB_shader_storage_buffer_object_es31" ],
[ "MAX_SHADER_STORAGE_BUFFER_BINDINGS", "CONTEXT_INT(Const.MaxShaderStorageBufferBindings), extra_ARB_shader_storage_buffer_object_es31" ],
[ "SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT", "CONTEXT_INT(Const.ShaderStorageBufferOffsetAlignment), extra_ARB_shader_storage_buffer_object_es31" ],
[ "SHADER_STORAGE_BUFFER_BINDING", "LOC_CUSTOM, TYPE_INT, 0, extra_ARB_shader_storage_buffer_object_es31" ],
# GL_ARB_shader_image_load_store / GL_ARB_shader_storage_buffer_object / GLES 3.1
# (MAX_COMBINED_IMAGE_UNITS_AND_FRAGMENT_OUTPUTS in GL_ARB_shader_image_load_store)
[ "MAX_COMBINED_SHADER_OUTPUT_RESOURCES", "CONTEXT_INT(Const.MaxCombinedShaderOutputResources), extra_ARB_shader_image_load_store_shader_storage_buffer_object_es31" ],
# GL_ARB_texture_cube_map_array
[ "TEXTURE_BINDING_CUBE_MAP_ARRAY_ARB", "LOC_CUSTOM, TYPE_INT, TEXTURE_CUBE_ARRAY_INDEX, extra_ARB_texture_cube_map_array_OES_texture_cube_map_array" ],
# GL_NUM_SHADING_LANGUAGE_VERSIONS
[ "NUM_SHADING_LANGUAGE_VERSIONS", "LOC_CUSTOM, TYPE_INT, 0, extra_version_43" ],
]},
# Enums in OpenGL Core profile and ES 3.0
{ "apis": ["GL_CORE", "GLES3"], "params": [
# GL_ARB_gpu_shader5 / GL_OES_shader_multisample_interpolation
[ "MIN_FRAGMENT_INTERPOLATION_OFFSET", "CONTEXT_FLOAT(Const.MinFragmentInterpolationOffset), extra_ARB_gpu_shader5_or_OES_sample_variables" ],
[ "MAX_FRAGMENT_INTERPOLATION_OFFSET", "CONTEXT_FLOAT(Const.MaxFragmentInterpolationOffset), extra_ARB_gpu_shader5_or_OES_sample_variables" ],
[ "FRAGMENT_INTERPOLATION_OFFSET_BITS", "CONST(FRAGMENT_INTERPOLATION_OFFSET_BITS), extra_ARB_gpu_shader5_or_OES_sample_variables" ],
]},
# Enums in OpenGL Core profile and ES 3.1
{ "apis": ["GL_CORE", "GLES31"], "params": [
# GL_ARB_draw_indirect / GLES 3.1
[ "DRAW_INDIRECT_BUFFER_BINDING", "LOC_CUSTOM, TYPE_INT, 0, extra_ARB_draw_indirect" ],
# GL 3.2 / GL OES_geometry_shader
[ "MAX_GEOMETRY_INPUT_COMPONENTS", "CONTEXT_INT(Const.Program[MESA_SHADER_GEOMETRY].MaxInputComponents), extra_version_32_OES_geometry_shader" ],
[ "MAX_GEOMETRY_OUTPUT_COMPONENTS", "CONTEXT_INT(Const.Program[MESA_SHADER_GEOMETRY].MaxOutputComponents), extra_version_32_OES_geometry_shader" ],
[ "MAX_GEOMETRY_TEXTURE_IMAGE_UNITS", "CONTEXT_INT(Const.Program[MESA_SHADER_GEOMETRY].MaxTextureImageUnits), extra_version_32_OES_geometry_shader" ],
[ "MAX_GEOMETRY_OUTPUT_VERTICES", "CONTEXT_INT(Const.MaxGeometryOutputVertices), extra_version_32_OES_geometry_shader" ],
[ "MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS", "CONTEXT_INT(Const.MaxGeometryTotalOutputComponents), extra_version_32_OES_geometry_shader" ],
[ "MAX_GEOMETRY_UNIFORM_COMPONENTS", "CONTEXT_INT(Const.Program[MESA_SHADER_GEOMETRY].MaxUniformComponents), extra_version_32_OES_geometry_shader" ],
# GL_ARB_tessellation_shader / also OES and EXT
[ "PATCH_VERTICES", "CONTEXT_INT(TessCtrlProgram.patch_vertices), extra_ARB_tessellation_shader" ],
[ "PATCH_DEFAULT_OUTER_LEVEL", "CONTEXT_FLOAT4(TessCtrlProgram.patch_default_outer_level), extra_ARB_tessellation_shader" ],
[ "PATCH_DEFAULT_INNER_LEVEL", "CONTEXT_FLOAT2(TessCtrlProgram.patch_default_inner_level), extra_ARB_tessellation_shader" ],
[ "MAX_TESS_GEN_LEVEL", "CONTEXT_INT(Const.MaxTessGenLevel), extra_ARB_tessellation_shader" ],
[ "MAX_PATCH_VERTICES", "CONTEXT_INT(Const.MaxPatchVertices), extra_ARB_tessellation_shader" ],
[ "MAX_TESS_CONTROL_UNIFORM_COMPONENTS", "CONTEXT_INT(Const.Program[MESA_SHADER_TESS_CTRL].MaxUniformComponents), extra_ARB_tessellation_shader" ],
[ "MAX_TESS_EVALUATION_UNIFORM_COMPONENTS", "CONTEXT_INT(Const.Program[MESA_SHADER_TESS_EVAL].MaxUniformComponents), extra_ARB_tessellation_shader" ],
[ "MAX_TESS_CONTROL_TEXTURE_IMAGE_UNITS", "CONTEXT_INT(Const.Program[MESA_SHADER_TESS_CTRL].MaxTextureImageUnits), extra_ARB_tessellation_shader" ],
[ "MAX_TESS_EVALUATION_TEXTURE_IMAGE_UNITS", "CONTEXT_INT(Const.Program[MESA_SHADER_TESS_EVAL].MaxTextureImageUnits), extra_ARB_tessellation_shader" ],
[ "MAX_TESS_CONTROL_OUTPUT_COMPONENTS", "CONTEXT_INT(Const.Program[MESA_SHADER_TESS_CTRL].MaxOutputComponents), extra_ARB_tessellation_shader" ],
[ "MAX_TESS_PATCH_COMPONENTS", "CONTEXT_INT(Const.MaxTessPatchComponents), extra_ARB_tessellation_shader" ],
[ "MAX_TESS_CONTROL_TOTAL_OUTPUT_COMPONENTS", "CONTEXT_INT(Const.MaxTessControlTotalOutputComponents), extra_ARB_tessellation_shader" ],
[ "MAX_TESS_EVALUATION_OUTPUT_COMPONENTS", "CONTEXT_INT(Const.Program[MESA_SHADER_TESS_EVAL].MaxOutputComponents), extra_ARB_tessellation_shader" ],
[ "MAX_TESS_CONTROL_INPUT_COMPONENTS", "CONTEXT_INT(Const.Program[MESA_SHADER_TESS_CTRL].MaxInputComponents), extra_ARB_tessellation_shader" ],
[ "MAX_TESS_EVALUATION_INPUT_COMPONENTS", "CONTEXT_INT(Const.Program[MESA_SHADER_TESS_EVAL].MaxInputComponents), extra_ARB_tessellation_shader" ],
[ "MAX_TESS_CONTROL_UNIFORM_BLOCKS", "CONTEXT_INT(Const.Program[MESA_SHADER_TESS_CTRL].MaxUniformBlocks), extra_ARB_tessellation_shader" ],
[ "MAX_TESS_EVALUATION_UNIFORM_BLOCKS", "CONTEXT_INT(Const.Program[MESA_SHADER_TESS_EVAL].MaxUniformBlocks), extra_ARB_tessellation_shader" ],
[ "MAX_COMBINED_TESS_CONTROL_UNIFORM_COMPONENTS", "CONTEXT_INT(Const.Program[MESA_SHADER_TESS_CTRL].MaxCombinedUniformComponents), extra_ARB_tessellation_shader" ],
[ "MAX_COMBINED_TESS_EVALUATION_UNIFORM_COMPONENTS", "CONTEXT_INT(Const.Program[MESA_SHADER_TESS_EVAL].MaxCombinedUniformComponents), extra_ARB_tessellation_shader" ],
[ "PRIMITIVE_RESTART_FOR_PATCHES_SUPPORTED", "CONTEXT_BOOL(Const.PrimitiveRestartForPatches), extra_ARB_tessellation_shader" ],
# GL_ARB_shader_image_load_store / geometry shader
[ "MAX_GEOMETRY_IMAGE_UNIFORMS", "CONTEXT_INT(Const.Program[MESA_SHADER_GEOMETRY].MaxImageUniforms), extra_ARB_shader_image_load_store_and_geometry_shader" ],
# GL_ARB_shader_image_load_store / tessellation shader
[ "MAX_TESS_CONTROL_IMAGE_UNIFORMS", "CONTEXT_INT(Const.Program[MESA_SHADER_TESS_CTRL].MaxImageUniforms), extra_ARB_shader_image_load_store_and_tessellation"],
[ "MAX_TESS_EVALUATION_IMAGE_UNIFORMS", "CONTEXT_INT(Const.Program[MESA_SHADER_TESS_EVAL].MaxImageUniforms), extra_ARB_shader_image_load_store_and_tessellation"],
# GL_ARB_shader_atomic_counters / geometry shader
[ "MAX_GEOMETRY_ATOMIC_COUNTER_BUFFERS", "CONTEXT_INT(Const.Program[MESA_SHADER_GEOMETRY].MaxAtomicBuffers), extra_ARB_shader_atomic_counters_and_geometry_shader " ],
[ "MAX_GEOMETRY_ATOMIC_COUNTERS", "CONTEXT_INT(Const.Program[MESA_SHADER_GEOMETRY].MaxAtomicCounters), extra_ARB_shader_atomic_counters_and_geometry_shader" ],
# GL_ARB_shader_atomic_counters / tessellation shader
[ "MAX_TESS_CONTROL_ATOMIC_COUNTER_BUFFERS", "CONTEXT_INT(Const.Program[MESA_SHADER_TESS_CTRL].MaxAtomicBuffers), extra_ARB_shader_atomic_counters_and_tessellation" ],
[ "MAX_TESS_CONTROL_ATOMIC_COUNTERS", "CONTEXT_INT(Const.Program[MESA_SHADER_TESS_CTRL].MaxAtomicCounters), extra_ARB_shader_atomic_counters_and_tessellation" ],
[ "MAX_TESS_EVALUATION_ATOMIC_COUNTER_BUFFERS", "CONTEXT_INT(Const.Program[MESA_SHADER_TESS_EVAL].MaxAtomicBuffers), extra_ARB_shader_atomic_counters_and_tessellation" ],
[ "MAX_TESS_EVALUATION_ATOMIC_COUNTERS", "CONTEXT_INT(Const.Program[MESA_SHADER_TESS_EVAL].MaxAtomicCounters), extra_ARB_shader_atomic_counters_and_tessellation" ],
# GL_ARB_shader_storage_buffer_object / geometry shader
[ "MAX_GEOMETRY_SHADER_STORAGE_BLOCKS", "CONTEXT_INT(Const.Program[MESA_SHADER_GEOMETRY].MaxShaderStorageBlocks), extra_ARB_shader_storage_buffer_object_and_geometry_shader" ],
# GL_ARB_shader_storage_buffer_object / tessellation shader
[ "MAX_TESS_CONTROL_SHADER_STORAGE_BLOCKS", "CONTEXT_INT(Const.Program[MESA_SHADER_TESS_CTRL].MaxShaderStorageBlocks), extra_ARB_shader_storage_buffer_object" ],
[ "MAX_TESS_EVALUATION_SHADER_STORAGE_BLOCKS", "CONTEXT_INT(Const.Program[MESA_SHADER_TESS_EVAL].MaxShaderStorageBlocks), extra_ARB_shader_storage_buffer_object" ],
# GL_ARB_uniform_buffer_object / geometry shader
[ "MAX_GEOMETRY_UNIFORM_BLOCKS", "CONTEXT_INT(Const.Program[MESA_SHADER_GEOMETRY].MaxUniformBlocks), extra_ARB_uniform_buffer_object_and_geometry_shader" ],
[ "MAX_COMBINED_GEOMETRY_UNIFORM_COMPONENTS", "CONTEXT_INT(Const.Program[MESA_SHADER_GEOMETRY].MaxCombinedUniformComponents), extra_ARB_uniform_buffer_object_and_geometry_shader" ],
# GL_ARB_viewport_array / GL_OES_geometry_shader
[ "LAYER_PROVOKING_VERTEX", "CONTEXT_ENUM(Const.LayerAndVPIndexProvokingVertex), extra_ARB_viewport_array_or_oes_geometry_shader" ],
# GL_ARB_gpu_shader5 / GL_OES_geometry_shader
[ "MAX_GEOMETRY_SHADER_INVOCATIONS", "CONST(MAX_GEOMETRY_SHADER_INVOCATIONS), extra_ARB_gpu_shader5_or_oes_geometry_shader" ],
# GL_OES_primitive_bounding_box
[ "PRIMITIVE_BOUNDING_BOX_ARB", "CONTEXT_FLOAT8(PrimitiveBoundingBox), extra_OES_primitive_bounding_box" ],
# GL_ARB_viewport_array / GL_OES_viewport_array
[ "MAX_VIEWPORTS", "CONTEXT_INT(Const.MaxViewports), extra_ARB_viewport_array_or_oes_viewport_array" ],
[ "VIEWPORT_SUBPIXEL_BITS", "CONTEXT_INT(Const.ViewportSubpixelBits), extra_ARB_viewport_array_or_oes_viewport_array" ],
[ "VIEWPORT_BOUNDS_RANGE", "CONTEXT_FLOAT2(Const.ViewportBounds), extra_ARB_viewport_array_or_oes_viewport_array" ],
[ "VIEWPORT_INDEX_PROVOKING_VERTEX", "CONTEXT_ENUM(Const.LayerAndVPIndexProvokingVertex), extra_ARB_viewport_array_or_oes_viewport_array" ],
# INTEL_conservative_rasterization
[ "CONSERVATIVE_RASTERIZATION_INTEL", "CONTEXT_BOOL(IntelConservativeRasterization), extra_INTEL_conservative_rasterization" ],
]},
{ "apis": ["GL_CORE", "GLES32"], "params": [
[ "MULTISAMPLE_LINE_WIDTH_RANGE_ARB", "CONTEXT_FLOAT2(Const.MinLineWidthAA), extra_ES32" ],
[ "MULTISAMPLE_LINE_WIDTH_GRANULARITY_ARB", "CONTEXT_FLOAT(Const.LineWidthGranularity), extra_ES32" ],
]},
{ "apis": ["GL", "GL_CORE", "GLES32"], "params": [
# GL 3.0 or ES 3.2
[ "CONTEXT_FLAGS", "CONTEXT_INT(Const.ContextFlags), extra_version_30" ],
]},
# Remaining enums are only in OpenGL
{ "apis": ["GL", "GL_CORE"], "params": [
[ "ACCUM_RED_BITS", "BUFFER_INT(Visual.accumRedBits), NO_EXTRA" ],
[ "ACCUM_GREEN_BITS", "BUFFER_INT(Visual.accumGreenBits), NO_EXTRA" ],
[ "ACCUM_BLUE_BITS", "BUFFER_INT(Visual.accumBlueBits), NO_EXTRA" ],
[ "ACCUM_ALPHA_BITS", "BUFFER_INT(Visual.accumAlphaBits), NO_EXTRA" ],
[ "ACCUM_CLEAR_VALUE", "CONTEXT_FIELD(Accum.ClearColor[0], TYPE_FLOATN_4), NO_EXTRA" ],
[ "ALPHA_BIAS", "CONTEXT_FLOAT(Pixel.AlphaBias), NO_EXTRA" ],
[ "ALPHA_SCALE", "CONTEXT_FLOAT(Pixel.AlphaScale), NO_EXTRA" ],
[ "ATTRIB_STACK_DEPTH", "CONTEXT_INT(AttribStackDepth), NO_EXTRA" ],
[ "AUTO_NORMAL", "CONTEXT_BOOL(Eval.AutoNormal), NO_EXTRA" ],
[ "AUX_BUFFERS", "BUFFER_INT(Visual.numAuxBuffers), NO_EXTRA" ],
[ "BLUE_BIAS", "CONTEXT_FLOAT(Pixel.BlueBias), NO_EXTRA" ],
[ "BLUE_SCALE", "CONTEXT_FLOAT(Pixel.BlueScale), NO_EXTRA" ],
[ "CLIP_DEPTH_MODE", "CONTEXT_ENUM(Transform.ClipDepthMode), extra_ARB_clip_control" ],
[ "CLIP_ORIGIN", "CONTEXT_ENUM(Transform.ClipOrigin), extra_ARB_clip_control" ],
[ "CLIENT_ATTRIB_STACK_DEPTH", "CONTEXT_INT(ClientAttribStackDepth), NO_EXTRA" ],
[ "COLOR_MATERIAL_FACE", "CONTEXT_ENUM(Light.ColorMaterialFace), NO_EXTRA" ],
[ "COLOR_MATERIAL_PARAMETER", "CONTEXT_ENUM(Light.ColorMaterialMode), NO_EXTRA" ],
[ "CURRENT_INDEX", "CONTEXT_FLOAT(Current.Attrib[VERT_ATTRIB_COLOR_INDEX][0]), extra_flush_current" ],
[ "CURRENT_RASTER_COLOR", "CONTEXT_FIELD(Current.RasterColor[0], TYPE_FLOATN_4), NO_EXTRA" ],
[ "CURRENT_RASTER_DISTANCE", "CONTEXT_FLOAT(Current.RasterDistance), NO_EXTRA" ],
[ "CURRENT_RASTER_INDEX", "CONST(1), NO_EXTRA" ],
[ "CURRENT_RASTER_POSITION", "CONTEXT_FLOAT4(Current.RasterPos[0]), NO_EXTRA" ],
[ "CURRENT_RASTER_SECONDARY_COLOR", "CONTEXT_FIELD(Current.RasterSecondaryColor[0], TYPE_FLOATN_4), NO_EXTRA" ],
[ "CURRENT_RASTER_TEXTURE_COORDS", "LOC_CUSTOM, TYPE_FLOAT_4, 0, extra_valid_texture_unit" ],
[ "CURRENT_RASTER_POSITION_VALID", "CONTEXT_BOOL(Current.RasterPosValid), NO_EXTRA" ],
[ "DEPTH_BIAS", "CONTEXT_FLOAT(Pixel.DepthBias), NO_EXTRA" ],
[ "DEPTH_SCALE", "CONTEXT_FLOAT(Pixel.DepthScale), NO_EXTRA" ],
[ "DOUBLEBUFFER", "BUFFER_INT(Visual.doubleBufferMode), NO_EXTRA" ],
[ "DRAW_BUFFER", "BUFFER_ENUM(ColorDrawBuffer[0]), NO_EXTRA" ],
[ "EDGE_FLAG", "LOC_CUSTOM, TYPE_BOOLEAN, 0, extra_flush_current" ],
[ "FEEDBACK_BUFFER_SIZE", "CONTEXT_INT(Feedback.BufferSize), NO_EXTRA" ],
[ "FEEDBACK_BUFFER_TYPE", "CONTEXT_ENUM(Feedback.Type), NO_EXTRA" ],
[ "FOG_INDEX", "CONTEXT_FLOAT(Fog.Index), NO_EXTRA" ],
[ "GREEN_BIAS", "CONTEXT_FLOAT(Pixel.GreenBias), NO_EXTRA" ],
[ "GREEN_SCALE", "CONTEXT_FLOAT(Pixel.GreenScale), NO_EXTRA" ],
[ "INDEX_BITS", "BUFFER_INT(Visual.indexBits), extra_new_buffers" ],
[ "INDEX_CLEAR_VALUE", "CONTEXT_INT(Color.ClearIndex), NO_EXTRA" ],
[ "INDEX_MODE", "CONST(0) , NO_EXTRA" ],
[ "INDEX_OFFSET", "CONTEXT_INT(Pixel.IndexOffset), NO_EXTRA" ],
[ "INDEX_SHIFT", "CONTEXT_INT(Pixel.IndexShift), NO_EXTRA" ],
[ "INDEX_WRITEMASK", "CONTEXT_INT(Color.IndexMask), NO_EXTRA" ],
[ "LIGHT_MODEL_COLOR_CONTROL", "CONTEXT_ENUM(Light.Model.ColorControl), NO_EXTRA" ],
[ "LIGHT_MODEL_LOCAL_VIEWER", "CONTEXT_BOOL(Light.Model.LocalViewer), NO_EXTRA" ],
[ "LINE_STIPPLE", "CONTEXT_BOOL(Line.StippleFlag), NO_EXTRA" ],
[ "LINE_STIPPLE_PATTERN", "LOC_CUSTOM, TYPE_INT, 0, NO_EXTRA" ],
[ "LINE_STIPPLE_REPEAT", "CONTEXT_INT(Line.StippleFactor), NO_EXTRA" ],
[ "LINE_WIDTH_GRANULARITY", "CONTEXT_FLOAT(Const.LineWidthGranularity), NO_EXTRA" ],
[ "LIST_BASE", "CONTEXT_INT(List.ListBase), NO_EXTRA" ],
[ "LIST_INDEX", "LOC_CUSTOM, TYPE_INT, 0, NO_EXTRA" ],
[ "LIST_MODE", "LOC_CUSTOM, TYPE_ENUM, 0, NO_EXTRA" ],
[ "INDEX_LOGIC_OP", "CONTEXT_BOOL(Color.IndexLogicOpEnabled), NO_EXTRA" ],
[ "MAP1_COLOR_4", "CONTEXT_BOOL(Eval.Map1Color4), NO_EXTRA" ],
[ "MAP1_GRID_DOMAIN", "CONTEXT_FLOAT2(Eval.MapGrid1u1), NO_EXTRA" ],
[ "MAP1_GRID_SEGMENTS", "CONTEXT_INT(Eval.MapGrid1un), NO_EXTRA" ],
[ "MAP1_INDEX", "CONTEXT_BOOL(Eval.Map1Index), NO_EXTRA" ],
[ "MAP1_NORMAL", "CONTEXT_BOOL(Eval.Map1Normal), NO_EXTRA" ],
[ "MAP1_TEXTURE_COORD_1", "CONTEXT_BOOL(Eval.Map1TextureCoord1), NO_EXTRA" ],
[ "MAP1_TEXTURE_COORD_2", "CONTEXT_BOOL(Eval.Map1TextureCoord2), NO_EXTRA" ],
[ "MAP1_TEXTURE_COORD_3", "CONTEXT_BOOL(Eval.Map1TextureCoord3), NO_EXTRA" ],
[ "MAP1_TEXTURE_COORD_4", "CONTEXT_BOOL(Eval.Map1TextureCoord4), NO_EXTRA" ],
[ "MAP1_VERTEX_3", "CONTEXT_BOOL(Eval.Map1Vertex3), NO_EXTRA" ],
[ "MAP1_VERTEX_4", "CONTEXT_BOOL(Eval.Map1Vertex4), NO_EXTRA" ],
[ "MAP2_COLOR_4", "CONTEXT_BOOL(Eval.Map2Color4), NO_EXTRA" ],
[ "MAP2_GRID_DOMAIN", "LOC_CUSTOM, TYPE_FLOAT_4, 0, NO_EXTRA" ],
[ "MAP2_GRID_SEGMENTS", "CONTEXT_INT2(Eval.MapGrid2un), NO_EXTRA" ],
[ "MAP2_INDEX", "CONTEXT_BOOL(Eval.Map2Index), NO_EXTRA" ],
[ "MAP2_NORMAL", "CONTEXT_BOOL(Eval.Map2Normal), NO_EXTRA" ],
[ "MAP2_TEXTURE_COORD_1", "CONTEXT_BOOL(Eval.Map2TextureCoord1), NO_EXTRA" ],
[ "MAP2_TEXTURE_COORD_2", "CONTEXT_BOOL(Eval.Map2TextureCoord2), NO_EXTRA" ],
[ "MAP2_TEXTURE_COORD_3", "CONTEXT_BOOL(Eval.Map2TextureCoord3), NO_EXTRA" ],
[ "MAP2_TEXTURE_COORD_4", "CONTEXT_BOOL(Eval.Map2TextureCoord4), NO_EXTRA" ],
[ "MAP2_VERTEX_3", "CONTEXT_BOOL(Eval.Map2Vertex3), NO_EXTRA" ],
[ "MAP2_VERTEX_4", "CONTEXT_BOOL(Eval.Map2Vertex4), NO_EXTRA" ],
[ "MAP_COLOR", "CONTEXT_BOOL(Pixel.MapColorFlag), NO_EXTRA" ],
[ "MAP_STENCIL", "CONTEXT_BOOL(Pixel.MapStencilFlag), NO_EXTRA" ],
[ "MAX_ATTRIB_STACK_DEPTH", "CONST(MAX_ATTRIB_STACK_DEPTH), NO_EXTRA" ],
[ "MAX_CLIENT_ATTRIB_STACK_DEPTH", "CONST(MAX_CLIENT_ATTRIB_STACK_DEPTH), NO_EXTRA" ],
[ "MAX_EVAL_ORDER", "CONST(MAX_EVAL_ORDER), NO_EXTRA" ],
[ "MAX_LIST_NESTING", "CONST(MAX_LIST_NESTING), NO_EXTRA" ],
[ "MAX_NAME_STACK_DEPTH", "CONST(MAX_NAME_STACK_DEPTH), NO_EXTRA" ],
[ "MAX_PIXEL_MAP_TABLE", "CONST(MAX_PIXEL_MAP_TABLE), NO_EXTRA" ],
[ "NAME_STACK_DEPTH", "CONTEXT_INT(Select.NameStackDepth), NO_EXTRA" ],
[ "PACK_LSB_FIRST", "CONTEXT_BOOL(Pack.LsbFirst), NO_EXTRA" ],
[ "PACK_SWAP_BYTES", "CONTEXT_BOOL(Pack.SwapBytes), NO_EXTRA" ],
[ "PACK_INVERT_MESA", "CONTEXT_BOOL(Pack.Invert), NO_EXTRA" ],
[ "PIXEL_MAP_A_TO_A_SIZE", "CONTEXT_INT(PixelMaps.AtoA.Size), NO_EXTRA" ],
[ "PIXEL_MAP_B_TO_B_SIZE", "CONTEXT_INT(PixelMaps.BtoB.Size), NO_EXTRA" ],
[ "PIXEL_MAP_G_TO_G_SIZE", "CONTEXT_INT(PixelMaps.GtoG.Size), NO_EXTRA" ],
[ "PIXEL_MAP_I_TO_A_SIZE", "CONTEXT_INT(PixelMaps.ItoA.Size), NO_EXTRA" ],
[ "PIXEL_MAP_I_TO_B_SIZE", "CONTEXT_INT(PixelMaps.ItoB.Size), NO_EXTRA" ],
[ "PIXEL_MAP_I_TO_G_SIZE", "CONTEXT_INT(PixelMaps.ItoG.Size), NO_EXTRA" ],
[ "PIXEL_MAP_I_TO_I_SIZE", "CONTEXT_INT(PixelMaps.ItoI.Size), NO_EXTRA" ],
[ "PIXEL_MAP_I_TO_R_SIZE", "CONTEXT_INT(PixelMaps.ItoR.Size), NO_EXTRA" ],
[ "PIXEL_MAP_R_TO_R_SIZE", "CONTEXT_INT(PixelMaps.RtoR.Size), NO_EXTRA" ],
[ "PIXEL_MAP_S_TO_S_SIZE", "CONTEXT_INT(PixelMaps.StoS.Size), NO_EXTRA" ],
[ "POINT_SIZE_GRANULARITY", "CONTEXT_FLOAT(Const.PointSizeGranularity), NO_EXTRA" ],
[ "POLYGON_MODE", "CONTEXT_ENUM2(Polygon.FrontMode), NO_EXTRA" ],
[ "POLYGON_OFFSET_BIAS_EXT", "CONTEXT_FLOAT(Polygon.OffsetUnits), NO_EXTRA" ],
[ "POLYGON_OFFSET_POINT", "CONTEXT_BOOL(Polygon.OffsetPoint), NO_EXTRA" ],
[ "POLYGON_OFFSET_LINE", "CONTEXT_BOOL(Polygon.OffsetLine), NO_EXTRA" ],
[ "POLYGON_SMOOTH", "CONTEXT_BOOL(Polygon.SmoothFlag), NO_EXTRA" ],
[ "POLYGON_SMOOTH_HINT", "CONTEXT_ENUM(Hint.PolygonSmooth), NO_EXTRA" ],
[ "POLYGON_STIPPLE", "CONTEXT_BOOL(Polygon.StippleFlag), NO_EXTRA" ],
[ "RED_BIAS", "CONTEXT_FLOAT(Pixel.RedBias), NO_EXTRA" ],
[ "RED_SCALE", "CONTEXT_FLOAT(Pixel.RedScale), NO_EXTRA" ],
[ "RENDER_MODE", "CONTEXT_ENUM(RenderMode), NO_EXTRA" ],
[ "RGBA_MODE", "CONST(1), NO_EXTRA" ],
[ "SELECTION_BUFFER_SIZE", "CONTEXT_INT(Select.BufferSize), NO_EXTRA" ],
[ "STEREO", "BUFFER_INT(Visual.stereoMode), NO_EXTRA" ],
[ "TEXTURE_1D", "LOC_CUSTOM, TYPE_BOOLEAN, NO_OFFSET, NO_EXTRA" ],
[ "TEXTURE_3D", "LOC_CUSTOM, TYPE_BOOLEAN, NO_OFFSET, NO_EXTRA" ],
[ "TEXTURE_BINDING_1D", "LOC_CUSTOM, TYPE_INT, TEXTURE_1D_INDEX, NO_EXTRA" ],
[ "TEXTURE_BINDING_1D_ARRAY", "LOC_CUSTOM, TYPE_INT, TEXTURE_1D_ARRAY_INDEX, extra_EXT_texture_array" ],
[ "TEXTURE_GEN_S", "LOC_TEXUNIT, TYPE_BIT_0, offsetof(struct gl_texture_unit, TexGenEnabled), NO_EXTRA" ],
[ "TEXTURE_GEN_T", "LOC_TEXUNIT, TYPE_BIT_1, offsetof(struct gl_texture_unit, TexGenEnabled), NO_EXTRA" ],
[ "TEXTURE_GEN_R", "LOC_TEXUNIT, TYPE_BIT_2, offsetof(struct gl_texture_unit, TexGenEnabled), NO_EXTRA" ],
[ "TEXTURE_GEN_Q", "LOC_TEXUNIT, TYPE_BIT_3, offsetof(struct gl_texture_unit, TexGenEnabled), NO_EXTRA" ],
[ "UNPACK_LSB_FIRST", "CONTEXT_BOOL(Unpack.LsbFirst), NO_EXTRA" ],
[ "UNPACK_SWAP_BYTES", "CONTEXT_BOOL(Unpack.SwapBytes), NO_EXTRA" ],
[ "ZOOM_X", "CONTEXT_FLOAT(Pixel.ZoomX), NO_EXTRA" ],
[ "ZOOM_Y", "CONTEXT_FLOAT(Pixel.ZoomY), NO_EXTRA" ],
# Vertex arrays
[ "VERTEX_ARRAY_COUNT_EXT", "CONST(0), NO_EXTRA" ],
[ "NORMAL_ARRAY_COUNT_EXT", "CONST(0), NO_EXTRA" ],
[ "COLOR_ARRAY_COUNT_EXT", "CONST(0), NO_EXTRA" ],
[ "INDEX_ARRAY", "ARRAY_BOOL(VertexAttrib[VERT_ATTRIB_COLOR_INDEX].Enabled), NO_EXTRA" ],
[ "INDEX_ARRAY_TYPE", "ARRAY_ENUM(VertexAttrib[VERT_ATTRIB_COLOR_INDEX].Type), NO_EXTRA" ],
[ "INDEX_ARRAY_STRIDE", "ARRAY_INT(VertexAttrib[VERT_ATTRIB_COLOR_INDEX].Stride), NO_EXTRA" ],
[ "INDEX_ARRAY_COUNT_EXT", "CONST(0), NO_EXTRA" ],
[ "TEXTURE_COORD_ARRAY_COUNT_EXT", "CONST(0), NO_EXTRA" ],
[ "EDGE_FLAG_ARRAY", "ARRAY_BOOL(VertexAttrib[VERT_ATTRIB_EDGEFLAG].Enabled), NO_EXTRA" ],
[ "EDGE_FLAG_ARRAY_STRIDE", "ARRAY_INT(VertexAttrib[VERT_ATTRIB_EDGEFLAG].Stride), NO_EXTRA" ],
[ "EDGE_FLAG_ARRAY_COUNT_EXT", "CONST(0), NO_EXTRA" ],
# GL_ARB_texture_compression
[ "TEXTURE_COMPRESSION_HINT_ARB", "CONTEXT_INT(Hint.TextureCompression), NO_EXTRA" ],
# GL_EXT_compiled_vertex_array
[ "ARRAY_ELEMENT_LOCK_FIRST_EXT", "CONTEXT_INT(Array.LockFirst), NO_EXTRA" ],
[ "ARRAY_ELEMENT_LOCK_COUNT_EXT", "CONTEXT_INT(Array.LockCount), NO_EXTRA" ],
# GL_ARB_compressed_texture_pixel_storage
[ "UNPACK_COMPRESSED_BLOCK_WIDTH", "CONTEXT_INT(Unpack.CompressedBlockWidth), NO_EXTRA" ],
[ "UNPACK_COMPRESSED_BLOCK_HEIGHT", "CONTEXT_INT(Unpack.CompressedBlockHeight), NO_EXTRA" ],
[ "UNPACK_COMPRESSED_BLOCK_DEPTH", "CONTEXT_INT(Unpack.CompressedBlockDepth), NO_EXTRA" ],
[ "UNPACK_COMPRESSED_BLOCK_SIZE", "CONTEXT_INT(Unpack.CompressedBlockSize), NO_EXTRA" ],
[ "PACK_COMPRESSED_BLOCK_WIDTH", "CONTEXT_INT(Pack.CompressedBlockWidth), NO_EXTRA" ],
[ "PACK_COMPRESSED_BLOCK_HEIGHT", "CONTEXT_INT(Pack.CompressedBlockHeight), NO_EXTRA" ],
[ "PACK_COMPRESSED_BLOCK_DEPTH", "CONTEXT_INT(Pack.CompressedBlockDepth), NO_EXTRA" ],
[ "PACK_COMPRESSED_BLOCK_SIZE", "CONTEXT_INT(Pack.CompressedBlockSize), NO_EXTRA" ],
# GL_ARB_transpose_matrix
[ "TRANSPOSE_MODELVIEW_MATRIX_ARB", "CONTEXT_MATRIX_T(ModelviewMatrixStack), NO_EXTRA" ],
[ "TRANSPOSE_PROJECTION_MATRIX_ARB", "CONTEXT_MATRIX_T(ProjectionMatrixStack.Top), NO_EXTRA" ],
[ "TRANSPOSE_TEXTURE_MATRIX_ARB", "CONTEXT_MATRIX_T(TextureMatrixStack), NO_EXTRA" ],
# GL_EXT_secondary_color
[ "COLOR_SUM", "CONTEXT_BOOL(Fog.ColorSumEnabled), NO_EXTRA" ],
[ "CURRENT_SECONDARY_COLOR", "CONTEXT_FIELD(Current.Attrib[VERT_ATTRIB_COLOR1][0], TYPE_FLOATN_4), extra_flush_current" ],
[ "SECONDARY_COLOR_ARRAY", "ARRAY_BOOL(VertexAttrib[VERT_ATTRIB_COLOR1].Enabled), NO_EXTRA" ],
[ "SECONDARY_COLOR_ARRAY_TYPE", "ARRAY_ENUM(VertexAttrib[VERT_ATTRIB_COLOR1].Type), NO_EXTRA" ],
[ "SECONDARY_COLOR_ARRAY_STRIDE", "ARRAY_INT(VertexAttrib[VERT_ATTRIB_COLOR1].Stride), NO_EXTRA" ],
[ "SECONDARY_COLOR_ARRAY_SIZE", "LOC_CUSTOM, TYPE_INT, 0, NO_EXTRA" ],
# GL_EXT_fog_coord
[ "CURRENT_FOG_COORDINATE", "CONTEXT_FLOAT(Current.Attrib[VERT_ATTRIB_FOG][0]), extra_flush_current" ],
[ "FOG_COORDINATE_ARRAY", "ARRAY_BOOL(VertexAttrib[VERT_ATTRIB_FOG].Enabled), NO_EXTRA" ],
[ "FOG_COORDINATE_ARRAY_TYPE", "ARRAY_ENUM(VertexAttrib[VERT_ATTRIB_FOG].Type), NO_EXTRA" ],
[ "FOG_COORDINATE_ARRAY_STRIDE", "ARRAY_INT(VertexAttrib[VERT_ATTRIB_FOG].Stride), NO_EXTRA" ],
[ "FOG_COORDINATE_SOURCE", "CONTEXT_ENUM(Fog.FogCoordinateSource), NO_EXTRA" ],
# GL_NV_fog_distance
[ "FOG_DISTANCE_MODE_NV", "CONTEXT_ENUM(Fog.FogDistanceMode), extra_NV_fog_distance" ],
# GL_IBM_rasterpos_clip
[ "RASTER_POSITION_UNCLIPPED_IBM", "CONTEXT_BOOL(Transform.RasterPositionUnclipped), NO_EXTRA" ],
# GL_NV_point_sprite
[ "POINT_SPRITE_R_MODE_NV", "CONTEXT_ENUM(Point.SpriteRMode), extra_NV_point_sprite" ],
[ "POINT_SPRITE_COORD_ORIGIN", "CONTEXT_ENUM(Point.SpriteOrigin), extra_NV_point_sprite_ARB_point_sprite" ],
# GL_NV_texture_rectangle
[ "TEXTURE_RECTANGLE_NV", "LOC_CUSTOM, TYPE_BOOLEAN, 0, extra_NV_texture_rectangle" ],
[ "TEXTURE_BINDING_RECTANGLE_NV", "LOC_CUSTOM, TYPE_INT, TEXTURE_RECT_INDEX, extra_NV_texture_rectangle" ],
[ "MAX_RECTANGLE_TEXTURE_SIZE_NV", "CONTEXT_INT(Const.MaxTextureRectSize), extra_NV_texture_rectangle" ],
# GL_EXT_stencil_two_side
[ "STENCIL_TEST_TWO_SIDE_EXT", "CONTEXT_BOOL(Stencil.TestTwoSide), extra_EXT_stencil_two_side" ],
[ "ACTIVE_STENCIL_FACE_EXT", "LOC_CUSTOM, TYPE_ENUM, NO_OFFSET, NO_EXTRA" ],
# GL_NV_light_max_exponent
[ "MAX_SHININESS_NV", "CONTEXT_FLOAT(Const.MaxShininess), NO_EXTRA" ],
[ "MAX_SPOT_EXPONENT_NV", "CONTEXT_FLOAT(Const.MaxSpotExponent), NO_EXTRA" ],
# GL_NV_primitive_restart
[ "PRIMITIVE_RESTART_NV", "CONTEXT_BOOL(Array.PrimitiveRestart), extra_NV_primitive_restart" ],
[ "PRIMITIVE_RESTART_INDEX_NV", "CONTEXT_INT(Array.RestartIndex), extra_NV_primitive_restart" ],
# GL_ARB_vertex_buffer_object
[ "INDEX_ARRAY_BUFFER_BINDING_ARB", "LOC_CUSTOM, TYPE_INT, offsetof(struct gl_vertex_array_object, BufferBinding[VERT_ATTRIB_COLOR_INDEX].BufferObj), NO_EXTRA" ],
[ "EDGE_FLAG_ARRAY_BUFFER_BINDING_ARB", "LOC_CUSTOM, TYPE_INT, offsetof(struct gl_vertex_array_object, BufferBinding[VERT_ATTRIB_EDGEFLAG].BufferObj), NO_EXTRA" ],
[ "SECONDARY_COLOR_ARRAY_BUFFER_BINDING_ARB", "LOC_CUSTOM, TYPE_INT, offsetof(struct gl_vertex_array_object, BufferBinding[VERT_ATTRIB_COLOR1].BufferObj), NO_EXTRA" ],
[ "FOG_COORDINATE_ARRAY_BUFFER_BINDING_ARB", "LOC_CUSTOM, TYPE_INT, offsetof(struct gl_vertex_array_object, BufferBinding[VERT_ATTRIB_FOG].BufferObj), NO_EXTRA" ],
# GL_ARB_vertex_program
# == GL_VERTEX_PROGRAM_NV
[ "VERTEX_PROGRAM_ARB", "CONTEXT_BOOL(VertexProgram.Enabled), extra_ARB_vertex_program" ],
# == GL_VERTEX_PROGRAM_POINT_SIZE_NV
[ "VERTEX_PROGRAM_POINT_SIZE_ARB", "CONTEXT_BOOL(VertexProgram.PointSizeEnabled), extra_ARB_vertex_program" ],
# == GL_VERTEX_PROGRAM_TWO_SIDE_NV
[ "VERTEX_PROGRAM_TWO_SIDE_ARB", "CONTEXT_BOOL(VertexProgram.TwoSideEnabled), extra_ARB_vertex_program" ],
# == GL_MAX_TRACK_MATRIX_STACK_DEPTH_NV
[ "MAX_PROGRAM_MATRIX_STACK_DEPTH_ARB", "CONTEXT_INT(Const.MaxProgramMatrixStackDepth), extra_ARB_vertex_program_ARB_fragment_program" ],
# == GL_MAX_TRACK_MATRICES_NV
[ "MAX_PROGRAM_MATRICES_ARB", "CONTEXT_INT(Const.MaxProgramMatrices), extra_ARB_vertex_program_ARB_fragment_program" ],
# == GL_CURRENT_MATRIX_STACK_DEPTH_NV
[ "CURRENT_MATRIX_STACK_DEPTH_ARB", "LOC_CUSTOM, TYPE_INT, 0, extra_ARB_vertex_program_ARB_fragment_program" ],
# == GL_CURRENT_MATRIX_NV
[ "CURRENT_MATRIX_ARB", "LOC_CUSTOM, TYPE_MATRIX, 0, extra_ARB_vertex_program_ARB_fragment_program" ],
# == GL_CURRENT_MATRIX_NV
[ "TRANSPOSE_CURRENT_MATRIX_ARB", "LOC_CUSTOM, TYPE_MATRIX_T, 0, extra_ARB_vertex_program_ARB_fragment_program" ],
# == GL_PROGRAM_ERROR_POSITION_NV
[ "PROGRAM_ERROR_POSITION_ARB", "CONTEXT_INT(Program.ErrorPos), extra_ARB_vertex_program_ARB_fragment_program" ],
# GL_ARB_fragment_program
[ "FRAGMENT_PROGRAM_ARB", "CONTEXT_BOOL(FragmentProgram.Enabled), extra_ARB_fragment_program" ],
# GL_EXT_packed_float
[ "RGBA_SIGNED_COMPONENTS_EXT", "LOC_CUSTOM, TYPE_INT_4, 0, extra_EXT_packed_float" ],
# GL_EXT_depth_bounds_test
[ "DEPTH_BOUNDS_TEST_EXT", "CONTEXT_BOOL(Depth.BoundsTest), extra_EXT_depth_bounds_test" ],
[ "DEPTH_BOUNDS_EXT", "CONTEXT_FLOAT2(Depth.BoundsMin), extra_EXT_depth_bounds_test" ],
# GL_ARB_depth_clamp
[ "DEPTH_CLAMP", "CONTEXT_BOOL(Transform.DepthClamp), extra_ARB_depth_clamp" ],
# GL_ATI_fragment_shader
[ "FRAGMENT_SHADER_ATI", "CONTEXT_BOOL(ATIFragmentShader.Enabled), extra_ATI_fragment_shader" ],
[ "NUM_FRAGMENT_REGISTERS_ATI", "CONST(6), extra_ATI_fragment_shader" ],
[ "NUM_FRAGMENT_CONSTANTS_ATI", "CONST(8), extra_ATI_fragment_shader" ],
[ "NUM_PASSES_ATI", "CONST(2), extra_ATI_fragment_shader" ],
[ "NUM_INSTRUCTIONS_PER_PASS_ATI", "CONST(8), extra_ATI_fragment_shader" ],
[ "NUM_INSTRUCTIONS_TOTAL_ATI", "CONST(16), extra_ATI_fragment_shader" ],
[ "COLOR_ALPHA_PAIRING_ATI", "CONST(GL_TRUE), extra_ATI_fragment_shader" ],
[ "NUM_LOOPBACK_COMPONENTS_ATI", "CONST(3), extra_ATI_fragment_shader" ],
[ "NUM_INPUT_INTERPOLATOR_COMPONENTS_ATI", "CONST(3), extra_ATI_fragment_shader" ],
# GL_EXT_provoking_vertex
[ "PROVOKING_VERTEX_EXT", "CONTEXT_ENUM(Light.ProvokingVertex), extra_EXT_provoking_vertex" ],
[ "QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION_EXT", "CONTEXT_BOOL(Const.QuadsFollowProvokingVertexConvention), extra_EXT_provoking_vertex_32" ],
# GL_ARB_seamless_cube_map
[ "TEXTURE_CUBE_MAP_SEAMLESS", "CONTEXT_BOOL(Texture.CubeMapSeamless), extra_ARB_seamless_cube_map" ],
# GL_EXT_texture_integer
[ "RGBA_INTEGER_MODE_EXT", "LOC_CUSTOM, TYPE_INT, 0, extra_EXT_texture_integer_and_new_buffers" ],
# GL_ARB_transform_feedback3
[ "MAX_TRANSFORM_FEEDBACK_BUFFERS", "CONTEXT_INT(Const.MaxTransformFeedbackBuffers), extra_ARB_transform_feedback3" ],
[ "MAX_VERTEX_STREAMS", "CONTEXT_INT(Const.MaxVertexStreams), extra_ARB_transform_feedback3_ARB_gpu_shader5" ],
# GL_ARB_color_buffer_float
[ "RGBA_FLOAT_MODE_ARB", "BUFFER_FIELD(Visual.floatMode, TYPE_BOOLEAN), extra_core_ARB_color_buffer_float_and_new_buffers" ],
# GL3.0 / GL_EXT_framebuffer_sRGB
[ "FRAMEBUFFER_SRGB_EXT", "CONTEXT_BOOL(Color.sRGBEnabled), extra_EXT_framebuffer_sRGB" ],
[ "FRAMEBUFFER_SRGB_CAPABLE_EXT", "BUFFER_INT(Visual.sRGBCapable), extra_EXT_framebuffer_sRGB_and_new_buffers" ],
# GL 3.1
# NOTE: different enum values for GL_PRIMITIVE_RESTART_NV
# vs. GL_PRIMITIVE_RESTART!
[ "PRIMITIVE_RESTART", "CONTEXT_BOOL(Array.PrimitiveRestart), extra_version_31" ],
[ "PRIMITIVE_RESTART_INDEX", "CONTEXT_INT(Array.RestartIndex), extra_version_31" ],
# GL 3.2
[ "CONTEXT_PROFILE_MASK", "CONTEXT_INT(Const.ProfileMask), extra_version_32" ],
# GL_ARB_timer_query
[ "TIMESTAMP", "LOC_CUSTOM, TYPE_INT64, 0, extra_ARB_timer_query" ],
# GL_ARB_map_buffer_alignment
[ "MIN_MAP_BUFFER_ALIGNMENT", "CONTEXT_INT(Const.MinMapBufferAlignment), NO_EXTRA" ],
# GL_ARB_texture_gather
[ "MAX_PROGRAM_TEXTURE_GATHER_COMPONENTS_ARB", "CONTEXT_INT(Const.MaxProgramTextureGatherComponents), extra_ARB_texture_gather"],
# GL_ARB_shader_image_load_store
[ "MAX_IMAGE_SAMPLES", "CONTEXT_INT(Const.MaxImageSamples), extra_ARB_shader_image_load_store" ],
# GL_ARB_query_buffer_object
[ "QUERY_BUFFER_BINDING", "LOC_CUSTOM, TYPE_INT, 0, extra_ARB_query_buffer_object" ],
# GL_ATI_meminfo
[ "VBO_FREE_MEMORY_ATI", "LOC_CUSTOM, TYPE_INT_4, NO_OFFSET, extra_ATI_meminfo" ],
[ "TEXTURE_FREE_MEMORY_ATI", "LOC_CUSTOM, TYPE_INT_4, NO_OFFSET, extra_ATI_meminfo" ],
[ "RENDERBUFFER_FREE_MEMORY_ATI", "LOC_CUSTOM, TYPE_INT_4, NO_OFFSET, extra_ATI_meminfo" ],
# GL_NVX_gpu_memory_info
[ "GPU_MEMORY_INFO_DEDICATED_VIDMEM_NVX", "LOC_CUSTOM, TYPE_INT, NO_OFFSET, extra_NVX_gpu_memory_info" ],
[ "GPU_MEMORY_INFO_TOTAL_AVAILABLE_MEMORY_NVX", "LOC_CUSTOM, TYPE_INT, NO_OFFSET, extra_NVX_gpu_memory_info" ],
[ "GPU_MEMORY_INFO_CURRENT_AVAILABLE_VIDMEM_NVX", "LOC_CUSTOM, TYPE_INT, NO_OFFSET, extra_NVX_gpu_memory_info" ],
[ "GPU_MEMORY_INFO_EVICTION_COUNT_NVX", "LOC_CUSTOM, TYPE_INT, NO_OFFSET, extra_NVX_gpu_memory_info" ],
[ "GPU_MEMORY_INFO_EVICTED_MEMORY_NVX", "LOC_CUSTOM, TYPE_INT, NO_OFFSET, extra_NVX_gpu_memory_info" ],
# GL_ARB_cull_distance
[ "MAX_CULL_DISTANCES", "CONTEXT_INT(Const.MaxClipPlanes), extra_ARB_cull_distance" ],
[ "MAX_COMBINED_CLIP_AND_CULL_DISTANCES", "CONTEXT_INT(Const.MaxClipPlanes), extra_ARB_cull_distance" ],
# GL_ARB_compute_variable_group_size
[ "MAX_COMPUTE_VARIABLE_GROUP_INVOCATIONS_ARB", "CONTEXT_INT(Const.MaxComputeVariableGroupInvocations), extra_ARB_compute_variable_group_size" ],
# GL_ARB_sparse_buffer
[ "SPARSE_BUFFER_PAGE_SIZE_ARB", "CONTEXT_INT(Const.SparseBufferPageSize), extra_ARB_sparse_buffer" ],
]},
# Enums restricted to OpenGL Core profile
{ "apis": ["GL_CORE"], "params": [
# GL_ARB_shader_subroutine
[ "MAX_SUBROUTINES", "CONST(MAX_SUBROUTINES), NO_EXTRA" ],
[ "MAX_SUBROUTINE_UNIFORM_LOCATIONS", "CONST(MAX_SUBROUTINE_UNIFORM_LOCATIONS), NO_EXTRA" ],
# GL_ARB_indirect_parameters
[ "PARAMETER_BUFFER_BINDING_ARB", "LOC_CUSTOM, TYPE_INT, 0, extra_ARB_indirect_parameters" ],
]}
]
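# A minimal sketch of how this table can be traversed, assuming the list above is
# bound to a name such as `descriptor` (hypothetical here; the real assignment is
# made earlier in this file). Each group pairs an "apis" list with "params"
# entries of the form [ENUM_NAME, "location/type/offset, extra_predicate"]:
#
#   def apis_for_enum(descriptor, enum_name):
#       return [group["apis"] for group in descriptor
#               if any(param[0] == enum_name for param in group["params"])]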
|
import re
import string
import os
import random
import pandas as pd
import numpy as np
import requests
from sklearn import preprocessing
from sklearn.neighbors import KNeighborsClassifier
def predictKnn():
    print('\n - Reading the file with the diabetes dataset')
    data = pd.read_csv('diabetes_test.csv')
    print(' - Building X and y for the learning algorithm from the diabetes dataset file')
    feature_cols = ['Pregnancies', 'Glucose', 'BloodPressure', 'SkinThickness',
                    'Insulin', 'BMI', 'DiabetesPedigreeFunction', 'Age']
    X = data[feature_cols]
    y = data.Outcome
    print(' - Building the predictive model')
    neigh = KNeighborsClassifier(n_neighbors=3)
    neigh.fit(X, y)
    print(' - Applying the model and sending the predictions to the server')
    data_app = pd.read_csv('diabetes_sample_test.csv')
    y_pred = neigh.predict(data_app)
    URL = "https://aydanomachado.com/MachineLearning/PreProcessing.php"
    DEV_KEY = "Café com leite"
    data = {'dev_key': DEV_KEY,
            'predictions': pd.Series(y_pred).to_json(orient='values')}
    r = requests.post(url=URL, data=data)
    print(" - Server response:\n", r.text, "\n")
    # The callers below use this return value as a numeric accuracy. The exact
    # response format of the grading server is not shown here; as an assumption,
    # the accuracy is taken to be the first number found in the response text.
    match = re.search(r"\d+(?:\.\d+)?", r.text)
    return float(match.group()) if match else 0.0
def generate_norm_data(data):
data = data.dropna(thresh=5)
data = data.interpolate()
data = data.fillna(data.median())
data = data.astype(np.float64)
min_max_scaler = preprocessing.MinMaxScaler()
np_scaled = min_max_scaler.fit_transform(data)
data_norm = pd.DataFrame(np_scaled)
return data_norm
def multiply_features(data_norm, sample, scalars):
pregnancies = scalars[0]
glucose = scalars[1]
blood_pressure = scalars[2]
skin_thickness = scalars[3]
insulin = scalars[4]
bmi = scalars[5]
diabetes_pedigree_function = scalars[6]
age = scalars[7]
if sample == 0:
data_norm.columns = ['Pregnancies', 'Glucose', 'BloodPressure', 'SkinThickness', 'Insulin', 'BMI',
'DiabetesPedigreeFunction', 'Age', 'Outcome']
else:
data_norm.columns = ['Pregnancies', 'Glucose', 'BloodPressure', 'SkinThickness', 'Insulin', 'BMI',
'DiabetesPedigreeFunction', 'Age']
data_norm.loc[:, 'Pregnancies'] *= pregnancies
data_norm.loc[:, 'Glucose'] *= glucose
data_norm.loc[:, 'BloodPressure'] *= blood_pressure
data_norm.loc[:, 'SkinThickness'] *= skin_thickness
data_norm.loc[:, 'Insulin'] *= insulin
data_norm.loc[:, 'BMI'] *= bmi
data_norm.loc[:, 'DiabetesPedigreeFunction'] *= diabetes_pedigree_function
data_norm.loc[:, 'Age'] *= age
data_norm = data_norm.round(6)
return data_norm
def random_with_range(begin, end):
begin *= 10
end *= 10
return float(random.randrange(begin, end))/10
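# For reference: random_with_range(0, 5) draws a multiple of 0.1 in [0.0, 4.9],
# since the bounds are scaled by 10, passed to randrange, and divided back.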
def id_generator(size=10, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
def neighbors(scalar):
constant = 0.5
size = len(scalar)
list_of_neighbors = []
for i in range(size):
temp1 = scalar.copy()
temp2 = scalar.copy()
temp1[i] += constant
temp2[i] -= constant
list_of_neighbors.append(temp1)
list_of_neighbors.append(temp2)
return list_of_neighbors
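# A worked example of the neighbor generation above, for a two-element scalar
# vector: each component is perturbed by +/-0.5, giving four candidates, e.g.
#   neighbors([1.0, 2.0])
#   -> [[1.5, 2.0], [0.5, 2.0], [1.0, 2.5], [1.0, 1.5]]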
def predict(best_accuracy, scalars, data, data_sample):
accuracy = predictKnn()
if accuracy > best_accuracy:
best_accuracy = accuracy
temp = "best_guesses/accuracy" + str(accuracy) + "KEY" + id_generator()
os.mkdir(temp)
f = open(temp + "/scalars.txt", "w")
file_content = "pregnancies= " + str(scalars[0]) + '\n'
file_content += "glucose= " + str(scalars[1]) + '\n'
file_content += "blood_pressure= " + str(scalars[2]) + '\n'
file_content += "skin_thickness= " + str(scalars[3]) + '\n'
file_content += "insulin= " + str(scalars[4]) + '\n'
file_content += "bmi= " + str(scalars[5]) + '\n'
file_content += "diabetes_pedigree_function= " + str(scalars[6]) + '\n'
file_content += "age= " + str(scalars[7]) + '\n'
file_content += "accuracy= " + str(accuracy)
f.write(file_content)
f.close()
print(file_content)
print("------------------------------------")
data.to_csv(temp + "/data_train", index=False)
data_sample.to_csv(temp + "/data_test", index=False)
return accuracy
def test_accuracy(data, data_sample, guess, best_accuracy_global, flag):
data_sample = multiply_features(data_sample, 1, guess)
data = multiply_features(data, 0, guess)
data_sample.to_csv("diabetes_sample_test.csv", index=False)
data.to_csv("diabetes_test.csv", index=False)
if flag is True:
best_accuracy = predict(best_accuracy_global, guess, data, data_sample)
else:
best_accuracy = predictKnn()
return best_accuracy
def simulated_annealing(data, data_sample, start, best_accuracy_global):
    # Despite its name, this is steepest-ascent hill climbing: each iteration
    # evaluates all neighbors of the current scalar vector, moves to the best
    # one, and stops when no neighbor improves on the current accuracy.
    current = start
    while True:
        neigh = neighbors(current)
        best_accuracy = 0
        best_neighbor = None
        for i in neigh:
            current_accuracy = test_accuracy(data, data_sample, i, best_accuracy_global, False)
            print(" GUESS", i, " ACCURACY: ", current_accuracy)
            if current_accuracy > best_accuracy:
                best_neighbor = i
                best_accuracy = current_accuracy
        if best_accuracy <= test_accuracy(data, data_sample, current, best_accuracy_global, False):
            return current
        current = best_neighbor
def generate_random(i):
return random_with_range(0, 5)
def scalar():
best_accuracy = 0
count = 0
data_sample = pd.read_csv('diabetes_app.csv')
data = pd.read_csv('diabetes_dataset.csv')
data_sample = generate_norm_data(data_sample)
data = generate_norm_data(data)
data_sample.to_csv("diabetes_sample_norm.csv", index=False)
data.to_csv("diabetes_norm.csv", index=False)
scalars = [1, 1, 1, 1, 1, 1, 1, 1]
while True:
data_sample = pd.read_csv('diabetes_sample_norm.csv')
data = pd.read_csv('diabetes_norm.csv')
ans = simulated_annealing(data, data_sample, scalars, best_accuracy)
best_accuracy = test_accuracy(data, data_sample, ans, best_accuracy, True)
scalars = list(map(generate_random, scalars))
count += 1
def main():
    scalar()
if __name__ == "__main__":
    main()
|
# coding: utf-8
"""
Hydrogen Proton API
Financial engineering module of Hydrogen Atom # noqa: E501
OpenAPI spec version: 1.7.18
Contact: info@hydrogenplatform.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class BacktestRequest(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'start_date': 'date',
'end_date': 'date',
'model_id': 'str',
'asset_size': 'float',
'initial_weights': 'object',
'settings': 'object',
'trades': 'bool',
'stats': 'bool',
'holdings': 'bool',
'asset_sizes': 'bool'
}
attribute_map = {
'start_date': 'start_date',
'end_date': 'end_date',
'model_id': 'model_id',
'asset_size': 'asset_size',
'initial_weights': 'initial_weights',
'settings': 'settings',
'trades': 'trades',
'stats': 'stats',
'holdings': 'holdings',
'asset_sizes': 'asset_sizes'
}
def __init__(self, start_date=None, end_date=None, model_id=None, asset_size=None, initial_weights=None, settings=None, trades=True, stats=True, holdings=True, asset_sizes=True): # noqa: E501
"""BacktestRequest - a model defined in Swagger""" # noqa: E501
self._start_date = None
self._end_date = None
self._model_id = None
self._asset_size = None
self._initial_weights = None
self._settings = None
self._trades = None
self._stats = None
self._holdings = None
self._asset_sizes = None
self.discriminator = None
self.start_date = start_date
self.end_date = end_date
if model_id is not None:
self.model_id = model_id
if asset_size is not None:
self.asset_size = asset_size
if initial_weights is not None:
self.initial_weights = initial_weights
if settings is not None:
self.settings = settings
if trades is not None:
self.trades = trades
if stats is not None:
self.stats = stats
if holdings is not None:
self.holdings = holdings
if asset_sizes is not None:
self.asset_sizes = asset_sizes
@property
def start_date(self):
"""Gets the start_date of this BacktestRequest. # noqa: E501
:return: The start_date of this BacktestRequest. # noqa: E501
:rtype: date
"""
return self._start_date
@start_date.setter
def start_date(self, start_date):
"""Sets the start_date of this BacktestRequest.
:param start_date: The start_date of this BacktestRequest. # noqa: E501
:type: date
"""
if start_date is None:
raise ValueError("Invalid value for `start_date`, must not be `None`") # noqa: E501
self._start_date = start_date
@property
def end_date(self):
"""Gets the end_date of this BacktestRequest. # noqa: E501
:return: The end_date of this BacktestRequest. # noqa: E501
:rtype: date
"""
return self._end_date
@end_date.setter
def end_date(self, end_date):
"""Sets the end_date of this BacktestRequest.
:param end_date: The end_date of this BacktestRequest. # noqa: E501
:type: date
"""
if end_date is None:
raise ValueError("Invalid value for `end_date`, must not be `None`") # noqa: E501
self._end_date = end_date
@property
def model_id(self):
"""Gets the model_id of this BacktestRequest. # noqa: E501
:return: The model_id of this BacktestRequest. # noqa: E501
:rtype: str
"""
return self._model_id
@model_id.setter
def model_id(self, model_id):
"""Sets the model_id of this BacktestRequest.
:param model_id: The model_id of this BacktestRequest. # noqa: E501
:type: str
"""
self._model_id = model_id
@property
def asset_size(self):
"""Gets the asset_size of this BacktestRequest. # noqa: E501
:return: The asset_size of this BacktestRequest. # noqa: E501
:rtype: float
"""
return self._asset_size
@asset_size.setter
def asset_size(self, asset_size):
"""Sets the asset_size of this BacktestRequest.
:param asset_size: The asset_size of this BacktestRequest. # noqa: E501
:type: float
"""
if asset_size is not None and asset_size < 0: # noqa: E501
raise ValueError("Invalid value for `asset_size`, must be a value greater than or equal to `0`") # noqa: E501
self._asset_size = asset_size
@property
def initial_weights(self):
"""Gets the initial_weights of this BacktestRequest. # noqa: E501
:return: The initial_weights of this BacktestRequest. # noqa: E501
:rtype: object
"""
return self._initial_weights
@initial_weights.setter
def initial_weights(self, initial_weights):
"""Sets the initial_weights of this BacktestRequest.
:param initial_weights: The initial_weights of this BacktestRequest. # noqa: E501
:type: object
"""
self._initial_weights = initial_weights
@property
def settings(self):
"""Gets the settings of this BacktestRequest. # noqa: E501
:return: The settings of this BacktestRequest. # noqa: E501
:rtype: object
"""
return self._settings
@settings.setter
def settings(self, settings):
"""Sets the settings of this BacktestRequest.
:param settings: The settings of this BacktestRequest. # noqa: E501
:type: object
"""
self._settings = settings
@property
def trades(self):
"""Gets the trades of this BacktestRequest. # noqa: E501
:return: The trades of this BacktestRequest. # noqa: E501
:rtype: bool
"""
return self._trades
@trades.setter
def trades(self, trades):
"""Sets the trades of this BacktestRequest.
:param trades: The trades of this BacktestRequest. # noqa: E501
:type: bool
"""
self._trades = trades
@property
def stats(self):
"""Gets the stats of this BacktestRequest. # noqa: E501
:return: The stats of this BacktestRequest. # noqa: E501
:rtype: bool
"""
return self._stats
@stats.setter
def stats(self, stats):
"""Sets the stats of this BacktestRequest.
:param stats: The stats of this BacktestRequest. # noqa: E501
:type: bool
"""
self._stats = stats
@property
def holdings(self):
"""Gets the holdings of this BacktestRequest. # noqa: E501
:return: The holdings of this BacktestRequest. # noqa: E501
:rtype: bool
"""
return self._holdings
@holdings.setter
def holdings(self, holdings):
"""Sets the holdings of this BacktestRequest.
:param holdings: The holdings of this BacktestRequest. # noqa: E501
:type: bool
"""
self._holdings = holdings
@property
def asset_sizes(self):
"""Gets the asset_sizes of this BacktestRequest. # noqa: E501
:return: The asset_sizes of this BacktestRequest. # noqa: E501
:rtype: bool
"""
return self._asset_sizes
@asset_sizes.setter
def asset_sizes(self, asset_sizes):
"""Sets the asset_sizes of this BacktestRequest.
:param asset_sizes: The asset_sizes of this BacktestRequest. # noqa: E501
:type: bool
"""
self._asset_sizes = asset_sizes
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(BacktestRequest, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, BacktestRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
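# A minimal usage sketch, not part of the generated API surface: the keyword
# arguments follow swagger_types above, and the dates and model_id used here are
# placeholders rather than real Hydrogen identifiers.
if __name__ == "__main__":
    import datetime
    example = BacktestRequest(
        start_date=datetime.date(2017, 1, 1),
        end_date=datetime.date(2017, 12, 31),
        model_id="example_model_id",  # placeholder, not a real model id
        asset_size=10000.0,
    )
    # to_dict() walks swagger_types and returns the set fields as a plain dict
    print(example.to_dict())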
|
import math
import numpy as np
import torch
import torch.nn as nn
import pl_bolts.models.vision.unet as plt_unet
import PyGenBrix.dist_layers.common_layers as dl
base_slice = (slice(0, None, 1), slice(0, None, 1))
ground_slice = (slice(0, None, 2), slice(0, None, 2))
upsampling_slices = [(slice(1, None, 2), slice(1, None, 2)),
(slice(0, None, 2), slice(1, None, 2)),
(slice( 1, None, 2), slice(0, None, 2))]
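# The ground slice picks the coarse (even, even) pixel grid that is already known at each
# upsampling stage; the three upsampling slices cover the remaining (odd, odd), (even, odd)
# and (odd, even) positions, which are predicted block by block.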
def delete_batch_norm_unet(unet):
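    # Strip the BatchNorm layers from a pl_bolts UNet in place: deleting indices 1 and 3 from
    # each block's Sequential drops both norm layers (the second index accounts for the shift
    # caused by the first deletion).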
del unet.layers[0].net[1]
del unet.layers[0].net[3]
for l in range(1, len(unet.layers)//2):
del unet.layers[l].net[1].net[1]
del unet.layers[l].net[1].net[3]
for l in range((len( unet.layers)+1)//2, len(unet.layers)-1 ):
del unet.layers[l].conv.net[1]
del unet.layers[l].conv.net[3]
class _ParallelCNNDistribution(nn.Module):
def __init__(self, event_shape, output_distribution_layer, num_upsampling_stages, max_unet_layers=3, num_conditional_channels=None):
super(_ParallelCNNDistribution, self).__init__()
self.output_distribution_layer = output_distribution_layer
num_output_distribution_params = output_distribution_layer.params_size(1)
input_channels = event_shape[0]
if num_conditional_channels is not None:
input_channels += num_conditional_channels
base_width = event_shape[1]/2**num_upsampling_stages
unet_num_layers = int(min( math.log( base_width )+1, max_unet_layers))
self.base_nets = nn.ModuleList([
plt_unet.UNet(num_classes=num_output_distribution_params, input_channels=input_channels, num_layers=unet_num_layers) for c in range(event_shape[0])])
upsampler_nets = []
for l in range(num_upsampling_stages):
output_width = base_width * 2**(l+1)
unet_num_layers = int(min(math.log(output_width)+1, max_unet_layers))
upsampler_nets.append(
nn.ModuleList([
nn.ModuleList([plt_unet.UNet(num_output_distribution_params, input_channels=input_channels, num_layers=unet_num_layers) for c in range(event_shape[0])]) for s in range(3)]))
self.num_upsampling_stages = len(upsampler_nets)
self.upsampler_nets = nn.ModuleList(upsampler_nets)
self.event_shape = event_shape
self.num_conditional_channels = num_conditional_channels
#Note this will alter masked_value
def log_prob_block(self, upsampled_images, distribution_params, masked_value, block_parallel_cnns, slice):
block_log_prob = 0.0
for channel in range(len(block_parallel_cnns)):
if distribution_params is not None:
network_input = torch.cat((masked_value, distribution_params), dim=1 )
else:
network_input = torch.cat([masked_value], dim=1)
output_distribution_params = block_parallel_cnns[channel](network_input)
block_log_prob += self.output_distribution_layer(output_distribution_params[:,:,slice[0],slice[1]]).log_prob(upsampled_images[:,channel:channel+1,slice[0],slice[1]])["log_prob"]
masked_value[:,channel,slice[0],slice[1]] = upsampled_images[:,channel,slice[0],slice[1]]
return block_log_prob
#Note, alters samples
def sample_block(self, samples, distribution_params, block_parallel_cnns, slice, temperature=1.0):
for channel in range(len(block_parallel_cnns)):
if distribution_params is not None:
network_input = torch.cat((samples, distribution_params), dim=1 )
else:
network_input = torch.cat([samples], dim=1)
output_distribution_params = block_parallel_cnns[channel](network_input)
if temperature > .01:
samples[:,channel,slice[0],slice[1]] = self.output_distribution_layer(output_distribution_params).sample(temperature)[:,0,slice[0],slice[1]]
else:
samples[:,channel,slice[0],slice[1]] = self.output_distribution_layer(output_distribution_params).mode()[:,0,slice[0],slice[1]]
#Note, alters samples
def mode_block(self, samples, distribution_params, block_parallel_cnns, slice):
        self.sample_block(samples, distribution_params, block_parallel_cnns, slice, 0.0)
def upsampler_log_prob(self, value, distribution_params, parallel_cnns):
logging_dict = {}
masked_value = torch.zeros_like(value)
masked_value[:,:,ground_slice[0],ground_slice[1]] = value[:,:,ground_slice[0],ground_slice[1]]
log_prob = 0.0
for s in range(3):
block_log_prob = self.log_prob_block(value, distribution_params, masked_value, parallel_cnns[s], upsampling_slices[s])
logging_dict["block"+str(s)+"_log_prob"] = block_log_prob
log_prob += block_log_prob
logging_dict["log_prob"] = log_prob
return logging_dict
def upsampler_sample(self, downsampled_images, distribution_params, parallel_nets, temperature=1.0):
samples = torch.zeros([downsampled_images.shape[0], downsampled_images.shape[1], downsampled_images.shape[2]*2, downsampled_images.shape[3]*2], device=downsampled_images.device)
samples[:,:,ground_slice[0],ground_slice[1]] = downsampled_images
for s in range(3):
self.sample_block(samples, distribution_params, parallel_nets[s], upsampling_slices[s], temperature)
return samples
def upsampler_mode(self, downsampled_images, distribution_params, parallel_nets):
        return self.upsampler_sample(downsampled_images, distribution_params, parallel_nets, 0.0)
def log_prob(self, value, conditionals=None):
if self.num_conditional_channels is not None:
if value.size()[0] != conditionals.size()[0]:
raise RuntimeError("value batch size {}, but conditionals has batch size {}"
.format(value.shape[0], conditionals.shape[0]))
if value.size()[1:4] != torch.Size( self.event_shape):
raise RuntimeError("value shape {}, but event_shape has shape {}"
.format(value.shape[1:4], self.event_shape))
base_samples = value[:,:,::2**self.num_upsampling_stages,::2**self.num_upsampling_stages]
if conditionals is not None:
base_distribution_params = conditionals[:,:,::2**self.num_upsampling_stages,::2**self.num_upsampling_stages]
else:
base_distribution_params = None
logging_dict = {}
log_prob = 0.0
masked_value = torch.zeros_like(base_samples)
#predict all base pixels
log_prob = self.log_prob_block(base_samples, base_distribution_params, masked_value, self.base_nets, base_slice)
logging_dict["base_log_prob"] = log_prob.clone()
for level in range( self.num_upsampling_stages ):
level_subsample_rate = 2**(self.num_upsampling_stages-level-1)
if conditionals is not None:
upsample_distribution_params = conditionals[:,:,::level_subsample_rate,::level_subsample_rate]
else:
upsample_distribution_params = None
upsample_log_prob_dict = self.upsampler_log_prob(
value[:,:,::level_subsample_rate,::level_subsample_rate],
upsample_distribution_params,
self.upsampler_nets[ level ] )
for k, v in upsample_log_prob_dict.items():
logging_dict["upsample_level_"+str(level)+"/"+k] = upsample_log_prob_dict[k]
log_prob += upsample_log_prob_dict["log_prob"]
logging_dict["log_prob"] = log_prob
return logging_dict
def sample(self, conditionals=None, temperature=1.0):
with torch.no_grad():
sample = torch.zeros([1, self.event_shape[0], self.event_shape[1]//2**self.num_upsampling_stages, self.event_shape[2]//2**self.num_upsampling_stages ], device=next(self.parameters()).device)
base_conditionals = conditionals[:,:,::2**self.num_upsampling_stages,::2**self.num_upsampling_stages] if conditionals is not None else None
self.sample_block(sample, base_conditionals, self.base_nets, base_slice, temperature)
for level in range(self.num_upsampling_stages):
level_subsample_rate = 2**(self.num_upsampling_stages-level-1)
upsample_conditionals = conditionals[:,:,::level_subsample_rate,::level_subsample_rate] if conditionals is not None else None
sample = self.upsampler_sample(
sample,
upsample_conditionals,
self.upsampler_nets[ level ], temperature)
return sample
def mode(self, conditionals=None):
return self.sample(conditionals, temperature=0.0)
class ParallelCNNDistribution(dl.Distribution):
def __init__(self, event_shape, output_distribution_layer, num_upsampling_stages, max_unet_layers=3):
super(ParallelCNNDistribution, self).__init__()
        self.distribution = _ParallelCNNDistribution(event_shape, output_distribution_layer, num_upsampling_stages, max_unet_layers=max_unet_layers)
class ParallelCNNLayer(dl.Layer):
def __init__(self, event_shape, output_distribution_layer, num_upsampling_stages, max_unet_layers, num_conditional_channels):
        super(ParallelCNNLayer, self).__init__(_ParallelCNNDistribution(event_shape, output_distribution_layer, num_upsampling_stages, max_unet_layers=max_unet_layers, num_conditional_channels=num_conditional_channels))
|
import json
import os
from paver.easy import pushd
import numpy as np
import matplotlib
matplotlib.use('Agg')  # use a non-interactive backend when running on a server
import matplotlib.pyplot as plt
import pickle
import csv
from sklearn import metrics
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('directory')
# opts = parser.parse_args()
summary = Summary()
figs_dir = 'summary_figs'
os.path.exists(figs_dir) or os.mkdir(figs_dir)
with pushd(figs_dir):
summary.a_rand_index(summary.sample_letters,summary.input_data,'l')
summary.a_rand_index(summary.sample_states,summary.input_data2,'s')
with open('word_list.txt',"w") as f:
for num, key in enumerate(summary.word_list):
f.write("iter%d:: " % num)
for num2, key2 in enumerate(key):
f.write("%d:" % num2 + str(key2) + " ")
f.write("\n")
# plot sample states and letters
for idx in range(summary.data_size):
summary.plot_states(idx)
plt.savefig('sample_states_%d.png' % idx)
summary.plot_state_boundaries(idx)
plt.savefig('state_boundary_%d.png' % idx)
summary.plot_letters(idx)
plt.savefig('sample_letters_%d.png' % idx)
plt.clf()
class Summary(object):
def __init__(self, dirpath = '.'):
with open('parameter.json') as f:
params = self.params = json.load(f)
with open('fig_title.json') as f2:
fig_title = self.fig_title = json.load(f2)
        with open('sample_word_list.txt', 'rb') as f3:
self.word_list = pickle.load(f3)
self.data_size = params['DATA_N']
self.input_data=[]
self.input_data2=[]
for i in fig_title:
data_l = np.loadtxt(i + ".txt")
data_l2 = np.loadtxt(i + ".lab")
self.input_data.append(data_l[0])
self.input_data2.append(data_l2)
self.sample_states = [np.loadtxt('sample_states_%d.txt' % i)for i in range(params['DATA_N'])]
self.sample_letters = [np.loadtxt('sample_letters_%d.txt' % i)for i in range(params['DATA_N'])]
self.state_ranges = []
for i in range(params['DATA_N']):
            with open('state_ranges_%d.txt' % i, 'rb') as f:
self.state_ranges.append(pickle.load(f))
llist = np.loadtxt("loglikelihood.txt").tolist()
self.maxlikelihood = (max(llist), llist.index(max(llist)))
def a_rand_index(self,sample_data,true_data,char):
RIs=[]
for idx in range(len(sample_data[0])):
true=[]
sample=[]
for key,key2 in zip(sample_data,true_data):
sample.extend(key[idx])
true.extend(key2)
ris=metrics.adjusted_rand_score(true, sample)
RIs.append(ris)
np.savetxt("aRIs_"+char+".txt",RIs)
true=[]
sample=[]
for key,key2 in zip(sample_data,true_data):
sample.extend(key[99])
true.extend(key2)
ri=metrics.adjusted_rand_score(true, sample)
str="max_adjusted_rand_index_"+char+".txt"
f = open(str,'w')
writer = csv.writer(f)
writer.writerow(["adjusted_rand_score",ri])
def _plot_discreate_sequence(self, true_data, title, sample_data, label = u'', plotopts = {}):
ax = plt.subplot2grid((10, 1), (1, 0))
plt.sca(ax)
ax.matshow([true_data], aspect = 'auto')
plt.ylabel('Truth Label')
# label matrix
ax = plt.subplot2grid((10, 1), (2, 0), rowspan = 8)
plt.suptitle(title)
plt.sca(ax)
ax.matshow(sample_data, aspect = 'auto', **plotopts)
plt.xlabel('Frame')
plt.ylabel('Iteration')
plt.xticks(())
def _plot_label_boundary(self, true_data, title, sample_data, label = u''):
boundaries = [[stop for state, (start, stop) in r] for r in sample_data]
size = boundaries[0][-1]
data = np.zeros((len(sample_data), size))
for i, b in enumerate(boundaries):
for x in b[:-1]:
data[i, x] = 1.0
self._plot_discreate_sequence(true_data, title, data, label, plotopts = {'cmap': 'Greys'})
def plot_letters(self, idx):
self._plot_discreate_sequence(
self.input_data[idx],
self.fig_title[idx],
self.sample_letters[idx],
label=self.sample_letters[idx]
)
def plot_states(self, idx):
self._plot_discreate_sequence(
self.input_data2[idx],
self.fig_title[idx],
self.sample_states[idx],
label=self.sample_states[idx]
)
def plot_state_boundaries(self, idx):
self._plot_label_boundary(
self.input_data2[idx],
self.fig_title[idx],
self.state_ranges[idx],
label=self.sample_states[idx]
)
if __name__ == '__main__':
main()
|
from django.urls import path
from rest_framework import routers
from .views import UniversityViewSet, StudentViewSet, index, students
router = routers.DefaultRouter()
router.register("university", UniversityViewSet, basename='uni')
router.register("students", StudentViewSet)
urlpatterns = [
path('index/', index, name='index'),
path('show_students/', students, name='show_students')
]
urlpatterns += router.urls
|
import os
import sys
import cv2
import numpy as np
import random
def augment_image(input_img, mode):
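    # mode 2 darkens the image, mode 3 applies a sharpening kernel, any other mode brightens it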
input_img = cv2.imread(input_img)
if mode == 2:
bright = np.ones(input_img.shape, dtype="uint8") * 70
return cv2.subtract(input_img, bright)
elif mode == 3:
        sharpening = np.array([[-1, -1, -1], [-1, 10, -1], [-1, -1, -1]])
        return cv2.filter2D(input_img, -1, sharpening)
else:
bright = np.ones(input_img.shape, dtype="uint8") * 70
return cv2.add(input_img, bright)
if __name__ == "__main__":
in_dir = "./dataset/" + sys.argv[1]
out_dir = "./dataset_new/" + sys.argv[1]
    augment_mode = int(sys.argv[2])  # cast so the integer comparisons in augment_image work
for data_inner_dir in os.listdir(in_dir):
try:
for filename in os.listdir(os.path.join(in_dir, data_inner_dir)):
if str(filename).endswith(".png"):
in_file = os.path.join(
os.path.join(in_dir, data_inner_dir), filename
)
out_file = os.path.join(
os.path.join(out_dir, data_inner_dir), filename
)
try:
os.remove(out_file)
except FileNotFoundError:
pass
image = augment_image(in_file, augment_mode)
cv2.imwrite(out_file, image)
except NotADirectoryError:
pass |
#!/usr/bin/env python
#coding:utf-8
"""
Author: LICFACE --<licface@yahoo.com>
Purpose: Download from Androidfilehost helper
Created: 7/18/2018
"""
import sys
import os
import requests
from bs4 import BeautifulSoup as bs
from urlparse import urlparse
from debug import *
import re
#import cfscrape
import argparse
import json
import idm
import wget
import traceback
class AFH(object):
def __init__(self):
        super(AFH, self).__init__()
self.URL = 'https://androidfilehost.com/'
self.Sess = requests.Session()
self.STATUS_CODE = ''
def setCookies(self, url = None, cookies = None):
'''
``params:``
* cookies => dict
'''
if not cookies:
cookies = self.getCookies(url)
if not cookies:
return {}, '', {}
debug(cookies = cookies)
cookies_dict = {}
cookies_str = ''
if cookies.keys():
for i in cookies.keys():
cookies_dict.update({str(i): cookies.get(i),})
cookies_str = cookies_str + str(i) + "=" + str(cookies.get(i)) + ";"
debug(cookies_str = cookies_str)
debug(cookies_dict = cookies_dict)
return cookies, cookies_str, cookies_dict
def getCookies(self, url = None, cookies = None):
if not url:
return False
debug(url = url)
req = self.Sess.get(url)
if not cookies:
cookies = req.cookies
return cookies
def setHeaders(self, accept = 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', connection = '', content_length = '', cache_control = '', upgrade_insecure_requests = '', referer = '', x_mod_sbb_ctype = '', x_requested_with = '', headers_response = None, url = None, cookies = None):
if not url:
url = self.URL
debug(url = url)
debug(cookies0 = cookies)
cookies, cookies_str, cookies_dict = self.setCookies(url, cookies)
host = urlparse(self.URL).netloc
headers = {}
headers.update({'Accept': accept,})
headers.update({'Accept-Encoding': 'gzip, deflate, br',})
headers.update({'Accept-Language': 'en-US,en;q=0.5',})
if connection:
headers.update({'Connection': connection,})
if content_length:
headers.update({'Content-Length': content_length,}) #62
if cache_control:
headers.update({'Cache-Control': cache_control,}) #max-age=3600
headers.update({'Cookie': cookies_str,})
headers.update({'Host': host,})
if referer:
headers.update({'Referer': referer,})
if upgrade_insecure_requests:
headers.update({'Upgrade-Insecure-Requests': upgrade_insecure_requests,})
headers.update({'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:62.0) Gecko/20100101 Firefox/62.0',})
if x_mod_sbb_ctype:
headers.update({'X_MOD_SBB_CTYPE': x_mod_sbb_ctype,})
if x_requested_with:
headers.update({'X-Requested-With': x_requested_with,})
if headers_response:
cookies_add_for_header = headers_response.get('Set-Cookie')
debug(cookies_add_for_header = cookies_add_for_header)
            cookies_add_for_header_list = re.split(", |; ", cookies_add_for_header)
debug(cookies_add_for_header_list = cookies_add_for_header_list)
cookies_add_for_header_dict = {}
for i in cookies_add_for_header_list:
data = re.split("=", str(i), 1)
if len(data) == 2:
key, value = data
cookies_add_for_header_dict.update({key: value,})
debug(cookies_add_for_header_dict = cookies_add_for_header_dict)
if cookies_add_for_header_dict.get('UTGv2'):
cookies_dict.update({'UTGv2': cookies_add_for_header_dict.get('UTGv2'),})
if cookies_add_for_header_dict.get('SPSI'):
cookies_dict.update({'SPSI': cookies_add_for_header_dict.get('SPSI'),})
if cookies_add_for_header_dict.get('spcsrf'):
cookies_dict.update({'spcsrf': cookies_add_for_header_dict.get('spcsrf'),})
if cookies_add_for_header_dict.get('sp_lit'):
cookies_dict.update({'sp_lit': cookies_add_for_header_dict.get('sp_lit'),})
debug(cookies_dict = cookies_dict)
cookies, cookies_str, cookies_dict = self.setCookies(cookies = cookies)
headers.update({'Cookie': cookies_str,})
debug(headers = headers)
debug(cookies_str = cookies_str)
debug(cookies_dict = cookies_dict)
return headers, cookies, cookies_str, cookies_dict
def getContent(self, url, headers = None):
if '/' in url[0]:
url = url[1:]
url = self.URL + url
debug(url = url)
if not headers:
headers, cookies, cookies_str, cookies_dict = self.setHeaders(cache_control= 'max-age=3600', upgrade_insecure_requests = '1')
debug(headers = headers)
req = self.Sess.get(url, cookies = cookies_dict, headers = headers)
#print "Content:"
#print req.content
print "STATUS: {0}[{1}]".format(str(req.status_code), str(req.ok))
return req.cookies, req.headers
def getDownloadLink(self, url = 'https://androidfilehost.com/?fid=890129502657592740'):
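        # First GET the file page to collect session cookies, then POST to the
        # libs/otf/mirrors.otf.php endpoint to retrieve the JSON list of download mirrors.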
cookies, headers = self.getContent(url) #example: https://androidfilehost.com/?fid=890129502657592740
debug(cookies0= cookies)
debug(headers0 = headers)
cookies, cookies_str, cookies_dict = self.setCookies(cookies= cookies)
debug(cookies_0 = cookies)
debug(cookies_str_0 = cookies_str)
debug(cookies_dict_0 = cookies_dict)
headers, cookies, cookies_str, cookies_dict = self.setHeaders(accept= '*/*', connection= 'keep-alive', content_length= '62', cookies = cookies, headers_response= headers)
debug(headers1 = headers)
debug(cookies_1 = cookies)
debug(cookies_str_1 = cookies_str)
debug(cookies_dict_1 = cookies_dict)
url1 = self.URL + "libs/otf/mirrors.otf.php"
headers.update({'X-MOD-SBB-CTYPE': 'xhr',})
headers.update({'X-Requested-With': 'XMLHttpRequest',})
debug(headers_x = headers)
data = {
'action': 'getdownloadmirrors',
'fid': re.split("=", str(urlparse(url).query), 1)[1],
'submit': 'submit',
}
debug(data = data)
debug(url1 = url1)
req = self.Sess.post(url1, cookies = cookies, headers = headers, data = data)
#cf = cfscrape.create_scraper()
#req = cf.get(url1, cookies = cookies, headers = headers, data = data)
contents = req.content
debug(STATUS_CODE = req.status_code)
debug(OK = req.ok)
debug(req_content = contents)
if req.status_code < 350:
self.STATUS_CODE = req.status_code
return json.loads(contents)
else:
self.STATUS_CODE = req.status_code
return {}
def usage(self):
parser = argparse.ArgumentParser(formatter_class= argparse.RawTextHelpFormatter)
parser.add_argument('URL', action = 'store', help = 'androidfilehost url, example: https://androidfilehost.com/?fid=890129502657592740')
parser.add_argument('-c', '--clip', action = 'store_true', help = "Download url from clipboard")
parser.add_argument('-d', '--download', action = 'store_true', help = 'it will download directly, for windows default using IDM if installed and wget (build in) if not exists and for *nix')
parser.add_argument('-p', '--path', action = 'store', help =\
'directory of downloaded', default = os.path.abspath( os.getcwd()))
parser.add_argument('-n', '--name', action = 'store', help = 'Option name it')
parser.add_argument('-i', '--wget', action = 'store_true', help = 'Use wget (build in) download manager instead')
parser.add_argument('-D', '--debug', action = 'store_true', help = 'Debugging Process')
if len(sys.argv) == 1:
parser.print_help()
else:
args = parser.parse_args()
if args.debug:
os.environ.update({'DEBUG': '1',})
else:
os.environ.update({'DEBUG': '',})
if args.clip:
try:
import clipboard
except ImportError:
print "Module Clipboard not installed, please install before or don't use clip arguments"
sys.exit(0)
data = self.getDownloadLink(args.URL)
if not data:
print "Error [%s]: Can't download, please contact support <licface@yahoo.com>" % (str(self.STATUS_CODE))
sys.exit(0)
debug(data_message = data.get('MESSAGE'))
if 'success' in data.get('MESSAGE'):
mirrors = data.get('MIRRORS')
if len(mirrors) > 1:
n = 1
for i in mirrors:
print str(n) + ". " + str(i.get('name'))
q = raw_input('Select Server downloading: ')
if str(q).isdigit() and not int(q) > len(mirrors):
URL = mirrors[int(q)-1].get('url').replace('\\', '')
debug(URL_SELECTED = URL)
if args.download:
if args.wget:
if args.name:
OUT = os.path.join(args.path, args.name)
else:
OUT = args.path
name = wget.download(str(URL), OUT)
print "FILE DOWNLOADED (WGET):", str(name)
else:
try:
#download(self, link, path_to_save=None, output=None, referrer=None, cookie=None, postData=None, user=None, password=None, confirm = False, lflag = None, clip=False)
dm = idm.IDMan()
dm.download(str(URL), args.path, args.name)
print "FILE DOWNLOADING (IDM):",\
os.path.join( args.path,\
                                             os.path.basename(URL))
except:
                                    if os.getenv('DEBUG') == '1':
traceback.format_exc()
if args.name:
OUT = os.path.join(args.path, args.name)
else:
OUT = args.path
debug(URL = URL)
name = wget.download(str(URL), OUT)
print "FILE DOWNLOADED (WGET ~ Exception):", str(name)
else:
URL = mirrors[0].get('url').replace('\\', '')
debug(URL_SELECTED = URL)
if args.download:
if args.wget:
if args.name:
OUT = os.path.join(args.path, args.name)
else:
OUT = args.path
name = wget.download(URL, OUT)
print "FILE DOWNLOADED (WGET):", str(name)
else:
try:
#download(self, link, path_to_save=None, output=None, referrer=None, cookie=None, postData=None, user=None, password=None, confirm = False, lflag = None, clip=False)
dm = idm.IDMan()
dm.download(str(URL), args.path, args.name)
print "FILE DOWNLOADING (IDM):",\
os.path.join( args.path,\
                                             os.path.basename(URL))
except:
if args.name:
OUT = os.path.join(args.path, args.name)
else:
OUT = args.path
name = wget.download(URL, OUT)
print "FILE DOWNLOADED (WGET ~ Exception):", str(name)
else:
print "Can't downloading ... !, please contact support (licface@yahoo.com)"
if __name__ == '__main__':
c = AFH()
c.usage()
#url = c.URL
#cookies, cookies_str, cookies_dict = c.setCookies(url)
#import pprint
#pprint.pprint(cookies)
#headers = c.setHeaders()
#pprint.pprint(headers)
#url_content = '?fid=890129502657592740'
#cookies1, headers1 = c.getContent(url_content)
#print "cookies1 =", cookies1
#print "headers1 =", headers1
#c.getDownloadLink() |
sal = float(input('\033[1;37mWhat is the employee\'s salary? \033[m'))
nsal = sal + (sal * 15 / 100)
print('\033[1;34mAn employee who earned R${}, with a 15% raise, now receives R${:.2f}'.format(sal, nsal))
|
# -*- encoding: utf-8 -*-
"""
License: MIT
Copyright (c) 2019 - present AppSeed.us
"""
from datetime import date
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, get_object_or_404, redirect
from django.template import loader
from django.http import HttpResponse
from django import template
from app import Populate
from app.models import personel , complain , crew , task , infrastructure
from .forms import NewCrew , NewTask
from django.core import serializers
import math
from numpy import linalg
#from app.Populate import *
@login_required(login_url="/login/")
def index(request):
context = {}
populating = Populate.Populate("index")
taskform = NewTask(request.POST or None)
if 'newtask' in request.POST:
if taskform.is_valid():
title = taskform.cleaned_data.get("title")
text = taskform.cleaned_data.get("text")
try:
task.objects.create(title = title, text = text )
msgproblem = 'New Task added'
except Exception as e:
                msgproblem = 'No task added: ' + str(e)
else:
msgproblem = 'No task added'
# data send it
lastweekdays = populating.lastweek().get('days')
lastweek = populating.lastweek().get('count')
sumlastweek = populating.lastweek().get('sum')
total = populating.count_incid()
resolved = populating.lastweek_resolved().get('count')
sumresolved = populating.lastweek_resolved().get('sum')
un_complain_table = complain.objects.filter(resolved=False)
task_table = task.objects.all()
perMonthCosts = populating.perMonthCosts()
perTypeIncidents = populating.perTypeIncidents()
perTypeFailPos = populating.perTypeFailPos()
perMonthIncidents = populating.perMonthIncidents()
context = {"lastweek" : lastweek ,
"lastdays" : lastweekdays ,
"total" : total,
"resolved" : resolved ,
"unresolved" : un_complain_table ,
"task_table" : task_table ,
"NewTask" : taskform ,
"sumlastweek" : sumlastweek ,
"sumresolved" : sumresolved,
"perMonthCosts" : perMonthCosts ,
"perTypeIncidents" : perTypeIncidents,
"perTypeFailPos" : perTypeFailPos,
"perMonthIncidents" : perMonthIncidents}
return render(request, "index.html" ,context)
@login_required(login_url="/login/")
def pages(request):
context = {}
msg = None
msgproblem = None
crewform = NewCrew(request.POST or None)
# All resource paths end in .html.
# Pick out the html file name from the url. And load that template.
if 'newcrew' in request.POST:
if crewform.is_valid():
name = crewform.cleaned_data.get("name")
working_hours = crewform.cleaned_data.get("working_hours")
crew_members = crewform.cleaned_data.get("crew_members")
complains_id = crewform.cleaned_data.get("complains_id")
total = complains_id.count(",") + 1
try:
crew.objects.create(name = name, working_hours = working_hours, crew_members = crew_members,complains_id=complains_id,total_assigments = total )
msgproblem = 'New crew added'
except Exception as e:
                msgproblem = 'No new crew: ' + str(e)
else:
msgproblem = 'No new crew'
try:
load_template = request.path.split('/')[-1]
html_template = loader.get_template( load_template )
if "ui-tables.html" in request.path:
crew_table = crew.objects.all()
personel_table = personel.objects.all()
context = { "table" : personel_table , "crewtable" : crew_table }
elif "crew_add.html" in request.path:
complain_table = complain.objects.filter(resolved=False)
personel_table = personel.objects.all()
context = { "comptable" : complain_table , "crewform" : crewform ,"msgproblem" : msgproblem, "pertable" : personel_table }
elif "ui-maps.html" in request.path:
infrastructure_table = infrastructure.objects.all()
data = serializers.serialize("json", infrastructure_table , ensure_ascii=False , fields=('google_location','type' ) )
context = { "infrastructure" : data , "infrastructure_table" : infrastructure_table }
elif "ui-notifications.html" in request.path:
populating = Populate.Populate("index")
perTypeAlerts1 = populating.perTypeAlerts1()
perTypeAlerts2 = populating.perTypeAlerts2()
perTypeAlerts3 = populating.perTypeAlerts3()
perTypeAlerts4 = populating.perTypeAlerts4()
perTypeAlerts5 = populating.perTypeAlerts5()
perTypeAlerts6 = populating.perTypeAlerts6()
curMonthIncidents = populating.curMonthIncidents()
curMonthType = populating.curMonthType()
curMonthCost = populating.curMonthCost()
successfulRepairs = populating.successfulRepairs()
context = {"perTypeAlerts1" : perTypeAlerts1 ,
"perTypeAlerts2" : perTypeAlerts2 ,
"perTypeAlerts3" : perTypeAlerts3 ,
"perTypeAlerts4" : perTypeAlerts4 ,
"perTypeAlerts5" : perTypeAlerts5 ,
"perTypeAlerts6" : perTypeAlerts6 ,
"curMonthIncidents" : curMonthIncidents ,
"curMonthType" : curMonthType,
"curMonthCost" : curMonthCost,
"successfulRepairs": successfulRepairs}
return HttpResponse(html_template.render(context, request))
except template.TemplateDoesNotExist:
html_template = loader.get_template( 'error-404.html' )
return HttpResponse(html_template.render(context, request))
except Exception as e:
html_template = loader.get_template( 'error-500.html' )
return HttpResponse(e)
def forms(request):
    context = {}
    try:
load_template = request.path.split('/')[-1]
html_template = loader.get_template( load_template )
return HttpResponse(html_template.render(context, request))
except template.TemplateDoesNotExist:
html_template = loader.get_template( 'error-404.html' )
return HttpResponse(html_template.render(context, request))
except:
html_template = loader.get_template( 'error-500.html' )
return HttpResponse(html_template.render(context, request))
@login_required(login_url="/login/")
def delete(request):
if "task" in request.GET:
context = {}
task.objects.filter(id = request.GET.get('task', '')).delete()
return redirect('/')
else:
context = {}
crew.objects.filter(UUID = request.GET.get('crew', '')).delete()
return redirect('/ui-tables.html')
def crewLogin(theuuid):
    theuuid = theuuid.replace(' ', '')
crewUUID = list(crew.objects.all())
finalUUID = 'crew object (' + theuuid + ')'
#return HttpResponse(str(crewUUID))
if str(finalUUID) in str(crewUUID):
return True
else:
return False
def euclidianDistance(x1, x2, y1, y2):
    # straight-line (Euclidean) distance between (x1, y1) and (x2, y2)
    return float(math.sqrt((float(x2) - float(x1)) ** 2 + (float(y2) - float(y1)) ** 2))
def smartPath():
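    # Greedy nearest-neighbour routing: starting from the fixed depot coordinates below,
    # repeatedly visit the closest unresolved complaint location, then mark all complaints resolved.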
firstTime = True
allComplains = complain.objects.all().filter(resolved = False)
google_locations = []
startX, startY = 37.9415179, 23.6506794
counter = 0
for entry in allComplains:
counter += 1
infID = str(entry.infrastructure_id).replace('infrastructure object (' , '').replace(')', '')
findinf = infrastructure.objects.all().filter(UUID = infID)
type = ''
for result in findinf:
type = str(result.google_location).split(',')
google_locations.append([float(type[0]),float(type[1])])
answer = float(10000000000000000000000.0)
answerX, answerY = 0.0, 0.0
done = []
for k in range(counter):
answer = float(10000000000000000000000.0)
for coords in google_locations:
if([coords[0], coords[1]] not in done):
                x = math.pow(startX - float(coords[0]), 2)
                y = math.pow(startY - float(coords[1]), 2)
final = math.pow(x+y , 0.5)
if final < answer:
answer = final
answerX, answerY = coords[0], coords[1]
done.append([startX, startY])
google_locations.remove([answerX, answerY])
startX, startY = float(answerX), float(answerY)
done.append([startX, startY])
bigString = ''
i = 1
for coords in done:
if firstTime:
bigString += "You are at " + str(coords) + "\n"
firstTime = False
else:
bigString += "Stop No " + str(i) + " at " + str(coords) + "\n"
i += 1
for entry in allComplains:
entry.resolved = True
entry.save()
return bigString
def givePreviousComplains(userAFM):
userComplains = complain.objects.all().filter(made_afm = userAFM)
answer = ""
complainResolved= ''
counter = 0
for entry in userComplains:
counter += 1
if entry.resolved == True:
complainResolved = 'Yes'
else:
            complainResolved = 'No'
answer += "Complain No " + str(entry.slug) + "\nMade in " + str(entry.created) + "\nNoted: " + str(entry.notes) + "\nResolved: " + complainResolved + "\n\n"
if counter > 0:
return str(answer)
else:
return "No complains found with given AFM!"
def returnOptions():
toreturn = infrastructure.objects.all()
answer = []
for entry in toreturn:
answer.append(entry.UUID)
answer.append(',')
return answer
def dbOnCreateResponse(afm, infID, dmgtype):
infrIdent = 'infrastructure object (' + infID + ')'
infr = infrastructure.objects.all().filter(UUID = infID)
for entry in infr:
infID = entry
try:
complain.objects.create(made_afm = afm, infrastructure_id = infID, notes = dmgtype)
return 'New complain added'
except Exception as e:
return e
def api(request):
if request.method == 'GET':
type = request.GET.get('action')
#arg = request.GET.get('uuid', '')
if type == 'login':
theuuid = request.GET.get('uuid')
if crewLogin(theuuid):
return HttpResponse("crew")
else:
return HttpResponse("notACrew")
elif type == 'ontofix':
return HttpResponse(str(smartPath()))
elif type == 'previous':
userAFM = request.GET.get('afm')
return HttpResponse(str(givePreviousComplains(userAFM)))
elif type == 'icomplain':
afm = request.GET.get('afm')
infID = request.GET.get('infID')
dmgtype = request.GET.get('type')
return HttpResponse(str(dbOnCreateResponse(afm, infID, dmgtype)))
elif type == 'givemeoptions':
return HttpResponse(returnOptions())
|
# make sure to install dependencies in 'conversion.requirements.txt'
import os
import os.path as osp
import shutil
import sqlite3
import numpy as np
import pandas as pd
import tables as pytables
import mdtraj as mdj
from wepy.util.mdtraj import mdtraj_to_json_topology
from wepy.util.json_top import json_top_chain_df, json_top_residue_df, json_top_atom_df
traj = mdj.load('../lysozyme_pxylene.pdb')
if osp.exists("outputs"):
shutil.rmtree("outputs")
os.makedirs("outputs")
# the JSON format
json_top = mdtraj_to_json_topology(traj.top)
with open('outputs/lysozyme_pxylene.top.json', 'w') as wf:
wf.write(json_top)
# FASTA residue sequence
fasta_str = traj.top.to_fasta(chain=0)
with open('outputs/lysozyme_pxylene.res.fasta', 'w') as wf:
wf.write(fasta_str)
## topology tables
# Bonds
# you can get a table using mdtraj, but we just use the bonds here
mdj_atoms_df, bonds = traj.top.to_dataframe()
# just the first two columns (atom indices) for our purposes
bonds = bonds[:,0:2]
# we can just write this multiple ways with numpy
np.savetxt('outputs/lysozyme_pxylene.bonds.npy_txt', bonds)
np.save('outputs/lysozyme_pxylene.bonds.npy', bonds)
# make a pandas data frame
bond_df = pd.DataFrame(bonds)
# but wepy provides the ability to get normalized versions for each
# level
chain_df = json_top_chain_df(json_top)
residue_df = json_top_residue_df(json_top)
atom_df = json_top_atom_df(json_top)
bond_df.to_csv('outputs/lysozyme_pxylene.bond.csv', index=False)
chain_df.to_csv('outputs/lysozyme_pxylene.chain.csv', index=False)
residue_df.to_csv('outputs/lysozyme_pxylene.residue.csv', index=False)
atom_df.to_csv('outputs/lysozyme_pxylene.atom.csv', index=False)
# to an SQLite3 database
db = sqlite3.Connection("outputs/lysozyme_pxylene.sqlite3")
bond_df.to_sql('bonds', db)
chain_df.to_sql('chains', db)
residue_df.to_sql('residues', db)
atom_df.to_sql('atoms', db)
# to an HDF5 file
bond_df.to_hdf('outputs/lysozyme_pxylene.top.h5', 'bonds')
chain_df.to_hdf('outputs/lysozyme_pxylene.top.h5', 'chains')
residue_df.to_hdf('outputs/lysozyme_pxylene.top.h5', 'residues')
atom_df.to_hdf('outputs/lysozyme_pxylene.top.h5', 'atoms')
# to an excel spreadsheet
with pd.ExcelWriter('outputs/lysozyme_pxylene.top.xlsx', mode='w') as writer:
bond_df.to_excel(writer, sheet_name='bonds')
chain_df.to_excel(writer, sheet_name='chains')
residue_df.to_excel(writer, sheet_name='residues')
atom_df.to_excel(writer, sheet_name='atoms')
## coordinates
# separately, in binary format
coords = traj.xyz
np.savez('outputs/lysozyme_pxylene_reference.npz', coords)
|
from __future__ import print_function
import os
import shutil
import re
import hashlib
try:
from urllib.request import urlretrieve
except ImportError:
from urllib import urlretrieve
from setuptools import setup
from distutils.core import Extension
def download_url(url):
package = os.path.basename(url)
if not os.path.isfile(package):
print('Downloading', url)
urlretrieve(url, package+'.download')
os.rename(package+'.download', package)
return package
ASF_URL = 'https://archive.apache.org/dist/'
APR_VERSION = '1.7.0'
APR_UTIL_VERSION = '1.6.1'
HTTPD_VERSION = '2.4.48'
APR_URL = ASF_URL + 'apr/apr-%s.tar.gz' % APR_VERSION
APR_UTIL_URL = ASF_URL + 'apr/apr-util-%s.tar.gz' % APR_UTIL_VERSION
HTTPD_URL = ASF_URL + 'httpd/httpd-%s.tar.gz' % HTTPD_VERSION
download_url(APR_URL)
download_url(APR_UTIL_URL)
download_url(HTTPD_URL)
PCRE_VERSION = '8.45'
PCRE_URL = 'https://ftp.pcre.org/pub/pcre/pcre-%s.tar.gz' % PCRE_VERSION
download_url(PCRE_URL)
VERSIONS_HASH = ':'.join([APR_VERSION, APR_UTIL_VERSION,
PCRE_VERSION, HTTPD_VERSION])
if not isinstance(VERSIONS_HASH, bytes):
VERSIONS_HASH = VERSIONS_HASH.encode('UTF-8')
VERSIONS_HASH = hashlib.md5(VERSIONS_HASH).hexdigest()
VERSION_HASH_FILE = os.path.join('build', VERSIONS_HASH)
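# Build APR, APR-util, PCRE and httpd only when this version combination has not been built
# before; the empty marker file named after the hash records a completed build.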
if not os.path.isfile(VERSION_HASH_FILE):
if not os.path.isdir('build'):
os.mkdir('build')
shutil.rmtree('build/httpd', ignore_errors=True)
builddir = os.path.join(os.getcwd(), 'build/httpd')
shutil.rmtree('src/httpd', ignore_errors=True)
destdir = os.path.join(os.getcwd(), 'src/httpd')
res = os.system('rm -rf build/apr-%(version)s && '
'tar -x -v -C build -f apr-%(version)s.tar.gz && '
'cd build/apr-%(version)s && '
'./configure --prefix=%(builddir)s && '
'make && make install' % dict(builddir=builddir,
version=APR_VERSION))
if res:
raise RuntimeError('Failed to build APR.')
res = os.system('rm -rf build/apr-util-%(version)s && '
'tar -x -v -C build -f apr-util-%(version)s.tar.gz && '
'cd build/apr-util-%(version)s && '
'./configure --prefix=%(builddir)s '
'--with-apr=%(builddir)s/bin/apr-1-config && '
'make && make install' % dict(builddir=builddir,
version=APR_UTIL_VERSION))
if res:
raise RuntimeError('Failed to build APR-UTIL.')
res = os.system('rm -rf build/pcre-%(version)s && '
'tar -x -v -C build -f pcre-%(version)s.tar.gz && '
'cd build/pcre-%(version)s && '
'./configure --prefix=%(builddir)s '
'--disable-cpp && '
'make && make install' % dict(builddir=builddir,
version=PCRE_VERSION))
if res:
raise RuntimeError('Failed to build PCRE.')
res = os.system('rm -rf build/httpd-%(version)s && '
'tar -x -v -C build -f httpd-%(version)s.tar.gz && '
'cd build/httpd-%(version)s && '
'./configure --prefix=%(builddir)s '
'--enable-mpms-shared=all --enable-so --enable-rewrite '
'--with-apr=%(builddir)s/bin/apr-1-config '
'--with-apr-util=%(builddir)s/bin/apu-1-config '
'--with-pcre=%(builddir)s/bin/pcre-config && '
'make && make install' % dict(builddir=builddir,
version=HTTPD_VERSION))
if res:
raise RuntimeError('Failed to build HTTPD.')
shutil.rmtree('build/httpd/build-1', ignore_errors=True)
shutil.rmtree('build/httpd/cgi-bin', ignore_errors=True)
shutil.rmtree('build/httpd/error', ignore_errors=True)
shutil.rmtree('build/httpd/htdocs', ignore_errors=True)
shutil.rmtree('build/httpd/icons', ignore_errors=True)
shutil.rmtree('build/httpd/logs', ignore_errors=True)
shutil.rmtree('build/httpd/man', ignore_errors=True)
shutil.rmtree('build/httpd/manual', ignore_errors=True)
shutil.rmtree('build/httpd/share', ignore_errors=True)
with open('build/httpd/build/config_vars.mk') as fpin:
config_vars = fpin.readlines()
with open('build/httpd/build/config_vars.mk', 'w') as fpout:
prefix = re.escape(os.path.join(os.getcwd(), 'build/httpd'))
for line in config_vars:
line = re.sub(prefix, '${mod_wsgi_httpd_prefix}', line)
print(line, end='', file=fpout)
shutil.move(builddir, destdir)
open('src/httpd/__init__.py', 'a').close()
open(VERSION_HASH_FILE, 'a').close()
package_files = []
for root, dirs, files in os.walk('src/httpd', topdown=False):
for name in files:
path = os.path.join(root, name).split('/', 1)[1]
package_files.append(path)
print('adding ', path)
long_description = open('README.rst').read()
setup(name = 'mod_wsgi-httpd',
version = '%s.1' % HTTPD_VERSION,
description = 'Installer for Apache httpd server.',
long_description = long_description,
author = 'Graham Dumpleton',
author_email = 'Graham.Dumpleton@gmail.com',
maintainer = 'Graham Dumpleton',
maintainer_email = 'Graham.Dumpleton@gmail.com',
url = 'http://www.modwsgi.org/',
bugtrack_url = 'https://github.com/GrahamDumpleton/mod_wsgi-httpd/issues',
license = 'Apache License, Version 2.0',
platforms = [],
download_url = None,
classifiers= [
'Development Status :: 6 - Mature',
'License :: OSI Approved :: Apache Software License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Operating System :: POSIX :: BSD',
'Operating System :: POSIX :: Linux',
'Operating System :: POSIX :: SunOS/Solaris',
'Programming Language :: Python',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Internet :: WWW/HTTP :: WSGI',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Server'
],
packages = ['mod_wsgi_packages', 'mod_wsgi_packages.httpd',
'mod_wsgi_packages.apxs'],
package_dir = {'mod_wsgi_packages': 'src'},
package_data = {'mod_wsgi_packages': package_files},
ext_modules = [Extension("mod_wsgi_packages.apxs._dummy", ["_module.c"])],
entry_points = { 'console_scripts':
['mod_wsgi-apxs = mod_wsgi_packages.apxs.__main__:main'],},
)
|
from graphene import ObjectType, Schema
from api.schema import ArticleListQuery
class Query(ArticleListQuery, ObjectType):
pass
schema = Schema(query=Query)
|
#!/usr/bin/env python3
# ==============================================================
# author: Lars Gabriel
#
# eval_exp1.py: Evaluate a set of partitions from a genome for:
# BRAKER1, BRAKER2, TSEBRA_default
# ==============================================================
import argparse
import subprocess as sp
import os
import csv
import sys
class EvalError(Exception):
pass
species_dir = ''
modes = ['cds', 'trans', 'gene']
measures = ['F1', 'Sn', 'Sp']
methods = ['BRAKER1', 'BRAKER2', 'TSEBRA_default']
methods_files = ['braker1/braker.gtf', 'braker2/{}/braker.gtf', 'tsebra_default/{}/tsebra_default.gtf']
test_level = ''
def main():
global species_dir, test_level
args = parseCmd()
species_dir = os.path.abspath(args.species_dir)
test_level = args.test_level
methods_files[1] = methods_files[1].format(args.test_level)
methods_files[2] = methods_files[2].format(args.test_level)
eval = {}
for method, gene_pred in zip(methods, methods_files):
eval.update({method : evaluation('{}/{}'.format(species_dir, gene_pred))})
for mea in measures:
single_eval(eval, mea)
sys.stderr.write('### Finished, results are located in ' \
+ '{}/tsebra_default/{}/\n'.format(species_dir, test_level))
def single_eval(eval, mea):
# prints a table for one measure (row = species, col = mode_method)
# header
tab = []
line = []
for mo in modes:
line += [' ', mo, ' ']
tab.append(line)
tab.append([] + methods*3)
# body
line = []
for mo in modes:
for meth in methods:
line.append(round(eval[meth][mo][mea],2))
tab.append(line)
csv_writer(tab, '{}/tsebra_default/{}/{}.eval.tab'.format(species_dir, test_level, mea))
def csv_writer(tab, out_path):
with open(out_path, 'w+') as file:
table = csv.writer(file, delimiter='\t')
for line in tab:
table.writerow(line)
def evaluation(gene_pred):
# compute for all measures (F1, Sn, Sp) on all eval_level (gene, transcript and CDS)
# returns dict eval[eval_level][measure] = value
eval = {}
# run evaluation script
cmd = "compute_accuracies.sh {}/annot/annot.gtf {}/annot/pseudo.gff3 {} gene trans cds".\
format(species_dir, species_dir, gene_pred)
p = sp.Popen(cmd, shell=True, stdout=sp.PIPE, stderr=sp.PIPE)
stdout, stderr = p.communicate()
if stderr.decode():
raise EvalError(stderr.decode())
stdout = stdout.decode()
stdout = [s.split('\t') for s in stdout.split('\n') if s]
# read result into eval
for line in stdout:
eval_level, measure = line[0].split('_')
if eval_level not in eval:
eval.update({eval_level : {}})
eval[eval_level].update({measure : float(line[1])})
# add F1 score
for key in eval:
eval[key].update({'F1' : (2 * eval[key]['Sn'] * eval[key]['Sp']) / \
(eval[key]['Sn'] + eval[key]['Sp'])})
return eval
def parseCmd():
"""Parse command line arguments
Returns:
dictionary: Dictionary with arguments
"""
parser = argparse.ArgumentParser(description='Evaluate predictions ' \
+ 'from: BRAKER1, BRAKER2, TSEBRA_default ')
parser.add_argument('--test_level', type=str,
help='One of "species_excluded", "family_excluded" or "order_excluded"')
parser.add_argument('--species_dir', type=str,
help='Directory containing the results of TSEBRA-experiment 1 for one species')
return parser.parse_args()
if __name__ == '__main__':
main()
|
from sprocket.lib import *
from sprocket.run import BLUEPRINT, prepare
|
#!/usr/bin/env python3
"""Calculates the Frechet Inception Distance (FID) to evalulate GANs
The FID metric calculates the distance between two distributions of images.
Typically, we have summary statistics (mean & covariance matrix) of one
of these distributions, while the 2nd distribution is given by a GAN.
When run as a stand-alone program, it compares the distribution of
images that are stored as PNG/JPEG at a specified location with a
distribution given by summary statistics (in pickle format).
The FID is calculated by assuming that X_1 and X_2 are the activations of
the pool_3 layer of the inception net for generated samples and real world
samples respectivly.
See --help to see further details.
Code apapted from https://github.com/bioinf-jku/TTUR to use PyTorch instead
of Tensorflow
Copyright 2018 Institute of Bioinformatics, JKU Linz
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import torch.backends.cudnn as cudnn
import pathlib
import cv2
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import torch
import numpy as np
from scipy.misc import imread
from scipy import linalg
from torch.autograd import Variable
from torch.nn.functional import adaptive_avg_pool2d
from inception import InceptionV3
import shutil
import random
from options.train_options import TrainOptions
opt = TrainOptions().parse()
# import option
# from desnet import DenseNet
# parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
# parser.add_argument('--path0', type=str, default='/home/jingjie/xinzi/dataset/CelebA/img_align_celeba', help=('Path to the generated images or to .npz statistic files'))
# parser.add_argument('--path1', type=str, default='/home/jingjie/xinzi/dataset/test_celeba', help=('Path to the generated images or to .npz statistic files'))
# parser.add_argument('--batch-size', type=int, default=1, help='Batch size to use')
# parser.add_argument('--dims', type=int, default=2048, choices=list(InceptionV3.BLOCK_INDEX_BY_DIM), help=('Dimensionality of Inception features to use.By default, uses pool3 features'))
# parser.add_argument('-c', '--gpu', default='1', type=str, help='GPU to use (leave blank for CPU only)')
def get_activations(images, model, batch_size=64, dims=2048, cuda=False, verbose=False):#dim=2048
"""Calculates the activations of the pool_3 layer for all images.
Params:
-- images : Numpy array of dimension (n_images, 3, hi, wi). The values
must lie between 0 and 1.
-- model : Instance of inception model
-- batch_size : the images numpy array is split into batches with
batch size batch_size. A reasonable batch size depends
on the hardware.
-- dims : Dimensionality of features returned by Inception
-- cuda : If set to True, use GPU
-- verbose : If set to True and parameter out_step is given, the number
of calculated batches is reported.
Returns:
-- A numpy array of dimension (num images, dims) that contains the
activations of the given tensor when feeding inception with the
query tensor.
"""
model.eval()
d0 = images.shape[0]
if batch_size > d0:
print(('Warning: batch size is bigger than the data size. '
'Setting batch size to data size'))
batch_size = d0
n_batches = d0 // batch_size
n_used_imgs = n_batches * batch_size
pred_arr = np.empty((n_used_imgs, dims))
for i in range(n_batches):
# if verbose:
# print('\rPropagating batch %d/%d' % (i + 1, n_batches),
# end='', flush=True)
start = i * batch_size
end = start + batch_size
batch = torch.from_numpy(images[start:end]).type(torch.FloatTensor)
batch = Variable(batch, volatile=True)
if cuda:
batch = batch.cuda()
# pred = model(batch)[0] # pred(bs*2048*1*1)
pred = model(batch)[-1]
# If model output is not scalar, apply global spatial average pooling.
# This happens if you choose a dimensionality not equal 2048.
# if pred.shape[2] != 1 or pred.shape[3] != 1:
# pred = adaptive_avg_pool2d(pred, output_size=(1, 1))
pred_arr[start:end] = pred.cpu().data.numpy().reshape(batch_size, -1)
if verbose:
print(' done')
return pred_arr
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
"""Numpy implementation of the Frechet Distance.
The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
and X_2 ~ N(mu_2, C_2) is
d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
Stable version by Dougal J. Sutherland.
Params:
-- mu1 : Numpy array containing the activations of a layer of the
inception net (like returned by the function 'get_predictions')
for generated samples.
-- mu2 : The sample mean over activations, precalculated on an
representive data set.
-- sigma1: The covariance matrix over activations for generated samples.
-- sigma2: The covariance matrix over activations, precalculated on an
representive data set.
Returns:
-- : The Frechet Distance.
"""
mu1 = np.atleast_1d(mu1)
mu2 = np.atleast_1d(mu2)
sigma1 = np.atleast_2d(sigma1)
sigma2 = np.atleast_2d(sigma2)
assert mu1.shape == mu2.shape, \
'Training and test mean vectors have different lengths'
assert sigma1.shape == sigma2.shape, \
'Training and test covariances have different dimensions'
diff = mu1 - mu2
# Product might be almost singular
covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
if not np.isfinite(covmean).all():
msg = ('fid calculation produces singular product; '
'adding %s to diagonal of cov estimates') % eps
print(msg)
offset = np.eye(sigma1.shape[0]) * eps
covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
# Numerical error might give slight imaginary component
if np.iscomplexobj(covmean):
if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
m = np.max(np.abs(covmean.imag))
raise ValueError('Imaginary component {}'.format(m))
covmean = covmean.real
tr_covmean = np.trace(covmean)
return (diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * tr_covmean)
def calculate_activation_statistics(images, model, batch_size=64, dims=2048, cuda=False, verbose=False):
"""Calculation of the statistics used by the FID.
Params:
-- images : Numpy array of dimension (n_images, 3, hi, wi). The values
must lie between 0 and 1.
-- model : Instance of inception model
-- batch_size : The images numpy array is split into batches with
batch size batch_size. A reasonable batch size
depends on the hardware.
-- dims : Dimensionality of features returned by Inception
-- cuda : If set to True, use GPU
-- verbose : If set to True and parameter out_step is given, the
number of calculated batches is reported.
Returns:
-- mu : The mean over samples of the activations of the pool_3 layer of
the inception model.
-- sigma : The covariance matrix of the activations of the pool_3 layer of
the inception model.
"""
act = get_activations(images, model, batch_size, dims, cuda, verbose)#(80,2048)
mu = np.mean(act, axis=0)#(2048,)
sigma = np.cov(act, rowvar=False)#(2048,2048)
return mu, sigma
def _compute_statistics_of_path(path, model, batch_size, dims, cuda):
if path.endswith('.npz'):
f = np.load(path)
m, s = f['mu'][:], f['sigma'][:]
f.close()
else:
path = pathlib.Path(path)
files = list(path.glob('*.jpg')) + list(path.glob('*.png'))
# print (files[0])
imgs = np.array([imread(str(fn),mode = 'RGB').astype(np.float32) for fn in files])
# imgs=imgs[:,:,:,np.newaxis]
print(imgs.shape)
##the same size of every pic
imgs = imgs.transpose((0, 3, 1, 2))
# Rescale images to be between 0 and 1
imgs /= 255
# #patch
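        # unfold cuts every image into non-overlapping 128x128 patches; the reshape turns the
        # patch dimension back into a batch of 3x128x128 images for the Inception network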
imgs = torch.from_numpy(imgs)
imgs=torch.nn.functional.unfold(input=imgs,kernel_size=(128,128),stride=(128,128)).transpose(1,2)
imgs=imgs.reshape(-1,3,128,128).numpy()
print(imgs.shape,"3333333")
m, s = calculate_activation_statistics(imgs, model, batch_size, dims, cuda)
## different size of every pic
# for i in range(0,len(imgs)):
# # Bring images to shape (B, 3, H, W)
# img = imgs[i]
# img = img[np.newaxis,:,:,:]
# print (img.shape)
# img = img.transpose((0, 3, 1, 2))
# # Rescale images to be between 0 and 1
# img /= 255
# m, s = calculate_activation_statistics(img, model, batch_size, dims, cuda)
return m, s
def calculate_fid_given_paths(paths0, paths1, batch_size, cuda, dims):
"""Calculates the FID of two paths"""
'''
for p in paths:
if not os.path.exists(p):
raise RuntimeError('Invalid path: %s' % p)
'''
block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]
# device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = InceptionV3([block_idx])
#class
# model = DenseNet(growthRate=6, depth=10, reduction=0.5, bottleneck=True, nClasses=11)
if cuda:
model.cuda()
'''
if device == 'cuda':
model = torch.nn.DataParallel(model)
cudnn.benchmark = True
'''
m1, s1 = _compute_statistics_of_path(paths0, model, batch_size, dims, cuda)#[80,2018]
m2, s2 = _compute_statistics_of_path(paths1, model, batch_size, dims, cuda)
fid_value = calculate_frechet_distance(m1, s1, m2, s2)
return fid_value
def random_copyfile(srcPath, dstPath, numfiles):
name_list = list(os.path.join(srcPath, name) for name in os.listdir(srcPath))
random_name_list = list(random.sample(name_list, numfiles))
if not os.path.exists(dstPath):
os.mkdir(dstPath)
for oldname in random_name_list:
shutil.copyfile(oldname, oldname.replace(srcPath, dstPath))
# if __name__ == '__main__':
# # path1 = './output/700'
# # # path1 = '/home/lingna/dataset/cuhk_a'
# # path2 = '/home/lingna/dataset/cuhk_a'
# # fid_value = calculate_fid_given_paths(path1, path2, 8, 1, 2048)
# # print(fid_value)
# for i in range(1, 11):
# path1 = '/home/lingna/dataset/HumanRankedDataset/version_10/'+ str(i) + '/rank0'
# # path1 = '/home/lingna/dataset/cuhk_a'
# path2 = '/home/lingna/dataset/HumanRankedDataset/version_10/'+ str(i) + '/rank1'
# path3 = '/home/lingna/dataset/HumanRankedDataset/version_10/'+ str(i) + '/rank2'
#
# fid_value1 = calculate_fid_given_paths(path1, path2, 8, 1, 2048)
# fid_value2 = calculate_fid_given_paths(path1, path3, 8, 1, 2048)
# print('part:%d, rank0-rank1:%.4f, rank0-rank2:%.4f' % (i, fid_value1, fid_value2))
if __name__ == '__main__':
# opt = option.init()
# f = open(opt.FID, 'a')
# device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
# for i in range(200,801,20):
# print(i)
# fid_value = calculate_fid_given_paths('/home/lixiang/lx/pix2pix-pytorch-master/output/CUHK_CLASS/'+str(i),'/home/lixiang/dataset/photosketch/CUFS/test/sketch', 8, opt.gpuid != '', 2048)
# print(fid_value)
# str1 = str(i) + ':' + str(fid_value) + '\n'
# f.write(str1)
# f.close()
os.environ["CUDA_VISIBLE_DEVICES"] = '1'
fid_value = calculate_fid_given_paths("/home/meimei/mayme/code/sketch_parsing_m3/results/spade_test/pixHD_test_rename",
"/home/meimei/mayme/code/sketch_parsing_m3/results/sketch_test_B/", 8, opt.gpu_ids != '', 2048)
print(fid_value)
# fid_value = calculate_fid_given_paths('/home/lixiang/lx/SAND-pytorvh-master/result/sand_pix_D/2.09/orggan/x',
# '/home/lixiang/dataset/test/cufs_sketch', 8, opt.gpuid != '', 2048)
# print(fid_value) |
"""
@file
@brief Implements command line ``python -m onnxcustom <command> <args>``.
"""
import fire
from onnxcustom import check
if __name__ == "__main__": # pragma: no cover
from onnxcustom.cli.profiling import nvprof2json
fire.Fire({
'check': check,
'nvprof2json': nvprof2json,
})
|
TREE = "#"
def hit_test(map_, y, x, width) -> bool:
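    # the map repeats horizontally, so wrap x back into [0, width)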
while x >= width:
x -= width
return map_[y][x] == TREE
def hit_test_map(map_, vy, vx) -> int:
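    # step down vy rows and right vx columns at a time, counting the trees hit on the way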
height, width = len(map_), len(map_[0])
r = 0
for x, y in enumerate(range(0, height, vy)):
r += hit_test(map_, y, x * vx, width)
return r
def convert_to_map(s: str) -> list:
return [n.strip() for n in s.strip("\n").splitlines()]
def hit_test_multi(s: str) -> int:
map_ = convert_to_map(s)
return hit_test_map(map_, 1, 1) * hit_test_map(map_, 1, 3) * hit_test_map(map_, 1, 5) * hit_test_map(map_, 1, 7) * hit_test_map(map_, 2, 1)
def run_tests():
test_input = """..##.......
#...#...#..
.#....#..#.
..#.#...#.#
.#...##..#.
..#.##.....
.#.#.#....#
.#........#
#.##...#...
#...##....#
.#..#...#.#"""
test_output = 336
test_map = convert_to_map(test_input)
assert hit_test_map(test_map, 1, 1) == 2
assert hit_test_map(test_map, 1, 3) == 7
assert hit_test_map(test_map, 1, 5) == 3
assert hit_test_map(test_map, 1, 7) == 4
assert hit_test_map(test_map, 2, 1) == 2
assert hit_test_multi(test_input) == test_output
def run() -> int:
with open("inputs/input_03.txt") as file:
data = file.read()
return hit_test_multi(data)
if __name__ == "__main__":
run_tests()
print(run())
|
#!C:\Users\USER\PycharmProjects\project\venv\Scripts\python.exe
print("Content-Type: text/html")
print()
import cgi
print("<h1 style = \"text-align:center;\">Congratulations</h1>")
print("<hr/>")
print("<h1 style = \"text-align:center;\">Registration has been Successful</h1>")
print("<h6 style = \"text-align:center; font-size:15px;\">Click Here to <a style = \"color:#EB5758; text-decoration:none\" href = \"log.html\">log in</a></h6>")
print("<body bgcolor = '#e0e5ec'>")
form = cgi.FieldStorage()
firstName = form.getvalue("fname")
lastName = form.getvalue("lname")
userName = form.getvalue("uname")
email = form.getvalue("email")
passs = form.getvalue("passs")
repass = form.getvalue("repass")
import mysql.connector
con = mysql.connector.connect(user = 'root', password = '', host = 'localhost', database = 'we4us')
cur = con.cursor()
cur.execute("insert into reginfo values(%s, %s, %s, %s, %s, %s)", (firstName, lastName, userName, email, passs, repass))
con.commit()
cur.close()
con.close() |
class Solution:
# @param A, a list of integers
# @return an integer
def firstMissingPositive(self, A):
i = 0
length = len(A)
while i < length:
while A[i] != i+1 and 1<= A[i] <= length and A[A[i]-1] != A[i]:
temp = A[i]
A[i] = A[A[i]-1]
A[temp-1] = temp
i += 1
for i in range(len(A)):
if A[i] != i+1:
return i+1
return length+1
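# Illustrative check (my addition, not part of the original snippet): the loop above
# cycle-sorts every value v in [1, len(A)] into slot v-1, so the first slot whose value
# does not match its 1-based index gives the missing positive.
if __name__ == "__main__":
    print(Solution().firstMissingPositive([3, 4, -1, 1]))  # expected: 2
    print(Solution().firstMissingPositive([1, 2, 0]))      # expected: 3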
|
import json
from mainengine import MainEngine
import traceback
settingFile = 'conf/kwargs.json'
loggingConfigFile = 'conf/logconfig.json'
# serverChanFile = 'conf/serverChan.json'
if __debug__:
settingFile = 'tmp/kwargs.json'
loggingConfigFile = 'tmp/logconfig.json'
# with open(serverChanFile, 'r') as f:
# serverChanUrls = json.load(f)['serverChanSlaveUrls']
with open(settingFile, 'r') as f:
kwargs = json.load(f)
with open(loggingConfigFile, 'r') as f:
logConfig = json.load(f)
mainEngine = MainEngine(logconfig=logConfig, **kwargs)
mainEngine.init()
mainEngine.start()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
from setuptools import setup, find_packages
from codecs import open
requires = [
'click',
'cookiecutter',
'networkx',
'numpy',
'pandas',
'tornado >= 4.2, < 5.0.0',
'tqdm',
]
extras_require = {
'dev': [
'coverage',
'flake8',
'pytest >= 3.6',
'pytest-cov',
'sphinx',
],
'docs': [
'sphinx',
]
}
version = ''
with open('mesa/__init__.py', 'r') as fd:
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
with open('README.rst', 'rb', encoding='utf-8') as f:
readme = f.read()
setup(
name='Mesa',
version=version,
description="Agent-based modeling (ABM) in Python 3+",
long_description=readme,
author='Project Mesa Team',
author_email='projectmesa@googlegroups.com',
url='https://github.com/projectmesa/mesa',
packages=find_packages(),
package_data={'mesa': ['visualization/templates/*.html', 'visualization/templates/css/*',
'visualization/templates/fonts/*', 'visualization/templates/js/*'],
'cookiecutter-mesa': ['cookiecutter-mesa/*']},
include_package_data=True,
install_requires=requires,
extras_require=extras_require,
keywords='agent based modeling model ABM simulation multi-agent',
license='Apache 2.0',
zip_safe=False,
classifiers=[
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Artificial Life',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3 :: Only',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Development Status :: 3 - Alpha',
'Natural Language :: English',
],
entry_points='''
[console_scripts]
mesa=mesa.main:cli
''',
)
|
a = []
print('WELCOME TO MY SMARTNESS TEST')
def q(qu, ae):
    qe = input(qu + '\n')
    # input() returns a string while the expected answer is numeric,
    # so compare numerically rather than comparing a str to an int/float
    try:
        return float(qe) == float(ae)
    except ValueError:
        return False
c = '' # current question text
d = '' # correct answer to the current question
def q_idea():
from random import randint as r
num = r(2, 3)
nums = []
if num == 2:
num_1 = r(10, 100)
num_2 = r(50, 150)
nums.append(num_1)
nums.append(num_2)
if num == 3:
num_1 = r(10, 100)
num_2 = r(50, 150)
num_3 = r(100, 200)
nums.append(num_1)
nums.append(num_2)
nums.append(num_3)
def question_def(opr):
if len(nums) == 2:
questionr = ('What is ' + str(nums[0]) + opr + str(nums[1]) + '?')
if len(nums) == 3:
questionr = ('What is ' + str(nums[0]) + opr + str(nums[1]) + opr + str(nums[2]) + '?')
return questionr
op = r(1, 4)
if op == 1:
question = question_def(' + ')
elif op == 2:
question = question_def(' - ')
elif op == 3:
question = question_def(' * ')
elif op == 4:
question = question_def(' / ')
global c
global d
c = question
if len(nums) == 2:
if op == 1:
d = nums[0] + nums[1]
if op == 2:
d = nums[0] - nums[1]
if op == 3:
d = nums[0] * nums[1]
if op == 4:
d = nums[0] / nums[1]
elif len(nums) == 3:
if op == 1:
d = nums[0] + nums[1] + nums[2]
if op == 2:
d = nums[0] - nums[1] - nums[2]
if op == 3:
d = nums[0] * nums[1] * nums[2]
if op == 4:
d = nums[0] / nums[1] / nums[2]
for i in range(9):
q_idea()
a.append(q(c, d))
i = 0
for item in a:
if item:
i += 1
rank = 'terrible.'
if i >= 1:
rank = 'bad.'
if i >= 3:
rank = 'ok.'
if i >= 5:
rank = 'pretty good.'
if i >= 7:
rank = 'good.'
if i >= 8:
rank = 'great!'
if i >= 9:
rank = 'Outstanding!!'
print('you ranked ' + rank)
|
import datetime
from app.models.base.base import BaseModel
from app import db
class UserProfitModel(db.Model, BaseModel):
__bind_key__ = "a_coffer"
__tablename__ = "user_profit"
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, default=0)
money = db.Column(db.Integer, default=0)
consumer_money = db.Column(db.Integer, default=0)
today_money = db.Column(db.Integer, default=0)
created_time = db.Column(db.DateTime, default=datetime.datetime.now)
updated_time = db.Column(db.DateTime, default=datetime.datetime.now, onupdate=datetime.datetime.now)
@staticmethod
def query_user_profit(user_id):
result = UserProfitModel.query.filter_by(user_id=user_id).first()
return result
|
import time
import sys
import pickle
from ..fakepath import new_fakefolder , fakepath_abs
from .filelock import FileLock
import os.path as P
import os
class LockerClient:
NAME = "YLocker"
VALFILE = "val.pkl"
EMPTY_FLAG = "_EMPTY"
def __init__(self):
pass
def getfolder(self , foldername):
        # Given a folder name, return its absolute path
return new_fakefolder(P.join(self.NAME , foldername))
def ensurefile(self , foldername):
        # Given a folder name, return the absolute path of its value file
filename = self.VALFILE
path = P.join(self.getfolder(foldername) , filename)
open(path , "ab").close()
return path
def encode(self , val):
return pickle.dumps(val)
def decode(self , content):
if len(content) == 0:
return None
return pickle.loads(content)
def get(self , key):
path = self.ensurefile(key)
with open(path , "rb") as fil:
content = fil.read()
return self.decode(content)
def set(self , key , val):
path = self.ensurefile(key)
        with FileLock(self.getfolder(key)): # lock, then write
with open(path , "wb") as fil:
fil.write(self.encode(val))
return True
def remove(self , key):
path = self.ensurefile(key)
        with FileLock(self.getfolder(key)): # lock, then remove the value file
os.remove(path)
return True
def plus(self , key , val = 1):
        '''Atomically add val (default 1) and return the resulting value.'''
path = self.ensurefile(key)
        with FileLock(self.getfolder(key)): # lock, then increment
with open(path , "rb") as fil:
store_val = self.decode(fil.read())
store_val = store_val + val
with open(path , "wb") as fil:
fil.write(self.encode(store_val))
return store_val
def set_if(self , key , expect_val , set_val):
        '''If the current value equals expect_val, set it to set_val.'''
path = self.ensurefile(key)
with FileLock(self.getfolder(key)):
with open(path , "rb") as fil:
store_val = self.decode(fil.read())
if store_val == expect_val:
with open(path , "wb") as fil:
fil.write(self.encode(set_val))
return True
def _list_folders(self , folder):
        '''Recursively list all target folders (those containing a value file).'''
ret = []
subfolers = os.listdir(folder)
        if self.VALFILE in subfolers: # a folder containing a value file is a target node
ret.append(folder)
for p in subfolers:
p = P.join(folder,p)
if P.isdir(p):
ret = ret + self._list_folders(p)
return ret
def ask_prefix(self , prefix , return_abs = False , only_suffix = False , not_none = False):
        '''Return all keys whose prefix is the given prefix.
        not_none: only return keys whose stored value is not None
        '''
path = self.getfolder(prefix)
        ret = self._list_folders(path) # list all value folders under the prefix
if not_none:
ret = [x for x in ret if self.get(x) is not None]
if return_abs:
return ret
if only_suffix:
            ret = [P.relpath(x , path) for x in ret] # path relative to the prefix
else:
the_path = self.getfolder("")
            ret = [P.relpath(x , the_path) for x in ret] # pseudo path relative to the storage root
return ret
def clear(self):
        '''Remove all keys.'''
all_dirs = self.ask_prefix("" , return_abs = True)
for p in all_dirs:
self.remove(p)
            if len( os.listdir(p) ) == 0: # only delete the folder if it is empty, otherwise keep it
os.removedirs( p )
self.clear_dir()
return True
def clear_dir(self , path = None):
        '''Remove all empty folders.'''
if path is None:
path = self.getfolder("")
        flag = True # whether this folder itself can be removed
all_dirs = [P.join(path,p) for p in os.listdir(path) ]
for p in all_dirs:
if P.isdir(p):
flag = flag and self.clear_dir(p)
else:
flag = False
        if flag: # at this point there should be no files left
try:
os.removedirs( path )
except Exception:
                flag = False # if the OS refuses to delete it, just leave it
return flag
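# Illustrative usage sketch (my addition; the key names are made up, and the relative
# imports above mean this module has to be used from inside its package):
#   client = LockerClient()
#   client.set("jobs/counter", 0)
#   client.plus("jobs/counter")                        # locked read-modify-write, returns 1
#   print(client.get("jobs/counter"))                  # -> 1
#   print(client.ask_prefix("jobs", only_suffix=True)) # -> ['counter']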
|
from setuptools import find_packages, setup
#from numpy.distutils.core import setup, Extension
from json import load as json_load
if __name__ == '__main__':
with open('setup.json', 'r') as info:
kwargs = json_load(info)
setup(
include_package_data=True,
packages=find_packages(),
**kwargs
)
|
##
# File: Connection.py
# Author: J. Westbrook
# Date: 1-Apr-2018
#
# Connection methods for Crate DB.
#
# Updates:
#
##
"""
Connection methods for Crate DB.
"""
__docformat__ = "restructuredtext en"
__author__ = "John Westbrook"
__email__ = "jwest@rcsb.rutgers.edu"
__license__ = "Apache 2.0"
import copy
import logging
from crate import client
# from crate.client.exceptions import (DatabaseError, OperationalError, ProgrammingError, Warning)
logger = logging.getLogger(__name__)
class Connection(object):
"""Class to encapsulate Crate RDBMS DBI connection."""
def __init__(self, cfgOb=None, infoD=None, resourceName=None, sectionName="site_info_configuration", verbose=False):
self.__verbose = verbose
self.__db = None
self.__dbcon = None
self.__infoD = infoD
self.__dbName = None
self.__dbHost = None
self.__dbUser = None
self.__dbPw = None
self.__dbSocket = None
self.__dbPort = None
self.__dbAdminDb = None
self.__dbPort = None
#
self.__defaultPort = 4200
self.__dbServer = "crate"
self.__resourceName = resourceName
self.__cfgOb = cfgOb
#
if infoD:
self.setPreferences(infoD)
#
if resourceName:
self.assignResource(resourceName, sectionName)
def getPreferences(self):
return self.__infoD
def setPreferences(self, infoD):
try:
self.__infoD = copy.deepcopy(infoD)
self.__dbName = self.__infoD.get("DB_NAME", None)
self.__dbHost = self.__infoD.get("DB_HOST", "localhost")
self.__dbUser = self.__infoD.get("DB_USER", None)
self.__dbPw = self.__infoD.get("DB_PW", None)
self.__dbSocket = self.__infoD.get("DB_SOCKET", None)
self.__dbServer = self.__infoD.get("DB_SERVER", "crate")
#
port = self.__infoD.get("DB_PORT", self.__defaultPort)
if port and str(port):
self.__dbPort = int(str(port))
except Exception as e:
logger.exception("Failing with %s", str(e))
def assignResource(self, resourceName=None, sectionName=None):
#
defaultPort = 4200
defaultHost = "localhost"
dbServer = "crate"
self.__resourceName = resourceName
infoD = {}
if not self.__cfgOb:
return infoD
#
if resourceName == "CRATE_DB":
infoD["DB_NAME"] = self.__cfgOb.get("CRATE_DB_NAME", sectionName=sectionName)
infoD["DB_HOST"] = self.__cfgOb.get("CRATE_DB_HOST", sectionName=sectionName)
infoD["DB_SOCKET"] = self.__cfgOb.get("CRATE_DB_SOCKET", default=None, sectionName=sectionName)
infoD["DB_PORT"] = int(str(self.__cfgOb.get("CRATE_DB_PORT", default=defaultPort, sectionName=sectionName)))
infoD["DB_USER"] = self.__cfgOb.get("CRATE_DB_USER_NAME", sectionName=sectionName)
infoD["DB_PW"] = self.__cfgOb.get("CRATE_DB_PASSWORD", sectionName=sectionName)
else:
infoD["DB_NAME"] = self.__cfgOb.get("DB_NAME", sectionName=sectionName)
infoD["DB_HOST"] = self.__cfgOb.get("DB_HOST", default=defaultHost, sectionName=sectionName)
infoD["DB_SOCKET"] = self.__cfgOb.get("DB_SOCKET", default=None, sectionName=sectionName)
infoD["DB_PORT"] = int(str(self.__cfgOb.get("DB_PORT", default=defaultPort, sectionName=sectionName)))
infoD["DB_USER"] = self.__cfgOb.get("DB_USER_NAME", sectionName=sectionName)
infoD["DB_PW"] = self.__cfgOb.get("DB_PASSWORD", sectionName=sectionName)
#
infoD["DB_SERVER"] = dbServer
self.setPreferences(infoD)
#
return copy.deepcopy(infoD)
#
def connect(self):
"""Create a database connection and return a connection object.
Returns None on failure
"""
#
crateHost = "{host}:{port}".format(host=self.__dbHost, port=self.__dbPort)
crateUri = "http://%s" % crateHost
logger.debug("Connection using uri %s", crateUri)
#
dbcon = client.connect(crateUri)
#
if self.__dbcon is not None:
# Close an open connection -
logger.info("Closing an existing connection.\n")
self.close()
try:
dbcon = self.__dbcon = dbcon
except Exception as e:
logger.exception("Connection error to server %s host %s port %d %s", self.__dbServer, self.__dbHost, self.__dbPort, str(e))
self.__dbcon = None
return self.__dbcon
def close(self):
"""Close any open database connection."""
if self.__dbcon is not None:
try:
self.__dbcon.close()
self.__dbcon = None
return True
except Exception as e:
logger.exception("Connection close error %s", str(e))
return False
def __enter__(self):
return self.connect()
def __exit__(self, *args):
return self.close()
|
"""
socket_server_credentials_template.py
Template to store credentials, port numbers, and IP addresses.
author: @justjoshtings
created: 3/16/2022
"""
PORT = int()
HOST_IP = ''
HOST_IP_alt = '' |
"""
molecool
A Python package for reading .pdb and .xyz files
"""
# Add imports here
from .functions import canvas, zen
from .molecule import build_bond_list
from .visualize import draw_molecule, bond_histogram
from .measure import calculate_distance, calculate_angle
from .atom_data import atomic_weights, atom_colors
from .io import open_pdb, open_xyz, write_xyz
#import molecool.functions
# Handle versioneer
from ._version import get_versions
versions = get_versions()
__version__ = versions['version']
__git_revision__ = versions['full-revisionid']
del get_versions, versions
|
# ----------------------------------------------------------------
# Copyright 2016 Cisco Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------
"""
printer_context.py
YANG model driven API, common definitions.
"""
class PrinterContext(object):
"""
Print Context.
Used to encapsulate information needed by the printers.
"""
def __init__(self):
self.fd = None
self.lvl = 0
# internal
self.all_classes = []
self.class_list = []
self.class_name = ''
self.class_stack = []
self.comment = False
self.contact = ''
self.depth = 0
self.env = None
self.first = True
self.group_list = []
self.groupings = {}
self.idx = 0
self.idx_stack = []
self.import_enum = []
self.imports = {}
self.loader = None
self.local_group_list = []
self.meta = True
self.module = None
self.module_name = ''
self.namespace = ''
self.ns = []
self.organization = ''
self.prefix = ''
self.printer = None
self.revision = ''
self.rpc = []
self.tab_size = 4
self.target = ''
self.templates = None
self.types = []
self.uses = []
def str(self, msg):
self.fd.write(msg)
def tab(self, lvl=None):
if lvl is None:
lvl = self.lvl
if lvl > 0:
fmt = '%%%ds' % (lvl * self.tab_size)
else:
return ''
return fmt % ' '
def write(self, msg):
if self.lvl > 0:
fmt = '%%%ds' % (self.lvl * self.tab_size)
self.fd.write(fmt % ' ')
self.fd.write(msg)
def writeln(self, msg, tab=0):
if self.lvl + tab > 0:
fmt = '%%%ds' % ((self.lvl + tab) * self.tab_size)
self.fd.write(fmt % ' ')
self.fd.write(msg)
self.fd.write('\n')
def writelns(self, lines, tab=0):
indent = ''
if self.lvl + tab > 0:
indent = ' ' * ((self.lvl + tab) * self.tab_size)
fmt = '\n%s' % indent
lines = fmt.join(lines)
self.fd.write('%s%s' % (indent, lines))
def get_indent(self):
indent = ''
if self.lvl > 0:
indent = ' ' * (self.lvl * self.tab_size)
return indent
def bline(self):
self.fd.write('\n')
def lvl_inc(self, tab=1):
self.lvl += tab
def lvl_dec(self, tab=1):
self.lvl -= tab
def push_idx(self):
self.idx_stack.append(self.idx)
def pop_idx(self):
self.idx = self.idx_stack.pop()
def push_class(self):
self.class_stack.append(self.class_list)
self.class_list = []
def pop_class(self):
self.class_list = self.class_stack.pop()
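# Illustrative usage (my addition): write an indented line to stdout.
#   import sys
#   ctx = PrinterContext()
#   ctx.fd = sys.stdout
#   ctx.lvl_inc()
#   ctx.writeln('message')   # indented by one tab_size (4 spaces)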
|
import pathlib
APP_KEY = 'aiohttp_admin'
PROJ_ROOT = pathlib.Path(__file__).parent
|
"""
Discussion:
Because we have a facilitatory synapse, the synaptic resources released per spike
increase as the input rate increases. Therefore, we expect the synaptic conductance
to increase with input rate. However, total synaptic resources are finite, and they
recover in a finite time. At high input frequencies the synaptic resources are
therefore depleted at a higher rate than they recover, so after the first few spikes
only a small fraction of the synaptic resources is left. This results in a decrease
in the steady-state synaptic conductance at high input frequencies.
"""
import numpy as np, math
import matplotlib.pyplot as pp
import roslib; roslib.load_manifest('sandbox_advait_darpa_m3')
import hrl_lib.matplotlib_util as mpu
# trying to see if it makes any difference if I choose R1 or R2 to be
# the variable resistor in the voltage divider.
#
# Answer: NO. Something I should have realized without having to make
# a plot.
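# Sanity check of the claim above (my addition): the two divider outputs are
# V1 = Rvar/(Rs+Rvar)*Vcc and V2 = Rs/(Rs+Rvar)*Vcc, so V1 + V2 = Vcc for every Rvar.
# Swapping which resistor is variable only mirrors the curve about Vcc/2; the usable
# voltage swing over the sensor's resistance range is identical either way.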
def which_variable():
r_static = 2e3
r_min = 800.
r_max = 5e3
r_step = 10.
r_var = np.arange(r_min, r_max, r_step)
v_cc = 5.
# case I - variable resistor is R2
v1 = r_var / (r_static + r_var) * v_cc
# case II - variable resistor is R1
v2 = r_static / (r_static + r_var) * v_cc
mpu.figure()
pp.plot(r_var, v1, 'b', label='variable R2')
pp.plot(r_var, v2, 'g', label='variable R1')
pp.xlabel('Variable Resistance')
pp.ylabel('Voltage')
mpu.legend()
pp.show()
# what is the effect of the pull up resistance. Can I choose a value
# that is good for our application?
def pull_up_resistor_value(rmax, rmin):
n_r = 200
n_pullups = 4
adc_counts = 1024
pullup_best = math.sqrt(rmax*rmin)
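    # Note (my addition): the ADC voltage swing V(rmax) - V(rmin)
    #   = Vcc * (rmax/(Rs+rmax) - rmin/(Rs+rmin))
    # has its derivative with respect to Rs equal to zero at Rs = sqrt(rmax*rmin),
    # which is why the geometric mean above is the analytically optimal pull-up.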
pullup_max = 2 * pullup_best
pullup_min = 0.5 * pullup_best
pullup_max = 500
pullup_min = 50
pullup_step = (pullup_max - pullup_min) / n_pullups
pullup_arr = np.arange(pullup_min, pullup_max, pullup_step)
r_step = (rmax - rmin) / n_r
r_var = np.arange(rmin, rmax, r_step)
v_cc = 5.
v_diff_list = []
mpu.figure()
for r_static in pullup_arr:
v = r_var / (r_static + r_var) * v_cc
pp.plot(r_var, v, mpu.random_color(), label='R1: %.1f'%r_static)
v_diff_list.append(v[-1] - v[0])
pp.xlabel('Variable Resistance')
pp.ylabel('Voltage')
mpu.legend()
mpu.figure()
pp.plot(pullup_arr, v_diff_list)
pp.axvline(pullup_best, c='k', label='Analytically computed optimal value')
pp.xlabel('Pull up resistance (ohms)')
pp.ylabel('Difference in Voltage')
mpu.legend()
l1 = (r_static + rmin) / (r_static + rmax) * adc_counts
l2 = rmin / rmax * (r_static + rmax) / (r_static + rmin) * adc_counts
print 'ADC lost if piezo to GND:', l2
print 'ADC lost if piezo to Vcc:', l1
pp.show()
if __name__ == '__main__':
#which_variable()
# Velostat
rmax = 5e3
rmin = 100
# Eeonyx LTT-SL-PA-MM-1-58B
#rmax = 15e3
#rmin = 1000
# Eeonyx LR-SL-PA-MM-1-54
#rmax = 15e3
#rmin = 1500
# Eeonyx LVY-SL-PA-10E6 RP-3-89-3
#rmax = 1.5e6
#rmin = 150e3
# Eeonyx NW170-SL-PA
#rmax = 1.5e3
#rmin = 130
pull_up_resistor_value(rmax, rmin)
|
# Written by K. M. Knausgård 2021-09-23
import itertools
import random
import time
import phue as hue
max_number_of_exercises = 100
enable_hue = True
hue_bridge_ip = '10.0.0.169'
hue_light_name = 'Stue ved skyvedør høyre'
def input_integer_number(message):
while True:
try:
return int(input(message))
except:
pass
# Color space conversion from phue github https://github.com/studioimaginaire/phue/blob/master/examples/rgb_colors.py
def rgb_to_xy(red, green, blue):
""" conversion of RGB colors to CIE1931 XY colors
Formulas implemented from: https://gist.github.com/popcorn245/30afa0f98eea1c2fd34d
Args:
red (float): a number between 0.0 and 1.0 representing red in the RGB space
green (float): a number between 0.0 and 1.0 representing green in the RGB space
blue (float): a number between 0.0 and 1.0 representing blue in the RGB space
Returns:
xy (list): x and y
"""
# gamma correction
red = pow((red + 0.055) / (1.0 + 0.055), 2.4) if red > 0.04045 else (red / 12.92)
green = pow((green + 0.055) / (1.0 + 0.055), 2.4) if green > 0.04045 else (green / 12.92)
blue = pow((blue + 0.055) / (1.0 + 0.055), 2.4) if blue > 0.04045 else (blue / 12.92)
# convert rgb to xyz
x = red * 0.649926 + green * 0.103455 + blue * 0.197109
y = red * 0.234327 + green * 0.743075 + blue * 0.022598
z = green * 0.053077 + blue * 1.035763
    # convert xyz to xy, using one shared denominator
    # (reassigning x before computing y would skew the y coordinate)
    total = x + y + z
    x = x / total
    y = y / total
return [x, y]
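# Quick check (my addition, using the matrix coefficients above): pure red,
# rgb_to_xy(1.0, 0.0, 0.0), comes out near x = 0.735, y = 0.265.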
def connect_hue_bridge():
while True:
try:
hb = hue.Bridge(hue_bridge_ip)
hb.connect()
return hb
except hue.PhueRegistrationException as pre:
print("\nPlease connect to Philips Hue bridge before first use.")
print("Set Hue Bridge IP address and light name for the light to be controlled.")
print("Also put light in color-mode in your Hue-app.")
print("\nIf this is OK, press the button on you Hue bridge now, and within 30 s hit ENTER.")
print("\nNo Hue light available? Set enable_hue to False to get rid of this!")
input("Press ENTER to continue...")
print("\n")
except Exception as e:
print("Unknown error occurred..")
print("\nNo Hue light available? Set enable_hue to False to get rid of this!")
quit(0)
def main():
print("Python Multiplication Table Learner 1.0\n")
if enable_hue:
hb = connect_hue_bridge()
origxy = hb.get_light(hue_light_name, 'xy')
message = "Select number of exercises, maximum {}: ".format(max_number_of_exercises)
number_of_exercises = min(input_integer_number(message), max_number_of_exercises)
print("\n Ready!")
exercises = list(itertools.product(range(0, 10), repeat=2))
random.shuffle(exercises)
for ii, exercise in enumerate(exercises[:number_of_exercises]):
print("\n Exercise number {} of {}:".format(ii + 1, number_of_exercises))
answer = input_integer_number(" {} x {} = ".format(exercise[0], exercise[1]))
        while answer != (exercise[0] * exercise[1]):
            # command = {'bri': 254, 'hue': 8042, 'sat': 174}
            if enable_hue:  # guard the Hue calls so the quiz also works with enable_hue = False
                hb.set_light(hue_light_name, 'xy', rgb_to_xy(1.0, 0, 0), transitiontime=5)
            print("  Wrong!")
            time.sleep(1)
            if enable_hue:
                hb.set_light(hue_light_name, 'xy', origxy, transitiontime=50)
            answer = input_integer_number("   {} x {} = ".format(exercise[0], exercise[1]))
        if enable_hue:
            hb.set_light(hue_light_name, 'xy', rgb_to_xy(0.0, 1.0, 0), transitiontime=5)
        print("  CORRECT!")
        time.sleep(1)
        if enable_hue:
            hb.set_light(hue_light_name, 'xy', origxy, transitiontime=50)
if __name__ == "__main__":
main()
|
def Websocket(url):
return __new__(WebSocket(url))
|
"""
Developer : Naveen Kambham
Description: Based on the Battery sensor Data, charger plug in time and duration of plug in time are extracted on a daily basis.
"""
#Importing the required libraries.
import collections as col
import functools
from collections import Counter
import pandas as pd
import FeatureExtraction.CommonFunctions.converters as converters
from FeatureExtraction.CommonFunctions import dataprocessing_helper
def TakeMostProbableTimeInStudy(study_values,day_values):
"""
    Method to get the most probable charging time based on the given data.
    A participant may charge the mobile multiple times in a day; in such cases we
    take the participant's most frequent charging time over the entire study period.
    :param study_values: charge time values for the whole study
    :param day_values: charge time values for a given day
:return:
"""
#if total number of values in a day are one then return the only value i.e only one charger plugin time in a given day
    if day_values.count() == 1:  # count() must be called; a bare .count is a method object and never equals 1
return day_values
#more than one time found, hence get the day values and the time values for entire study
else:
#get the study time and day values values count
counter = Counter(study_values)
return functools.reduce(lambda max_key,current_key: max_key if counter[max_key]>counter[current_key] else current_key, study_values)
def get_charger_plugintime_daily(file):
"""
Method to compute the battery charger plug in time
:param file:
:return data frame:
"""
#read the data in to a dataframe
df= pd.read_csv(file)
#splitting datetime in to date and time columns
df['Date'], df['Time'] = zip(*df['start_time'].map(lambda x: x.split(' ')))
    #removing rows with battery plugged status 0 (unplugged) and converting the time to an integer for easier calculations
df= df[df.plugged !=0]
df['Time'] =df['Time'].apply(converters.ConvertTime)
df['Time'] =df['Time'].apply(converters.ConvertToInt)
#getting the all plug in times for a particular participant in the entire study of 30 days.
tempdf = df
tempgrouping = tempdf.groupby(['user_id'])
batterychargeTimePerStudy= [(key,col.Counter(converters.ConvertToIntList(value['Time']))) for (key, value) in tempgrouping.__iter__()]
batterychargeTimePerStudydf= pd.DataFrame(batterychargeTimePerStudy,columns=['ID','Values'])
#grouping by date and userid
grouping = df.groupby(['user_id','Date'])
    #Get battery time for each day by taking the most probable time in the entire study if there is more than one record
batterychargeTime_perDay= [(key[0],key[1],TakeMostProbableTimeInStudy(batterychargeTimePerStudydf[batterychargeTimePerStudydf.ID ==key[0]],value['Time'])) for (key,value) in grouping.__iter__()]
outputdf= pd.DataFrame(batterychargeTime_perDay,columns=['ID','Date','CharginTimeDaily'])
return outputdf
def max_battery_plugin_time_daily(file):
"""
computes the maximum plug in time of battery in a give day for all participants
:param file:
:return:
"""
#read the data in to data fram
df= pd.read_csv(file)
#create new df columns for start,end date and time columns and convert the values for math advantages
df['StartDate'],df['StartTime'] = zip(*df['start_time'].map(lambda x:x.split(' ')))
df['ConvertedStartTime'] = df['StartTime'].apply(converters.ConvertTime)
df['ConvertedStartDate'] = df['StartDate'].apply(converters.ConvertDate)
df['EndDate'],df['EndTime'] = zip(*df['end_time'].map(lambda x:x.split(' ')))
df['ConvertedEndTime'] = df['EndTime'].apply(converters.ConvertTime)
df['ConvertedEndDate'] = df['EndDate'].apply(converters.ConvertDate)
userIds= df.user_id.unique()
outputlist=[]
    # Since this depends on continuous data records we need to iterate over the records; smart aggregation doesn't help much
#processing for corresponding participant
for user in userIds:
tempdf = df.loc[df.user_id == user]
Dates = tempdf.StartDate.unique()
#processing for each day
for date in Dates:
tmpdf = tempdf.loc[((df.StartDate == date))]
tmpdf= tmpdf.sort_values(['ConvertedStartTime'],ascending=(True))
tmpdf= tmpdf[tmpdf.plugged.isin([1,2])]
durations =[0]
for index,value in tmpdf.iterrows():
if (tmpdf.loc[index,'StartDate'] == tmpdf.loc[index,'EndDate']):
durations.append(tmpdf.loc[index,'ConvertedEndTime'] - tmpdf.loc[index,'ConvertedStartTime'])
else:
                    durations.append((24.0 - tmpdf.loc[index,'ConvertedStartTime']) + tmpdf.loc[index,'ConvertedEndTime'])
            # keep the longest single charging duration for this participant and day
            outputlist.append((user, date, max(durations)))
    output_dataFrame = pd.DataFrame(outputlist,columns=['ID','Date','Battery_Charging_Duration',])
return output_dataFrame
def extract(path):
"""
Method to extract the features based on the csv path given
"""
#getting the daily charger plug in times in a day for each participants
df_charge_plugin_times=get_charger_plugintime_daily(path)
#getting the maximum charger plugin duration in a day for each participants
df_max_plugin_duration=max_battery_plugin_time_daily(path)
#merging the extracted features
battery_df= dataprocessing_helper.merge([df_charge_plugin_times, df_max_plugin_duration], ['ID', 'Date'])
return battery_df
#Code to test the functionality independently
# df_battery=extract(r"/home/naveen/Data/Shed10/Filtered/battery_events.csv")
# print((df_battery))
|
import testutils, arc, unittest
class JobSupervisorTest(testutils.ARCClientTestCase):
def setUp(self):
self.usercfg = arc.UserConfig(arc.initializeCredentialsType(arc.initializeCredentialsType.SkipCredentials))
def test_constructor(self):
id1 = "http://test.nordugrid.org/1234567890test1"
id2 = "http://test.nordugrid.org/1234567890test2"
js = arc.JobSupervisor(self.usercfg, [
self.create_test_job(job_id = id1),
self.create_test_job(job_id = id2)
        ])
self.expect(js.GetAllJobs()).not_to_be_empty()
jobs = js.GetAllJobs()
self.expect(jobs).to_have(2).jobs()
self.expect(jobs[0].JobID).to_be(id1)
self.expect(jobs[1].JobID).to_be(id2)
def test_add_job(self):
js = arc.JobSupervisor(self.usercfg, arc.JobList())
self.expect(js.GetAllJobs()).to_be_empty()
job = self.create_test_job(job_id = "http://test.nordugrid.org/1234567890test1")
self.expect(js.AddJob(job)).to_be(True, message = "AddJob was expected to return True")
self.expect(js.GetAllJobs()).not_to_be_empty()
job.JobManagementInterfaceName = ""
self.expect(js.AddJob(job)).to_be(False, message = "AddJob was expected to return False")
self.expect(js.GetAllJobs()).to_have(1).job()
job.JobManagementInterfaceName = "non.existent.interface"
self.expect(js.AddJob(job)).to_be(False, message = "AddJob was expected to return False")
self.expect(js.GetAllJobs()).to_have(1).job()
def test_resubmit(self):
self.usercfg.Broker("TEST")
arc.TargetInformationRetrieverPluginTESTControl.targets = [self.create_test_target("http://test2.nordugrid.org")]
arc.TargetInformationRetrieverPluginTESTControl.status = arc.EndpointQueryingStatus(arc.EndpointQueryingStatus.SUCCESSFUL)
js = arc.JobSupervisor(self.usercfg, [
self.create_test_job(job_id = "http://test.nordugrid.org/1234567890test1", state = arc.JobState.FAILED),
self.create_test_job(job_id = "http://test.nordugrid.org/1234567890test2", state = arc.JobState.RUNNING)
])
self.expect(js.GetAllJobs()).to_have(2).jobs()
endpoints = [arc.Endpoint("http://test2.nordugrid.org", arc.Endpoint.COMPUTINGINFO, "org.nordugrid.tirtest")]
resubmitted = arc.JobList()
result = js.Resubmit(0, endpoints, resubmitted)
# TODO: When using the wrapped arc.TargetInformationRetrieverPluginTESTControl.targets static variable, the bindings sometimes segfaults.
# Particular when accessing member of the arc.TargetInformationRetrieverPluginTESTControl.targets[].ComputingManager map, e.g. arc.TargetInformationRetrieverPluginTESTControl.targets[<some-existing-key>].ComputingManager["some-key"]
#self.expect(result).to_be(True)
#self.expect(resubmitted).to_have(2).jobs()
def test_cancel(self):
id1 = "http://test.nordugrid.org/1234567890test1"
id2 = "http://test.nordugrid.org/1234567890test2"
id3 = "http://test.nordugrid.org/1234567890test3"
id4 = "http://test.nordugrid.org/1234567890test4"
js = arc.JobSupervisor(self.usercfg, [
self.create_test_job(job_id = id1, state = arc.JobState.RUNNING),
self.create_test_job(job_id = id2, state = arc.JobState.FINISHED),
self.create_test_job(job_id = id3, state = arc.JobState.UNDEFINED)
])
arc.JobControllerPluginTestACCControl.cancelStatus = True
self.expect(js.Cancel()).to_be(True, message = "Cancel was expected to return True")
self.expect(js.GetIDsProcessed()).to_have(1).ID()
self.expect(js.GetIDsProcessed()[0]).to_be(id1)
self.expect(js.GetIDsNotProcessed()).to_have(2).IDs()
self.expect(js.GetIDsNotProcessed()[0]).to_be(id2)
self.expect(js.GetIDsNotProcessed()[1]).to_be(id3)
js.ClearSelection()
arc.JobControllerPluginTestACCControl.cancelStatus = False
self.expect(js.Cancel()).to_be(False, message = "Cancel was expected to return False")
self.expect(js.GetIDsProcessed()).to_have(0).IDs()
self.expect(js.GetIDsNotProcessed()).to_have(3).IDs()
self.expect(js.GetIDsNotProcessed()[0]).to_be(id1)
self.expect(js.GetIDsNotProcessed()[1]).to_be(id2)
self.expect(js.GetIDsNotProcessed()[2]).to_be(id3)
js.ClearSelection()
job = self.create_test_job(job_id = id4, state = arc.JobState.ACCEPTED, state_text = "Accepted")
self.expect(js.AddJob(job)).to_be(True, message = "AddJob was expected to return True")
arc.JobControllerPluginTestACCControl.cancelStatus = True
js.SelectByStatus(["Accepted"])
        self.expect(js.Cancel()).to_be(True, message = "Cancel was expected to return True")
self.expect(js.GetIDsProcessed()).to_have(1).ID()
self.expect(js.GetIDsProcessed()[0]).to_be(id4)
self.expect(js.GetIDsNotProcessed()).to_have(0).IDs()
js.ClearSelection()
arc.JobControllerPluginTestACCControl.cancelStatus = False
js.SelectByStatus(["Accepted"])
self.expect(js.Cancel()).to_be(False, message = "Cancel was expected to return False")
self.expect(js.GetIDsProcessed()).to_have(0).IDs()
self.expect(js.GetIDsNotProcessed()).to_have(1).ID()
self.expect(js.GetIDsNotProcessed()[0]).to_be(id4)
js.ClearSelection()
def test_clean(self):
id1 = "http://test.nordugrid.org/1234567890test1"
id2 = "http://test.nordugrid.org/1234567890test2"
js = arc.JobSupervisor(self.usercfg, [
self.create_test_job(job_id = id1, state = arc.JobState.FINISHED, state_text = "Finished"),
self.create_test_job(job_id = id2, state = arc.JobState.UNDEFINED)
])
self.expect(js.GetAllJobs()).to_have(2).jobs()
arc.JobControllerPluginTestACCControl.cleanStatus = True
self.expect(js.Clean()).to_be(True, message = "Clean was expected to return True")
self.expect(js.GetIDsProcessed()).to_have(1).ID()
self.expect(js.GetIDsProcessed()[0]).to_be(id1)
self.expect(js.GetIDsNotProcessed()).to_have(1).ID()
self.expect(js.GetIDsNotProcessed()[0]).to_be(id2)
js.ClearSelection()
arc.JobControllerPluginTestACCControl.cleanStatus = False
self.expect(js.Clean()).to_be(False, message = "Clean was expected to return False")
self.expect(js.GetIDsProcessed()).to_have(0).IDs()
self.expect(js.GetIDsNotProcessed()).to_have(2).IDs()
self.expect(js.GetIDsNotProcessed()[0]).to_be(id1)
self.expect(js.GetIDsNotProcessed()[1]).to_be(id2)
js.ClearSelection()
arc.JobControllerPluginTestACCControl.cleanStatus = True
js.SelectByStatus(["Finished"])
self.expect(js.Clean()).to_be(True, message = "Clean was expected to return True")
self.expect(js.GetIDsProcessed()).to_have(1).ID()
self.expect(js.GetIDsProcessed()[0]).to_be(id1)
self.expect(js.GetIDsNotProcessed()).to_have(0).IDs()
js.ClearSelection()
arc.JobControllerPluginTestACCControl.cleanStatus = False
js.SelectByStatus(["Finished"])
self.expect(js.Clean()).to_be(False, message = "Clean was expected to return False")
self.expect(js.GetIDsProcessed()).to_have(0).IDs()
self.expect(js.GetIDsNotProcessed()).to_have(1).ID()
self.expect(js.GetIDsNotProcessed()[0]).to_be(id1)
js.ClearSelection()
if __name__ == '__main__':
unittest.main()
|
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':
if not root:
return None
if root.val > p.val and root.val > q.val:
# search left subtree if root is greater than both p, q
return self.lowestCommonAncestor(root.left, p, q)
if root.val < p.val and root.val < q.val:
# search right subtree if root is smaller than both p, q
return self.lowestCommonAncestor(root.right, p, q)
# base case: root itself is the LCA
return root
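# Illustrative check (my addition, not part of the original snippet): in the BST
#        6
#      /   \
#     2     8
#    / \
#   0   4
# the LCA of 2 and 8 is the root 6, while the LCA of 2 and 4 is 2 itself.
if __name__ == "__main__":
    root = TreeNode(6)
    root.left, root.right = TreeNode(2), TreeNode(8)
    root.left.left, root.left.right = TreeNode(0), TreeNode(4)
    sol = Solution()
    print(sol.lowestCommonAncestor(root, root.left, root.right).val)       # 6
    print(sol.lowestCommonAncestor(root, root.left, root.left.right).val)  # 2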
|
import requests
from coord.exceptions import InvalidAPIKeyException, InvalidEmailFormatException
class BaseAPI:
BASE_URL = 'https://api.coord.co/v1'
INVALID_KEY_MSG = "Forbidden: Coord API calls must include an access_key or an Authorization header"
BIKE_ENDPOINT = f'{BASE_URL}/bike/'
USER_ENDPOINT = f'{BASE_URL}/users/'
CURB_ENDPOINT = f'{BASE_URL}/search/curbs/'
BLANK = ''
def __init__(self, secret_key, user=None):
self.secret_key = 'access_key=' + secret_key
if user is not None:
path = f'{self.USER_ENDPOINT}testing/user_and_jwt?{self.secret_key}'
headers = {'Content-Type': 'application/json'}
body = {"user": {"email": user}}
response = requests.post(path, headers=headers, json=body).json()
if 601 in response.values():
raise InvalidEmailFormatException()
else:
self._jwt_token = response['jwt_token']
self.email = user
self.AUTH_HEADER = {'Content-Type': 'application/json', 'Authorization': f'Bearer {self._jwt_token}'}
self.link_account()
def check_api_key(self, obj):
"""
Checks if the API Key that was entered is valid or not
:param obj: dict
"""
if self.INVALID_KEY_MSG in obj.values():
raise InvalidAPIKeyException()
def create_user_session(self, email):
"""
Creates a new user session with a new jwt token. If there is already an existing email and jwt token then they are overridden.
:param email: str
"""
path = f'{self.USER_ENDPOINT}testing/user_and_jwt?{self.secret_key}'
headers = {'Content-Type': 'application/json'}
body = {"user": {"email": email}}
response = requests.post(path, headers=headers, json=body).json()
if 601 in response.values():
raise InvalidEmailFormatException()
else:
self._jwt_token = response['jwt_token']
            self.email = email
            # refresh the auth header so link_account() uses the new jwt token
            self.AUTH_HEADER = {'Content-Type': 'application/json', 'Authorization': f'Bearer {self._jwt_token}'}
            self.link_account()
def link_account(self):
path = f'{self.USER_ENDPOINT}testing/user/current/provisioned_systems?{self.secret_key}'
body = {"system_id": ["CitiBike"]}
response = requests.put(path, headers=self.AUTH_HEADER, json=body).json()
return response
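# Illustrative usage (my addition; the access key and e-mail addresses are placeholders):
#   api = BaseAPI('my-secret-key', user='rider@example.com')
#   api.create_user_session('someone.else@example.com')  # issues a new jwt and relinks CitiBike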
|
from flask_socketio import SocketIO
from flask import current_app, request
import os, uuid, json
socketio = SocketIO()
@socketio.on('connected')
def connected():
    print("%s connected" % request.sid)
@socketio.on('disconnect')
def disconnect():
    print("%s disconnected" % request.sid)
@socketio.on('start-transfer')
def start_transfer(filename, size):
"""Process an upload request from the client."""
_, ext = os.path.splitext(filename)
if ext in ['.exe', '.bin', '.js', '.sh', '.py', '.php']:
return False # reject the upload
id = uuid.uuid4().hex # server-side filename
with open(current_app.config['FILEDIR'] + id + '.json', 'wt') as f:
json.dump({'filename': filename, 'size': size}, f)
with open(current_app.config['FILEDIR'] + id + ext, 'wb') as f:
pass
return id + ext # allow the upload
@socketio.on('write-chunk')
def write_chunk(filename, offset, data):
"""Write a chunk of data sent by the client."""
if not os.path.exists(current_app.config['FILEDIR'] + filename):
return False
try:
with open(current_app.config['FILEDIR'] + filename, 'r+b') as f:
f.seek(offset)
f.write(data)
except IOError:
return False
return True |
import param
import cartopy.crs as ccrs
from holoviews.annotators import (
annotate, Annotator, PathAnnotator, PolyAnnotator, PointAnnotator,
RectangleAnnotator # noqa
)
from holoviews.plotting.links import DataLink, VertexTableLink as hvVertexTableLink
from panel.util import param_name
from .element import Path
from .models.custom_tools import CheckpointTool, RestoreTool, ClearTool
from .links import VertexTableLink, PointTableLink, HvRectanglesTableLink, RectanglesTableLink
from .operation import project
from .streams import PolyVertexDraw, PolyVertexEdit
Annotator._tools = [CheckpointTool, RestoreTool, ClearTool]
Annotator.table_transforms.append(project.instance(projection=ccrs.PlateCarree()))
def get_point_table_link(self, source, target):
if hasattr(source.callback.inputs[0], 'crs'):
return PointTableLink(source, target)
else:
return DataLink(source, target)
PointAnnotator._link_type = get_point_table_link
def get_rectangles_table_link(self, source, target):
if hasattr(source.callback.inputs[0], 'crs'):
return RectanglesTableLink(source, target)
else:
return HvRectanglesTableLink(source, target)
RectangleAnnotator._link_type = get_rectangles_table_link
def get_vertex_table_link(self, source, target):
if hasattr(source.callback.inputs[0], 'crs'):
return VertexTableLink(source, target)
else:
return hvVertexTableLink(source, target)
PathAnnotator._vertex_table_link = get_vertex_table_link
PolyAnnotator._vertex_table_link = get_vertex_table_link
def initialize_tools(plot, element):
"""
Initializes the Checkpoint and Restore tools.
"""
cds = plot.handles['source']
checkpoint = plot.state.select(type=CheckpointTool)
restore = plot.state.select(type=RestoreTool)
clear = plot.state.select(type=ClearTool)
if checkpoint:
checkpoint[0].sources.append(cds)
if restore:
restore[0].sources.append(cds)
if clear:
clear[0].sources.append(cds)
Annotator._extra_opts['hooks'] = [initialize_tools]
class PathBreakingAnnotator(PathAnnotator):
feature_style = param.Dict(default={'fill_color': 'blue', 'size': 10}, doc="""
Styling to apply to the feature vertices.""")
node_style = param.Dict(default={'fill_color': 'indianred', 'size': 6}, doc="""
Styling to apply to the node vertices.""")
def _init_stream(self):
name = param_name(self.name)
style_kwargs = dict(node_style=self.node_style, feature_style=self.feature_style)
self._stream = PolyVertexDraw(
source=self.plot, data={}, num_objects=self.num_objects,
show_vertices=self.show_vertices, tooltip='%s Tool' % name,
**style_kwargs
)
if self.edit_vertices:
self._vertex_stream = PolyVertexEdit(
source=self.plot, tooltip='%s Edit Tool' % name,
**style_kwargs
)
annotate._annotator_types[Path] = PathBreakingAnnotator
|
"""
3D potential field inversion by planting anomalous densities.
Implements the method of Uieda and Barbosa (2012a) with improvements by
Uieda and Barbosa (2012b).
A "heuristic" inversion for compact 3D geologic bodies. Performs the inversion
by iteratively growing the estimate around user-specified "seeds". Supports
various kinds of data (gravity, gravity tensor).
The inversion is performed by function
:func:`~fatiando.gravmag.harvester.harvest`. The required information, such as
observed data, seeds, and regularization, are passed to the function through
classes :class:`~fatiando.gravmag.harvester.Seed` and
:class:`~fatiando.gravmag.harvester.Potential`,
:class:`~fatiando.gravmag.harvester.Gz`,
:class:`~fatiando.gravmag.harvester.Gxx`, etc.
See the :ref:`Cookbook <cookbook>` for some example applications to synthetic
data.
**Functions**
* :func:`~fatiando.gravmag.harvester.harvest`: Performs the inversion
* :func:`~fatiando.gravmag.harvester.iharvest`: Iterator to step through the
inversion one accretion at a time
* :func:`~fatiando.gravmag.harvester.sow`: Creates the seeds from a set of
(x, y, z) points and physical properties
* :func:`~fatiando.gravmag.harvester.loadseeds`: Loads from a JSON file a set
of (x, y, z) points and physical properties that specify the seeds. Pass
output to :func:`~fatiando.gravmag.harvester.sow`
* :func:`~fatiando.gravmag.harvester.weights`: Computes data weights based on
the distance to the seeds
**Data types**
* :class:`~fatiando.gravmag.harvester.Potential`: gravitational potential
* :class:`~fatiando.gravmag.harvester.Gz`: vertical component of gravitational
acceleration (i.e., gravity anomaly)
* :class:`~fatiando.gravmag.harvester.Gxx`: North-North component of the
gravity gradient tensor
* :class:`~fatiando.gravmag.harvester.Gxy`: North-East component of the gravity
gradient tensor
* :class:`~fatiando.gravmag.harvester.Gxz`: North-vertical component of the
gravity gradient tensor
* :class:`~fatiando.gravmag.harvester.Gyy`: East-East component of the gravity
gradient tensor
* :class:`~fatiando.gravmag.harvester.Gyz`: East-vertical component of the
gravity gradient tensor
* :class:`~fatiando.gravmag.harvester.Gzz`: vertical-vertical component of the
gravity gradient tensor
**References**
Uieda, L., and V. C. F. Barbosa (2012a), Robust 3D gravity gradient inversion
by planting anomalous densities, Geophysics, 77(4), G55-G66,
doi:10.1190/geo2011-0388.1
Uieda, L., and V. C. F. Barbosa (2012b),
Use of the "shape-of-anomaly" data misfit in 3D inversion by planting anomalous
densities, SEG Technical Program Expanded Abstracts, 1-6,
doi:10.1190/segam2012-0383.1
----
"""
from __future__ import absolute_import, division
from future.builtins import range
import json
import bisect
from math import sqrt
import numpy
from fatiando.gravmag import prism as prism_engine
from fatiando.gravmag import tesseroid as tesseroid_engine
from fatiando import utils
from fatiando.mesher import Prism, Tesseroid
def loadseeds(fname):
"""
Load a set of seed locations and physical properties from a file.
The output can then be used with the
:func:`~fatiando.gravmag.harvester.sow` function.
The seed file should be formatted as::
[
[x1, y1, z1, {"density":dens1}],
[x2, y2, z2, {"density":dens2, "magnetization":mag2}],
[x3, y3, z3, {"magnetization":mag3, "inclination":inc3,
"declination":dec3}],
...
]
x, y, z are the coordinates of the seed and the dict (``{'density':2670}``)
are its physical properties.
.. warning::
Must use ``"``, not ``'``, in the physical property names!
Each seed can have different kinds of physical properties. If inclination
and declination are not given, will use the inc and dec of the inducing
field (i.e., no remanent magnetization).
The techie among you will recognize that the seed file is in JSON format.
Remember: the coordinate system is x->North, y->East, and z->Down
Parameters:
* fname : str or file
Open file object or filename string
Returns:
* [[x1, y1, z1, props1], [x2, y2, z2, props2], ...]
(x, y, z) are the points where the seeds will be placed
        and *props* is a dict with the values of the physical properties of each
        seed.
Example:
>>> from StringIO import StringIO
>>> file = StringIO(
... '[[1, 2, 3, {"density":4, "magnetization":5}],' +
... ' [6, 7, 8, {"magnetization":-1}]]')
>>> seeds = loadseeds(file)
>>> for s in seeds:
... print s
[1, 2, 3, {u'magnetization': 5, u'density': 4}]
[6, 7, 8, {u'magnetization': -1}]
"""
openned = False
if isinstance(fname, str):
fname = open(fname)
openned = True
seeds = json.load(fname)
if openned:
fname.close()
return seeds
def sow(locations, mesh):
"""
Create the seeds given a list of (x,y,z) coordinates and physical
properties.
Removes seeds that would fall on the same location with overlapping
physical properties.
Parameters:
* locations : list
The locations and physical properties of the seeds. Should be a list
like::
[
[x1, y1, z1, {"density":dens1}],
[x2, y2, z2, {"density":dens2, "magnetization":mag2}],
[x3, y3, z3, {"magnetization":mag3, "inclination":inc3,
"declination":dec3}],
...
]
* mesh : :class:`fatiando.mesher.PrismMesh`
The mesh that will be used in the inversion.
Returns:
* seeds : list of seeds
The seeds that can be passed to
:func:`~fatiando.gravmag.harvester.harvest`
"""
seeds = []
if mesh.celltype == Tesseroid:
seedtype = TesseroidSeed
elif mesh.celltype == Prism:
seedtype = PrismSeed
for x, y, z, props in locations:
index = _find_index((x, y, z), mesh)
if index is None:
raise ValueError(
"Couldn't find seed at location (%g,%g,%g)" % (x, y, z))
# Check for duplicates
if index not in (s.i for s in seeds):
seeds.append(seedtype(index, (x, y, z), mesh[index], props))
return seeds
def _find_index(point, mesh):
"""
Find the index of the cell that has point inside it.
"""
x1, x2, y1, y2, z1, z2 = mesh.bounds
nz, ny, nx = mesh.shape
xs = mesh.get_xs()
ys = mesh.get_ys()
zs = mesh.get_zs()
x, y, z = point
if (x <= x2 and x >= x1 and y <= y2 and y >= y1 and
((z <= z2 and z >= z1 and mesh.zdown) or
(z >= z2 and z <= z1 and not mesh.zdown))):
if mesh.zdown:
# -1 because bisect gives the index z would have. I want to know
# what index z comes after
k = bisect.bisect_left(zs, z) - 1
else:
# If z is not positive downward, zs will not be sorted
k = len(zs) - bisect.bisect_left(zs[::-1], z)
j = bisect.bisect_left(ys, y) - 1
i = bisect.bisect_left(xs, x) - 1
seed = i + j * nx + k * nx * ny
# Check if the cell is not masked (topography)
if mesh[seed] is not None:
return seed
return None
def harvest(data, seeds, mesh, compactness, threshold, report=False,
restrict=None):
"""
    Run the inversion algorithm and produce an estimated physical property
    distribution (density and/or magnetization).
Parameters:
* data : list of data (e.g., :class:`~fatiando.gravmag.harvester.Gz`)
The data that will be inverted. Data used must match the physical
properties given to the seeds (e.g., gravity data requires seeds to
have ``'density'`` prop)
* seeds : list of :class:`~fatiando.gravmag.harvester.Seed`
Lits of seeds used to start the growth process of the inversion. Use
:func:`~fatiando.gravmag.harvester.sow` to generate seeds.
* mesh : :class:`fatiando.mesher.PrismMesh`
The mesh used in the inversion. Will estimate the physical property
distribution on this mesh
* compactness : float
        The compactness regularizing parameter (i.e., how much the
        estimate should be concentrated around the seeds). Must be positive. To find a
good value for this, start with a small value (like 0.001), run the
inversion and increase the value until desired compactness is achieved.
* threshold : float
Control how much the solution can grow (usually downward). In order for
estimate to grow by the accretion of 1 prism, this prism must decrease
the data misfit measure by *threshold* decimal percent. Depends on the
size of the cells in the *mesh* and the distance from a cell to the
observations. Use values between 0.001 and 0.000001.
If cells are small and *threshold* is large (0.001), the seeds won't
grow. If cells are large and *threshold* is small (0.000001), the seeds
will grow too much.
* report : True or False
If ``True``, also will return a dict as::
report = {'goal': goal_function_value,
'shape-of-anomaly': SOA_function_value,
'misfit': data_misfit_value,
'regularizer': regularizing_function_value,
'accretions': number_of_accretions}
* restrict : list of str
Restricts seed growth in given directions. Possible directions are
``'above'``, ``'below'``, ``'north'``, ``'south'``, ``'east'`` and
        ``'west'``. You can pass in one or more directions as a list, e.g.
``['above']``. Default is ``None`` for unrestricted growth.
Returns:
* estimate, predicted_data : a dict and a list
*estimate* is a dict like::
{'physical_property':array, ...}
        *estimate* contains the estimated physical properties. The properties
present in *estimate* are the ones given to the seeds. Include the
properties in the *mesh* using::
mesh.addprop('density', estimate['density'])
This way you can plot the estimate using :mod:`fatiando.vis.myv`.
*predicted_data* is a list of numpy arrays with the predicted (model)
data. The list is in the same order as *data*. To plot a map of the fit
for visual inspection and a histogram of the residuals::
from fatiando.vis import mpl
mpl.figure()
# Plot the observed and predicted data as contours for visual
# inspection
mpl.subplot(1, 2, 1)
mpl.axis('scaled')
mpl.title('Observed and predicted data')
levels = mpl.contourf(x, y, gz, (ny, nx), 10)
mpl.colorbar()
# Assuming gz is the only data used
mpl.contour(x, y, predicted[0], (ny, nx), levels)
# Plot a histogram of the residuals
residuals = gz - predicted[0]
mpl.subplot(1, 2, 2)
mpl.title('Residuals')
mpl.hist(residuals, bins=10)
mpl.show()
# It's also good to see the mean and standard deviation of the
# residuals
print "Residuals mean:", residuals.mean()
print "Residuals stddev:", residuals.std()
"""
restrict = _test_restriction(restrict)
for accretions, update in enumerate(iharvest(data, seeds, mesh,
compactness, threshold,
restrict)):
continue
estimate, predicted = update[:2]
output = [fmt_estimate(estimate, mesh.size), predicted]
if report:
goal, misfit, regul = update[4:]
soa = goal - compactness*1/(sum(mesh.shape)/3)*regul
output.append({'goal': goal, 'misfit': misfit, 'regularizer': regul,
'accretions': accretions, 'shape-of-anomaly': soa})
return output
def iharvest(data, seeds, mesh, compactness, threshold, restrict):
"""
Same as the :func:`fatiando.gravmag.harvester.harvest` function but this
one returns an iterator that yields the information of each accretion.
Yields:
* [estimate, predicted, new, neighbors, goal, misfit, regularizer]
        The unformatted estimate, predicted data vectors, the new element added
during this iteration, list of neighbors, goal function value, misfit,
regularizing function value.
The first yield contains the seeds. Thus ``new`` will be ``None``.
To format the estimate in a way that can be added to a mesh, use
function fmt_estimate of this module.
"""
nseeds = len(seeds)
estimate = dict((s.i, s.props) for s in seeds)
neighbors = []
for seed in seeds:
neighbors.append(_get_neighbors(seed, neighbors, estimate, mesh, data,
restrict))
predicted = _init_predicted(data, seeds, mesh)
totalgoal = _shapefunc(data, predicted)
totalmisfit = _misfitfunc(data, predicted)
regularizer = 0.
# Weight the regularizing function by the mean extent of the mesh
mu = compactness*1/(sum(mesh.shape)/3)
yield [estimate, predicted, None, neighbors, totalgoal, totalmisfit,
regularizer]
accretions = 0
for iteration in range(mesh.size - nseeds):
grew = False # To check if at least one seed grew (stopping criterion)
for s in range(nseeds):
best, bestgoal, bestmisfit, bestregularizer = _grow(
neighbors[s], data, predicted, totalmisfit, mu, regularizer,
threshold)
if best is not None:
if best.i not in estimate:
estimate[best.i] = {}
estimate[best.i].update(best.props)
totalgoal = bestgoal
totalmisfit = bestmisfit
regularizer = bestregularizer
for p, e in zip(predicted, best.effect):
p += e
neighbors[s].pop(best.i)
neighbors[s].update(
_get_neighbors(best, neighbors, estimate, mesh, data,
restrict))
grew = True
accretions += 1
yield [estimate, predicted, best, neighbors, totalgoal,
totalmisfit, regularizer]
del best
if not grew:
break
def _init_predicted(data, seeds, mesh):
"""
Make a list with the initial predicted data vectors (effect of seeds)
"""
predicted = []
for d in data:
p = numpy.zeros(len(d.observed), dtype='f')
for seed in seeds:
p += d.effect(mesh[seed.i], seed.props)
predicted.append(p)
return predicted
def fmt_estimate(estimate, size):
"""
Make a nice dict with the estimated physical properties in separate arrays
"""
output = {}
for i in estimate:
props = estimate[i]
for p in props:
if p not in output:
output[p] = utils.SparseList(size)
output[p][i] = props[p]
return output
def _grow(neighbors, data, predicted, totalmisfit, mu, regularizer, threshold):
"""
Find the neighbor with smallest goal function that also decreases the
misfit
"""
best = None
bestgoal = None
bestmisfit = None
bestregularizer = None
for n in neighbors:
pred = [p + e for p, e in zip(predicted, neighbors[n].effect)]
misfit = _misfitfunc(data, pred)
if (misfit < totalmisfit and
abs(misfit - totalmisfit)/totalmisfit >= threshold):
reg = regularizer + neighbors[n].distance
goal = _shapefunc(data, pred) + mu * reg
if bestgoal is None or goal < bestgoal:
bestgoal = goal
best = neighbors[n]
bestmisfit = misfit
bestregularizer = reg
return best, bestgoal, bestmisfit, bestregularizer
def _shapefunc(data, predicted):
"""
Calculate the total shape of anomaly function between the observed and
predicted data.
"""
result = 0.
for d, p in zip(data, predicted):
alpha = numpy.sum(d.observed * p)/d.norm**2
result += numpy.linalg.norm(alpha * d.observed - p)
return result
def _misfitfunc(data, predicted):
"""
Calculate the total data misfit function between the observed and predicted
data.
"""
result = 0.
for d, p, in zip(data, predicted):
residuals = d.observed - p
result += sqrt(numpy.dot(d.weights*residuals, residuals))/d.norm
return result
def _get_neighbors(cell, neighborhood, estimate, mesh, data, restrict):
"""
Return a dict with the new neighbors of cell.
keys are the index of the neighbors in the mesh. values are the Neighbor
objects.
"""
indexes = [n for n in _neighbor_indexes(cell.i, mesh, restrict)
if not _is_neighbor(n, cell.props, neighborhood) and
not _in_estimate(n, cell.props, estimate)]
neighbors = dict(
(i, Neighbor(
i, cell.props, cell.seed, _distance(i, cell.seed, mesh),
_calc_effect(i, cell.props, mesh, data)))
for i in indexes)
return neighbors
def _calc_effect(index, props, mesh, data):
"""
Calculate the effect of cell mesh[index] with physical properties prop for
each data set.
"""
cell = mesh[index]
return [d.effect(cell, props) for d in data]
def _distance(n, m, mesh):
"""
Calculate the distance (in number of cells) between cells n and m in mesh.
"""
ni, nj, nk = _index2ijk(n, mesh)
mi, mj, mk = _index2ijk(m, mesh)
return sqrt((ni - mi) ** 2 + (nj - mj) ** 2 + (nk - mk) ** 2)
def _index2ijk(index, mesh):
"""
Transform the index of a cell in mesh to a 3-dimensional (i,j,k) index.
"""
nz, ny, nx = mesh.shape
k = index//(nx*ny)
j = (index - k*(nx*ny))//nx
i = (index - k*(nx*ny) - j*nx)
return i, j, k
def _in_estimate(index, props, estimate):
"""
    Check if index is in estimate with props
"""
if index in estimate:
for p in props:
if p in estimate[index]:
return True
return False
def _is_neighbor(index, props, neighborhood):
"""
Check if index is already in the neighborhood with props
"""
for neighbors in neighborhood:
for n in neighbors:
if index == neighbors[n].i:
for p in props:
if p in neighbors[n].props:
return True
return False
def _neighbor_indexes(n, mesh, restrict):
"""Find the indexes of the neighbors of n"""
nz, ny, nx = mesh.shape
indexes = []
if 'above' not in restrict:
# The guy above
tmp = n - nx * ny
if tmp > 0:
indexes.append(tmp)
if 'below' not in restrict:
# The guy below
tmp = n + nx * ny
if tmp < mesh.size:
indexes.append(tmp)
if 'north' not in restrict:
# The guy in front/north
tmp = n + 1
if n % nx < nx - 1:
indexes.append(tmp)
if 'south' not in restrict:
# The guy in the back/south
tmp = n - 1
if n % nx != 0:
indexes.append(tmp)
if 'east' not in restrict:
# The guy to the left/east
tmp = n + nx
if n % (nx * ny) < nx * (ny - 1):
indexes.append(tmp)
if 'west' not in restrict:
# The guy to the right/west
tmp = n - nx
if n % (nx * ny) >= nx:
indexes.append(tmp)
# Filter out the ones that do not exist or are masked (topography)
return [i for i in indexes if i is not None and mesh[i] is not None]
def _test_restriction(restrict):
"""
Test for correct spelling of items in restrict list.
"""
cases = ['above', 'below', 'north', 'south', 'east', 'west']
if restrict is None:
return []
else:
for case in restrict:
if not(case in cases):
raise ValueError("Unrecognized item in restrict: %s" % case)
return restrict
class PrismSeed(Prism):
"""
A seed that is a right rectangular prism.
"""
def __init__(self, i, location, prism, props):
Prism.__init__(self, prism.x1, prism.x2, prism.y1, prism.y2, prism.z1,
prism.z2, props=props)
self.i = i
self.seed = i
self.x, self.y, self.z = location
class TesseroidSeed(Tesseroid):
"""
A seed that is a tesseroid (spherical prism).
"""
def __init__(self, i, location, tess, props):
Tesseroid.__init__(self, tess.w, tess.e, tess.s, tess.n, tess.top,
tess.bottom, props=props)
self.i = i
self.seed = i
self.x, self.y, self.z = location
class Neighbor(object):
"""
A neighbor.
"""
def __init__(self, i, props, seed, distance, effect):
self.i = i
self.props = props
self.seed = seed
self.distance = distance
self.effect = effect
def weights(x, y, seeds, influences, decay=2):
"""
Calculate weights for the data based on the distance to the seeds.
Use weights to ignore regions of data outside of the target anomaly.
Parameters:
* x, y : 1d arrays
The x and y coordinates of the observations
* seeds : list
List of seeds, as returned by :func:`~fatiando.gravmag.harvester.sow`
* influences : list of floats
The respective diameter of influence for each seed. Observations
outside the influence will have very small weights.
        A recommended value is approximately the diameter of the anomaly
* decay : float
The decay factor for the weights. Low decay factor makes the weights
spread out more. High decay factor makes the transition from large
weights to low weights more abrupt.
Returns:
* weights : 1d array
The calculated weights
"""
distances = numpy.array([((x - s.x) ** 2 + (y - s.y) ** 2)/influence**2
for s, influence in zip(seeds, influences)])
# min along axis=0 gets the smallest value from each column
weights = numpy.exp(-(distances.min(axis=0) ** decay))
return weights
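# Worked example (hypothetical numbers, assuming a single seed): with decay=2,
# an observation located exactly at the seed has normalized distance 0 and
# weight exp(-0) = 1.0, while an observation whose distance from the seed equals
# its influence value has normalized distance 1 and weight exp(-1) ~= 0.37.
# Larger decay values make this drop-off sharper.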
class Data(object):
"""
A container for some potential field data.
    Knows about its data, observation positions, nature of the mesh, and how
to calculate the effect of a single cell.
"""
def __init__(self, x, y, z, data, weights, meshtype):
self.x = x
self.y = y
self.z = z
self.observed = data
self.size = len(data)
self.norm = numpy.linalg.norm(data)
self.meshtype = meshtype
if self.meshtype not in ['prism', 'tesseroid']:
raise AttributeError("Invalid mesh type '%s'" % (meshtype))
if self.meshtype == 'prism':
self.engine = prism_engine
if self.meshtype == 'tesseroid':
self.engine = tesseroid_engine
self.weights = weights
class Potential(Data):
"""
A container for data of the gravitational potential.
Coordinate system used: x->North y->East z->Down
Parameters:
* x, y, z : 1D arrays
Arrays with the x, y, z coordinates of the data points
* data : 1D array
The values of the data at the observation points
    * weights : float or array
        The weight of this data set in the misfit function. Pass an array to
        give weights to each data point or a float to weight the entire misfit
function. See function :func:`~fatiando.gravmag.harvester.weights`
"""
def __init__(self, x, y, z, data, weights=1., meshtype='prism'):
Data.__init__(self, x, y, z, data, weights, meshtype)
self.prop = 'density'
self.effectfunc = self.engine.potential
def effect(self, prism, props):
if self.prop not in props:
return numpy.zeros(self.size, dtype='f')
return self.effectfunc(self.x, self.y, self.z, [prism],
props[self.prop])
class Gz(Potential):
"""
A container for data of the gravity anomaly.
Coordinate system used: x->North y->East z->Down
Parameters:
* x, y, z : 1D arrays
Arrays with the x, y, z coordinates of the data points
* data : 1D array
The values of the data at the observation points
    * weights : float or array
        The weight of this data set in the misfit function. Pass an array to
        give weights to each data point or a float to weight the entire misfit
function. See function :func:`~fatiando.gravmag.harvester.weights`
"""
def __init__(self, x, y, z, data, weights=1., meshtype='prism'):
Potential.__init__(self, x, y, z, data, weights, meshtype)
self.effectfunc = self.engine.gz
class Gxx(Potential):
"""
A container for data of the xx (north-north) component of the gravity
gradient tensor.
Coordinate system used: x->North y->East z->Down
Parameters:
* x, y, z : 1D arrays
Arrays with the x, y, z coordinates of the data points
* data : 1D array
The values of the data at the observation points
    * weights : float or array
        The weight of this data set in the misfit function. Pass an array to
        give weights to each data point or a float to weight the entire misfit
function. See function :func:`~fatiando.gravmag.harvester.weights`
"""
def __init__(self, x, y, z, data, weights=1., meshtype='prism'):
Potential.__init__(self, x, y, z, data, weights, meshtype)
self.effectfunc = self.engine.gxx
class Gxy(Potential):
"""
A container for data of the xy (north-east) component of the gravity
gradient tensor.
Coordinate system used: x->North y->East z->Down
Parameters:
* x, y, z : 1D arrays
Arrays with the x, y, z coordinates of the data points
* data : 1D array
The values of the data at the observation points
    * weights : float or array
        The weight of this data set in the misfit function. Pass an array to
        give weights to each data point or a float to weight the entire misfit
function. See function :func:`~fatiando.gravmag.harvester.weights`
"""
def __init__(self, x, y, z, data, weights=1., meshtype='prism'):
Potential.__init__(self, x, y, z, data, weights, meshtype)
self.effectfunc = self.engine.gxy
class Gxz(Potential):
"""
A container for data of the xz (north-vertical) component of the gravity
gradient tensor.
Coordinate system used: x->North y->East z->Down
Parameters:
* x, y, z : 1D arrays
Arrays with the x, y, z coordinates of the data points
* data : 1D array
The values of the data at the observation points
    * weights : float or array
        The weight of this data set in the misfit function. Pass an array to
        give weights to each data point or a float to weight the entire misfit
function. See function :func:`~fatiando.gravmag.harvester.weights`
"""
def __init__(self, x, y, z, data, weights=1., meshtype='prism'):
Potential.__init__(self, x, y, z, data, weights, meshtype)
self.effectfunc = self.engine.gxz
class Gyy(Potential):
"""
A container for data of the yy (east-east) component of the gravity
gradient tensor.
Coordinate system used: x->North y->East z->Down
Parameters:
* x, y, z : 1D arrays
Arrays with the x, y, z coordinates of the data points
* data : 1D array
The values of the data at the observation points
    * weights : float or array
        The weight of this data set in the misfit function. Pass an array to
        give weights to each data point or a float to weight the entire misfit
function. See function :func:`~fatiando.gravmag.harvester.weights`
"""
def __init__(self, x, y, z, data, weights=1., meshtype='prism'):
Potential.__init__(self, x, y, z, data, weights, meshtype)
self.effectfunc = self.engine.gyy
class Gyz(Potential):
"""
A container for data of the yz (east-vertical) component of the gravity
gradient tensor.
Coordinate system used: x->North y->East z->Down
Parameters:
* x, y, z : 1D arrays
Arrays with the x, y, z coordinates of the data points
* data : 1D array
The values of the data at the observation points
    * weights : float or array
        The weight of this data set in the misfit function. Pass an array to
        give weights to each data point or a float to weight the entire misfit
function. See function :func:`~fatiando.gravmag.harvester.weights`
"""
def __init__(self, x, y, z, data, weights=1., meshtype='prism'):
Potential.__init__(self, x, y, z, data, weights, meshtype)
self.effectfunc = self.engine.gyz
class Gzz(Potential):
"""
A container for data of the zz (vertical-vertical) component of the gravity
gradient tensor.
Coordinate system used: x->North y->East z->Down
Parameters:
* x, y, z : 1D arrays
Arrays with the x, y, z coordinates of the data points
* data : 1D array
The values of the data at the observation points
    * weights : float or array
        The weight of this data set in the misfit function. Pass an array to
        give weights to each data point or a float to weight the entire misfit
function. See function :func:`~fatiando.gravmag.harvester.weights`
"""
def __init__(self, x, y, z, data, weights=1., meshtype='prism'):
Potential.__init__(self, x, y, z, data, weights, meshtype)
self.effectfunc = self.engine.gzz
class TotalField(Potential):
"""
A container for data of the total field magnetic anomaly.
Coordinate system used: x->North y->East z->Down
Parameters:
* x, y, z : 1D arrays
Arrays with the x, y, z coordinates of the data points
* data : 1D array
The values of the data at the observation points
* inc, dec : floats
The inclination and declination of the inducing field
    * weights : float or array
        The weight of this data set in the misfit function. Pass an array to
        give weights to each data point or a float to weight the entire misfit
function. See function :func:`~fatiando.gravmag.harvester.weights`
"""
def __init__(self, x, y, z, data, inc, dec, weights=1., meshtype='prism'):
if meshtype != 'prism':
raise AttributeError(
"Unsupported mesh type '%s' for total field anomaly."
% (meshtype))
Potential.__init__(self, x, y, z, data, weights, meshtype)
self.effectfunc = self.engine.tf
self.prop = 'magnetization'
self.inc = inc
self.dec = dec
def effect(self, prism, props):
if self.prop not in props:
return numpy.zeros(self.size, dtype='f')
return self.effectfunc(self.x, self.y, self.z, [prism], self.inc,
self.dec, pmag=props[self.prop])
|
"""Struct versions of Python's own AST nodes."""
__all__ = [
'py33_nodes',
'py34_nodes',
]
from ..asdl import python33_asdl, python34_asdl
from ..node import nodes_from_asdl
# Dictionary of all Struct classes for Python 3.3 and 3.4 node types.
py33_nodes = {}
py34_nodes = {}
def initialize_nodetypes():
"""Populate the Struct nodes dictionaries."""
assert len(py33_nodes) == len(py34_nodes) == 0
# If anyone asks, these are defined in python33.py and
    # python34.py since they are available on those modules'
# namespaces.
home33 = __name__[:__name__.rfind('.')] + '.python33'
home34 = __name__[:__name__.rfind('.')] + '.python34'
py33_nodes.update(nodes_from_asdl(
python33_asdl, module=home33,
typed=True))
py34_nodes.update(nodes_from_asdl(
python34_asdl, module=home34,
typed=True))
initialize_nodetypes()
|
import os.path
from pynwb.spec import (
NWBNamespaceBuilder,
export_spec,
NWBGroupSpec,
NWBDatasetSpec,
NWBAttributeSpec,
)
# TODO: import the following spec classes as needed
# from pynwb.spec import NWBDatasetSpec, NWBLinkSpec, NWBDtypeSpec, NWBRefSpec
def main():
# these arguments were auto-generated from your cookiecutter inputs
ns_builder = NWBNamespaceBuilder(
doc="""An NWB extension for storing Near-Infrared Spectroscopy (NIRS) data""",
name="""ndx-nirs""",
version="""0.2.0""",
author=list(
map(
str.strip,
"""Sumner L Norman,Darin Erat Sleiter,José Ribeiro""".split(","),
)
),
contact=list(
map(
str.strip,
"""sumner@ae.studio,darin@ae.studio,jose@ae.studio""".split(","),
)
),
)
# TODO: specify the neurodata_types that are used by the extension as well
# as in which namespace they are found
# this is similar to specifying the Python modules that need to be imported
# to use your new data types
# as of HDMF 1.6.1, the full ancestry of the neurodata_types that are used by
# the extension should be included, i.e., the neurodata_type and its parent
# type and its parent type and so on. this will be addressed in a future
# release of HDMF.
ns_builder.include_type("TimeSeries", namespace="core")
ns_builder.include_type("NWBDataInterface", namespace="core")
ns_builder.include_type("NWBContainer", namespace="core")
ns_builder.include_type("Container", namespace="hdmf-common")
ns_builder.include_type("DynamicTable", namespace="hdmf-common")
ns_builder.include_type("DynamicTableRegion", namespace="hdmf-common")
ns_builder.include_type("VectorData", namespace="hdmf-common")
ns_builder.include_type("Data", namespace="hdmf-common")
ns_builder.include_type("ElementIdentifiers", namespace="hdmf-common")
ns_builder.include_type("Device", namespace="core")
# TODO: define your new data types
# see https://pynwb.readthedocs.io/en/latest/extensions.html#extending-nwb
# for more information
nirs_sources = NWBGroupSpec(
neurodata_type_def="NIRSSourcesTable",
neurodata_type_inc="DynamicTable",
default_name="sources",
doc="A table describing optical sources of a NIRS device",
datasets=[
NWBDatasetSpec(
name="label",
doc="The label of the source",
dtype="text",
shape=(None,),
neurodata_type_inc="VectorData",
),
NWBDatasetSpec(
name="x",
doc="The x coordinate of the optical source",
dtype="float",
shape=(None,),
neurodata_type_inc="VectorData",
),
NWBDatasetSpec(
name="y",
doc="The y coordinate of the optical source",
dtype="float",
shape=(None,),
neurodata_type_inc="VectorData",
),
NWBDatasetSpec(
name="z",
doc="The z coordinate of the optical source",
dtype="float",
shape=(None,),
neurodata_type_inc="VectorData",
quantity="?",
),
],
)
nirs_detectors = NWBGroupSpec(
neurodata_type_def="NIRSDetectorsTable",
neurodata_type_inc="DynamicTable",
default_name="detectors",
doc="A table describing optical detectors of a NIRS device",
datasets=[
NWBDatasetSpec(
name="label",
doc="The label of the detector",
dtype="text",
shape=(None,),
neurodata_type_inc="VectorData",
),
NWBDatasetSpec(
name="x",
doc="The x coordinate of the optical detector",
dtype="float",
shape=(None,),
neurodata_type_inc="VectorData",
),
NWBDatasetSpec(
name="y",
doc="The y coordinate of the optical detector",
dtype="float",
shape=(None,),
neurodata_type_inc="VectorData",
),
NWBDatasetSpec(
name="z",
doc="The z coordinate of the optical detector",
dtype="float",
shape=(None,),
neurodata_type_inc="VectorData",
quantity="?",
),
],
)
nirs_channels = NWBGroupSpec(
neurodata_type_def="NIRSChannelsTable",
neurodata_type_inc="DynamicTable",
default_name="channels",
doc="A table describing optical channels of a NIRS device",
datasets=[
NWBDatasetSpec(
name="label",
doc="The label of the channel",
dtype="text",
shape=(None,),
neurodata_type_inc="VectorData",
),
NWBDatasetSpec(
name="source",
doc="A reference to the optical source for this channel in NIRSSourcesTable",
shape=(None,),
neurodata_type_inc="DynamicTableRegion",
),
NWBDatasetSpec(
name="detector",
doc="A reference to the optical detector for this channel in NIRSDetectorsTable.",
shape=(None,),
neurodata_type_inc="DynamicTableRegion",
),
NWBDatasetSpec(
name="source_wavelength",
doc="The wavelength of light in nm emitted by the source for this channel.",
dtype="float",
shape=(None,),
neurodata_type_inc="VectorData",
),
NWBDatasetSpec(
name="emission_wavelength",
doc=(
"The wavelength of light in nm emitted by the fluorophore under "
"fluorescent spectroscopy for this channel. Only used for fluorescent"
" spectroscopy"
),
dtype="float",
shape=(None,),
neurodata_type_inc="VectorData",
quantity="?",
),
NWBDatasetSpec(
name="source_power",
doc="The power of the source in mW used for this channel.",
dtype="float",
shape=(None,),
neurodata_type_inc="VectorData",
quantity="?",
),
NWBDatasetSpec(
name="detector_gain",
doc="The gain applied to the detector for this channel.",
dtype="float",
shape=(None,),
neurodata_type_inc="VectorData",
quantity="?",
),
],
)
nirs_device = NWBGroupSpec(
neurodata_type_def="NIRSDevice",
neurodata_type_inc="Device",
doc="A NIRS Device",
attributes=[
NWBAttributeSpec(
name="nirs_mode",
doc=(
"The mode of NIRS measurement performed with this device."
" Examples include (but are not limited to) continuous-wave,"
" frequency-domain, time-domain, time-domain-moments,"
" diffuse-correlation-spectroscopy, continuous-wave-fluorescence,"
" and diffuse-optical-tomography, as well as variants including"
" fluorescence."
),
dtype="text",
),
NWBAttributeSpec(
name="frequency",
doc=(
"The modulation frequency in Hz used for frequency domain NIRS."
" if nirs_mode is a type of frequency domain spectroscopy."
),
dtype="float",
required=False,
),
NWBAttributeSpec(
name="time_delay",
doc=(
"The time delay in ns used for gated time domain NIRS. Only used"
" if nirs_mode is a type of gated time domain spectroscopy."
),
dtype="float",
required=False,
),
NWBAttributeSpec(
name="time_delay_width",
doc=(
"The time delay width in ns used for gated time domain NIRS. Only"
" used if nirs_mode is a type of gated time domain spectroscopy."
),
dtype="float",
required=False,
),
NWBAttributeSpec(
name="correlation_time_delay",
doc=(
"The correlation time delay in ns for diffuse correlation"
" spectroscopy NIRS. Only used if nirs_mode is a type of diffuse"
" correlation spectroscopy."
),
dtype="float",
required=False,
),
NWBAttributeSpec(
name="correlation_time_delay_width",
doc=(
"The correlation time delay width in ns for diffuse correlation"
" spectroscopy NIRS. Only used if nirs_mode is a type of diffuse"
" correlation spectroscopy."
),
dtype="float",
required=False,
),
NWBAttributeSpec(
name="additional_parameters",
doc=(
"Any additional parameters corresponding to the NIRS device and"
" NIRS mode of operation that are useful for interpreting the"
" data."
),
dtype="text",
required=False,
),
],
groups=[
NWBGroupSpec(
name="channels",
doc="A table of the optical channels available on this device",
neurodata_type_inc="NIRSChannelsTable",
),
NWBGroupSpec(
name="sources",
doc="The optical sources of this device",
neurodata_type_inc="NIRSSourcesTable",
),
NWBGroupSpec(
name="detectors",
doc="The optical detectors of this device",
neurodata_type_inc="NIRSDetectorsTable",
),
],
)
nirs_series = NWBGroupSpec(
neurodata_type_def="NIRSSeries",
neurodata_type_inc="TimeSeries",
doc="A timeseries of NIRS data",
datasets=[
NWBDatasetSpec(
name="channels",
doc="DynamicTableRegion reference to the optical channels represented by this NIRSSeries",
neurodata_type_inc="DynamicTableRegion",
)
],
)
# TODO: add all of your new data types to this list
new_data_types = [
nirs_sources,
nirs_detectors,
nirs_channels,
nirs_device,
nirs_series,
]
# export the spec to yaml files in the spec folder
output_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__), "..", "..", "spec")
)
export_spec(ns_builder, new_data_types, output_dir)
if __name__ == "__main__":
# usage: python create_extension_spec.py
main()
|
import os
import re
import socket
import threading
from librespot.audio.decoders import AudioQuality
from librespot.core import Session
from librespot.metadata import TrackId
from librespot.player.codecs import VorbisOnlyAudioQuality
session: Session
sock: socket.socket
def handler(client: socket.socket, address: str):
req_raw = client.recv(1024 * 1024)
if len(req_raw) == 0:
return
req_arr = req_raw.split(b"\r\n")
req_http_raw = req_arr[0]
req_header_str = req_raw.split(b"\r\n\r\n")[0]
req_body_str = req_raw.split(b"\r\n\r\n")[1]
req_http_arr = req_http_raw.split(b" ")
req_method = req_http_arr[0]
req_uri = req_http_arr[1]
req_http_version = req_http_arr[2]
req_header = {}
for header in req_header_str.split(b"\r\n"):
try:
key, value = header.split(b": ")
except ValueError:
continue
else:
req_header[key.decode().lower()] = value.decode()
status, headers, content, manually = response(client, req_uri.decode(),
req_header, req_body_str)
if not manually:
client.send(req_http_version + b" " + status.encode() + b"\r\n")
client.send(b"Access-Control-Allow-Origin: *\r\n")
for header in headers:
            client.send(header.encode() + b"\r\n")
client.send(b"\r\n")
client.send(content)
client.close()
class HttpCode:
http_200 = "200 OK"
http_204 = "204 No Content"
http_400 = "400 Bad Request"
http_403 = "403 Forbidden"
http_404 = "404 Not Found"
http_500 = "500 Internal Server Error"
def main():
global session, sock
session = None
if os.path.isfile("credentials.json"):
try:
session = Session.Builder().stored_file().create()
except RuntimeError:
pass
if session is None or not session.is_valid():
username = input("Username: ")
password = input("Password: ")
session = Session.Builder().user_pass(username, password).create()
if not session.is_valid():
return
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("127.0.0.1", 8080))
sock.listen(5)
while True:
threading.Thread(target=handler, args=sock.accept()).start()
def response(client: socket.socket, uri: str, header: dict,
body: bytes) -> tuple[str, list, bytes, bool]:
if re.search(r"^/audio/track/([0-9a-zA-Z]{22})$", uri) is not None:
track_id_search = re.search(
r"^/audio/track/(?P<TrackID>[0-9a-zA-Z]{22})$", uri)
track_id_str = track_id_search.group("TrackID")
track_id = TrackId.from_base62(track_id_str)
stream = session.content_feeder().load(
track_id, VorbisOnlyAudioQuality(AudioQuality.VERY_HIGH), False,
None)
start = 0
end = stream.input_stream.stream().size()
if header.get("range") is not None:
range_search = re.search(
"^bytes=(?P<start>[0-9]+?)-(?P<end>[0-9]+?)$",
header.get("range"))
if range_search is not None:
start = int(range_search.group("start"))
end = (int(range_search.group("end"))
if int(range_search.group("end")) <=
stream.input_stream.stream().size() else
stream.input_stream.stream().size())
stream.input_stream.stream().skip(start)
client.send(b"HTTP/1.0 200 OK\r\n")
client.send(b"Access-Control-Allow-Origin: *\r\n")
client.send(b"Content-Length: " +
(str(stream.input_stream.stream().size()).encode() if
stream.input_stream.stream().size() == end else "{}-{}/{}"
.format(start, end,
stream.input_stream.stream().size()).encode()) +
b"\r\n")
client.send(b"Content-Type: audio/ogg\r\n")
client.send(b"\r\n")
while True:
if (stream.input_stream.stream().pos() >=
stream.input_stream.stream().size()):
break
byte = stream.input_stream.stream().read()
client.send(bytes([byte]))
return "", [], b"", True
else:
return HttpCode.http_404, [], HttpCode.http_404.encode(), False
if __name__ == "__main__":
main()
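# Usage sketch (hedged; the track ID is hypothetical): after starting this script
# and logging in, an audio stream can be requested over plain HTTP, e.g.
#   GET http://127.0.0.1:8080/audio/track/<22-character base62 track id>
# optionally with a "Range: bytes=<start>-<end>" header to seek into the stream.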
|
import pytest
from snowflake.sqlalchemy.snowdialect import SnowflakeDialect
from panoramic.cli.datacol.tztools import now
from panoramic.cli.husky.core.taxonomy.override_mapping.enums import MappingSourceType
from panoramic.cli.husky.core.taxonomy.override_mapping.models import OverrideMapping
from panoramic.cli.husky.service.blending.features.override_mapping.sql import (
OverrideMappingSql,
)
from tests.panoramic.cli.husky.test.test_base import BaseTest
@pytest.fixture
def mapping_definition():
yield OverrideMapping(
slug='slug-1',
name='My mapping',
company_id='company_id',
definition=[['orig_1', 'changed_1'], ['orig_2', 'changed_2'], [None, 'changed_3'], ['orig_4', None]],
source_type=MappingSourceType.DIRECT,
created_by='user',
created_at=now(),
)
class TestRenderDirectMappingCte(BaseTest):
@staticmethod
def _prepare_sql(dialect):
mapping_definition = OverrideMapping(
slug='slug-1',
name='My mapping',
company_id='company_id',
definition=[['orig_1', 'changed_1'], ['orig_2', 'changed_2'], [None, 'changed_3'], ['orig_4', None]],
source_type=MappingSourceType.DIRECT,
created_by='user',
created_at=now(),
)
query = OverrideMappingSql.render_direct_mapping(mapping_definition)
return str(query.compile(compile_kwargs={"literal_binds": True}, dialect=dialect()))
def test_snowflake_dialect(self):
sql = self._prepare_sql(SnowflakeDialect)
self.write_test_expectations('query.sql', str(sql))
expected_query = self.read_test_expectations('query.sql')
self.assertEqual(expected_query, str(sql))
@pytest.mark.parametrize(
'include_unknown_values,expected',
[(True, '__om_my_column_slug_1_true_a576d893f201e0e2'), (False, '__om_my_column_slug_1_false_5e6b4360696a348f')],
)
def test_generate_identifier(mapping_definition, include_unknown_values, expected):
identifier = OverrideMappingSql.generate_identifier('my_column', mapping_definition.slug, include_unknown_values)
assert identifier == expected
def test_generate_cte_name(mapping_definition):
identifier = OverrideMappingSql.generate_cte_name(mapping_definition.slug)
assert identifier == '__om_slug_1_609591aff3bc337e'
|
import numpy as np
FEATURES_PATH = '../data/roi_features/'
cocaines = ['cocaine173_rois.csv','cocaine174_rois.csv','cocaine175_rois.csv','cocaine177_rois.csv','cocaine178_rois.csv']
controls = ['control181_rois.csv','control182_rois.csv','control189_rois.csv','control239_rois.csv','control258_rois.csv']
fears = ['fear187_rois.csv','fear188_rois.csv','fear197_rois.csv','fear199_rois.csv','fear200_rois.csv']
# Row layout (20 columns): label, number, roi, x, y, z, mean, std, 12 averaged features
features = np.zeros(20, dtype=np.float32)
roi_pos = np.genfromtxt(FEATURES_PATH+'roi_pos.csv', delimiter=",", dtype=np.float32)
for fname in cocaines+controls+fears:
label = fname[:-12]
if label == 'cocaine':
label = 0
elif label == 'control':
label = 1
elif label == 'fear':
label = 2
number = fname[-12:-9]
data = np.genfromtxt(FEATURES_PATH+fname, delimiter=",", dtype=np.float32)
nrow, ncol = data.shape
for i in range(0, nrow):
result = np.zeros(20, dtype=np.float32)
row = data[i, :]
result[-12:] = np.mean(np.reshape(row[:-3], (13, 12)), axis=0)
result[0] = label
result[1] = int(fname[-12:-9])
result[2] = row[-1]
result[3:6] = roi_pos[i, 1:4]
result[6:8] = row[-3:-1]
features = np.vstack((features, result))
features = features[1:, :]
np.savetxt(FEATURES_PATH+'features.csv', features, fmt='%10.5f', delimiter=',')
|
import functools
import inspect
import json
import typing
from django.db import transaction
from django.http.response import (
Http404,
HttpResponse,
HttpResponseBadRequest,
StreamingHttpResponse,
)
from django.shortcuts import get_object_or_404
from django.utils.translation import gettext as _
from django.views.decorators.http import require_http_methods
from .models import (
Clerk,
Counter,
Event,
EventPermission,
)
"""
Utility functions for writing AJAX views.
"""
# Some HTTP Status codes that are used here.
RET_ACCEPTED = 202 # Accepted, but not completed.
RET_BAD_REQUEST = 400 # Bad request
RET_UNAUTHORIZED = 401 # Unauthorized, though, not expecting Basic Auth...
RET_FORBIDDEN = 403 # Forbidden
RET_CONFLICT = 409 # Conflict
RET_AUTH_FAILED = 419 # Authentication timeout
RET_LOCKED = 423 # Locked resource
class AjaxError(Exception):
def __init__(self, status, message='AJAX request failed'):
super(AjaxError, self).__init__(message)
self.status = status
self.message = message
def render(self):
return HttpResponse(
self.message,
content_type='text/plain',
status=self.status,
)
class AjaxFunc(object):
def __init__(self, func, url, method):
self.name = func.__name__ # name of the view function
self.pkg = func.__module__
self.func = func
self.url = url # url for url config
self.view_name = 'api_' + self.name # view name for url config
self.view = 'kirppu:' + self.view_name # view name for templates
self.method = method # http method for templates
# Registry for ajax functions. Maps function names to AjaxFuncs per scope.
AJAX_SCOPES: typing.Dict[str, typing.Dict[str, AjaxFunc]] = {}
def ajax_func(original, method='POST', params=None, defaults=None, staff_override=False, ignore_session=False):
"""
Create view function decorator.
The decorated view will not be called if
1. the request is not an AJAX request,
2. the request method does not match the given method,
OR
3. the parameters are not present in the request data.
If the decorated view raises an AjaxError, it will be rendered.
:param original: Original function being wrapped.
:param method: Required HTTP method; either 'GET' or 'POST'
:type method: str
:param params: List of names of expected arguments.
:type params: list[str]
:param defaults: List of default values for arguments. Default values are applied to `params` tail.
:type defaults: list
:param staff_override: Whether this function can be called without checkout being active.
:type staff_override: bool
:param ignore_session: Whether Event stored in session data should be ignored for the call.
:return: A decorator for a view function
:rtype: callable
"""
params = params or []
# Default values are applied only to len(defaults) last parameters.
defaults = defaults or []
defaults_start = len(params) - len(defaults)
assert defaults_start >= 0, original.__name__
def decorator(func):
# Decorate func.
func = require_http_methods([method])(func)
@functools.wraps(func)
def wrapper(request, event_slug, **kwargs):
# Prevent access if checkout is not active.
event = get_object_or_404(Event, slug=event_slug)
if not staff_override and not event.checkout_active:
raise Http404()
if not ignore_session:
# Ensure the request hasn't changed Event.
session_event = request.session.get("event")
if session_event is not None and session_event != event.pk:
return AjaxError(
RET_CONFLICT, _("Event changed. Please refresh the page and re-login.")).render()
# Pass request params to the view as keyword arguments.
# The first argument is skipped since it is the request.
request_data = request.GET if method == 'GET' else request.POST
for i, param in enumerate(params):
try:
if i == 0 and param == "event":
# Supply event from function arguments.
kwargs[param] = event
else:
# Default: Supply argument value from request data.
kwargs[param] = request_data[param]
except KeyError:
if i < defaults_start:
return HttpResponseBadRequest("Incomplete request")
kwargs[param] = defaults[i - defaults_start]
try:
result = func(request, **kwargs)
except AjaxError as ae:
return ae.render()
if isinstance(result, (HttpResponse, StreamingHttpResponse)):
return result
else:
return HttpResponse(
json.dumps(result),
status=200,
content_type='application/json',
)
return wrapper
return decorator
def get_counter(request):
"""
Get the Counter object associated with a request.
Raise AjaxError if session is invalid or counter is not found.
"""
if "counter" not in request.session or "counter_key" not in request.session:
raise AjaxError(RET_UNAUTHORIZED, _(u"Not logged in."))
counter_id = request.session["counter"]
counter_key = request.session["counter_key"]
try:
counter_object = Counter.objects.get(pk=counter_id)
except Counter.DoesNotExist:
raise AjaxError(
RET_UNAUTHORIZED,
_(u"Counter has gone missing."),
)
if counter_object.private_key != counter_key:
raise AjaxError(
RET_UNAUTHORIZED,
_("Unauthorized")
)
return counter_object
def get_clerk(request):
"""
Get the Clerk object associated with a request.
Raise AjaxError if session is invalid or clerk is not found.
"""
for key in ["clerk", "clerk_token", "counter"]:
if key not in request.session:
raise AjaxError(RET_UNAUTHORIZED, _(u"Not logged in."))
clerk_id = request.session["clerk"]
clerk_token = request.session["clerk_token"]
try:
clerk_object = Clerk.objects.get(pk=clerk_id)
except Clerk.DoesNotExist:
raise AjaxError(RET_UNAUTHORIZED, _(u"Clerk not found."))
if clerk_object.access_key != clerk_token:
raise AjaxError(RET_UNAUTHORIZED, _(u"Bye."))
return clerk_object
def require_user_features(counter=True, clerk=True, overseer=False, staff_override=False):
def out_w(func):
@functools.wraps(func)
def wrapper(request, *args, **kwargs):
if staff_override and request.user.is_staff:
return func(request, *args, **kwargs)
if counter:
# This call raises if counter is not found.
get_counter(request)
if clerk or overseer:
                # This call raises if clerk is not found.
clerk_obj = get_clerk(request)
if overseer and not EventPermission.get(clerk_obj.event, clerk_obj.user).can_perform_overseer_actions:
raise AjaxError(RET_FORBIDDEN, _(u"Access denied."))
return func(request, *args, **kwargs)
return wrapper
return out_w
def empty_as_none(value):
return None if (value or "").strip() == "" else value
def get_all_ajax_functions():
for v in AJAX_SCOPES.values():
yield from v.items()
def ajax_func_factory(scope: str):
scope_functions = AJAX_SCOPES.setdefault(scope, dict())
def wrapper(url: str, method='POST', counter=True, clerk=True, overseer=False, atomic=False,
staff_override=False, ignore_session=False):
"""
Decorate a function with some common logic.
        The argument names of the decorated function are required to be present in the
        request data sent to the view, and their values are automatically extracted and
        passed to those arguments.
:param url: URL RegEx this function is served in.
:type url: str
:param method: HTTP Method required. Default is POST.
:type method: str
:param counter: Is registered Counter required? Default: True.
:type counter: bool
:param clerk: Is logged in Clerk required? Default: True.
:type clerk: bool
:param overseer: Is overseer permission required for Clerk? Default: False.
:type overseer: bool
:param atomic: Should this function run in atomic transaction? Default: False.
:type atomic: bool
:param staff_override: Whether this function can be called without checkout being active.
:type staff_override: bool
:param ignore_session: Whether Event stored in session data should be ignored for the call.
:return: Decorated function.
"""
def decorator(func):
# Get argspec before any decoration.
spec = inspect.getfullargspec(func)
wrapped = require_user_features(counter, clerk, overseer, staff_override=staff_override)(func)
fn = ajax_func(
func,
method,
spec.args[1:],
spec.defaults,
staff_override=staff_override,
ignore_session=ignore_session,
)(wrapped)
if atomic:
fn = transaction.atomic(fn)
# Copy name etc from original function to wrapping function.
# The wrapper must be the one referred from urlconf.
fn = functools.wraps(wrapped)(fn)
scope_functions[func.__name__] = AjaxFunc(fn, url, method)
return fn
return decorator
return wrapper
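# Usage sketch (hedged; the scope name, URL pattern and view below are hypothetical,
# not part of this module):
#
#     checkout_ajax = ajax_func_factory("checkout")
#
#     @checkout_ajax(r"^example/$", method="GET", clerk=False)
#     def example_view(request, event, query=""):
#         return {"echo": query}
#
# The decorated view is registered in AJAX_SCOPES["checkout"] under its function
# name; "event" is supplied from the URL's Event, "query" is read from the request
# data, and "" is used when "query" is absent.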
|
# DEC --> XS3-CODE FUNC. [1]
def decxs(n):
t = list(n)
r = ''
d = {'0':'0011', '1':'0100', '2':'0101', '3':'0110', '4':'0111', '5':'1000', '6':'1001', '7':'1010', '8':'1011', '9':'1100', '.':'.'}
for i in t:
r = r + d[i]
return(r)
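# Worked example (restating the table above): decxs("27") encodes digit by digit,
# '2' -> '0101' and '7' -> '1010', giving '01011010'. A '.' in the input is kept
# as-is, so decxs("2.7") == '0101.1010'.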
# XS3-CODE --> DEC FUNC. [2]
def xsdec(n,z):
r = ''
dd = {'0011':'0', '0100':'1', '0101':'2', '0110':'3', '0111':'4', '1000':'5', '1001':'6', '1010':'7', '1011':'8', '1100':'9', '.':'.'}
if '.' not in n:
for i in range (0,z,4):
k = ''
for ii in range(4):
k = k + n[i+ii]
r = r + dd[k]
return(r)
# FOR FRACTIONAL NUMBERS
else:
f = n.index('.')
for i in range (0,f,4):
k = ''
for ii in range(4):
k = k + n[i+ii]
r = r + dd[k]
r += '.'
for i in range (f+1,z,4):
k = ''
for ii in range(4):
k = k + n[i+ii]
r = r + dd[k]
return(r)
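# Worked example (inverse of the one above): xsdec('01011010', 8) reads the code in
# 4-bit groups, '0101' -> '2' and '1010' -> '7', returning '27'. For a fractional
# code such as '0101.1010' (length 9), the '.' splits the integer and fractional
# groups, giving '2.7'.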
# DRIVER FUNCTION
def temp():
try:
print("\n\n-----------------------------------------------------------------------------")
print("PRESS '1' -- TO CONVERT DECIMAL TO EXCESS-3 CODE.")
print("PRESS '2' -- TO CONVERT EXCESS-3 CODE TO DECIMAL.")
print("-----------------------------------------------------------------------------")
us = int(input())
if (us == 1):
try:
j = float(input("\nENTER THE DECIMAL [DEC] CODE -- \t"))
if (j >= 0.0):
n = str(j)
print("\n------***------------***------")
print("{} = {} [XS-3 FORMAT] ".format(j,decxs(n)))
print("------***------------***------\n")
else:
n = str ( j * (- 1.0) )
print("\n------***------------***------")
print("{} = -{} [XS-3 FORMAT] ".format(j,decxs(n)))
print("------***------------***------\n")
except:
print("\n* INVALID CODE INPUT! PLEASE TRY AGAIN..!")
temp()
elif (us == 2):
try:
jk = input("\nENTER THE EXCESS-3 [XS-3] CODE -- \t")
j = list(jk)
z = len(j)
if '-' not in j:
print("\n------***------------***------")
print("{} = {} [DEC FORMAT] ".format(jk,xsdec(j,z)))
print("------***------------***------\n")
else:
j.remove('-')
print("\n------***------------***------")
print("{} = -{} [DEC FORMAT] ".format(jk,xsdec(j,z)))
print("------***------------***------\n")
except:
print("\n* INVALID CODE INPUT [USE 4-DIGIT GROUP CODE]! PLEASE TRY AGAIN..!")
temp()
else:
print("\n* INVALID INPUT! TRY AGAIN..!")
temp()
except:
print("\n* ENTER THE [OPTION](1/2) CORRECTLY!!")
temp()
U = input("\nWANT TO TRY AGAIN? PLEASE TYPE -- [YES/Y OR NO/N] :--\t").lower()
if (U == 'yes' or U == 'y'):
temp()
else:
print("\n\n~THANK YOU! ")
exit()
print("\n *** [DEC] <---> [XS-3] CONVERTER ***\n ----------------------------------")
temp()
# CODED BY TSG405, 2021
|
# Port of my Racket solution:
# https://github.com/lojic/LearningRacket/blob/master/advent-of-code-2021/solutions/day15/day15-conspicio.rkt
from advent import *
# Represent a path with a 3-tuple (risk, x, y) to more easily work with the heap
def solve(input, width):
dim0 = 100
dim = width * dim0
vec = list(chain.from_iterable(input))
visited = [ False ] * (dim * dim)
paths = [(0, 0, 0)]
is_visited = lambda x, y: visited[y * dim + x]
wrap = lambda n: (n - 9) if n > 9 else n
def set_visited(x,y): visited[y * dim + x] = True
def get(x, y):
tile_x, x = divmod(x, dim0)
tile_y, y = divmod(y, dim0)
risk = vec[y * dim0 + x]
return wrap(risk + tile_x + tile_y)
while True:
risk, x, y = heappop(paths)
if is_visited(x, y):
continue
elif (x == dim-1) and (y == dim-1):
return risk
else:
set_visited(x, y)
for nx, ny in [(x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1)]:
if (0 <= nx < dim) and (0 <= ny < dim) and (not is_visited(nx, ny)):
heappush(paths, (risk + get(nx,ny), nx, ny))
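# Worked example of the tiling rule used by get()/wrap() above (values are
# illustrative): a cell whose base risk in the original 100x100 tile is 8, read
# from tile (tile_x=2, tile_y=1), has raw risk 8 + 2 + 1 = 11, which wraps to
# 11 - 9 = 2. Risks therefore stay in the range 1..9 across the enlarged map.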
# Tests ---------------------------------------------------------------------------------------
class TestDay15(unittest.TestCase):
def test_solve(self):
input = parse(15, digits)
self.assertEqual(solve(input, 1), 687)
self.assertEqual(solve(input, 5), 2957)
unittest.main()
|
# Copyright (c) 2013 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import json
from unittest import mock
import ddt
import falcon
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
from testtools import matchers
from zaqar import tests as testing
from zaqar.tests.unit.transport.wsgi import base
@ddt.ddt
class TestClaimsMongoDB(base.V2Base):
config_file = 'wsgi_mongodb.conf'
@testing.requires_mongodb
def setUp(self):
super(TestClaimsMongoDB, self).setUp()
self.default_claim_ttl = self.boot.transport._defaults.claim_ttl
self.project_id = '737_abc8332832'
self.headers = {
'Client-ID': uuidutils.generate_uuid(),
'X-Project-ID': self.project_id
}
self.queue_path = self.url_prefix + '/queues/fizbit'
self.claims_path = self.queue_path + '/claims'
self.messages_path = self.queue_path + '/messages'
doc = json.dumps({"_ttl": 60})
self.simulate_put(self.queue_path, body=doc, headers=self.headers)
self.assertEqual(falcon.HTTP_201, self.srmock.status)
doc = json.dumps({'messages': [{'body': 239, 'ttl': 300}] * 10})
self.simulate_post(self.queue_path + '/messages',
body=doc, headers=self.headers)
self.assertEqual(falcon.HTTP_201, self.srmock.status)
def tearDown(self):
storage = self.boot.storage._storage
control = self.boot.control
connection = storage.connection
connection.drop_database(control.queues_database)
for db in storage.message_databases:
connection.drop_database(db)
self.simulate_delete(self.queue_path, headers=self.headers)
super(TestClaimsMongoDB, self).tearDown()
@ddt.data('[', '[]', '.', '"fail"')
def test_bad_claim(self, doc):
self.simulate_post(self.claims_path, body=doc, headers=self.headers)
self.assertEqual(falcon.HTTP_400, self.srmock.status)
href = self._get_a_claim()
self.simulate_patch(href, body=doc, headers=self.headers)
self.assertEqual(falcon.HTTP_400, self.srmock.status)
def test_exceeded_claim(self):
self.simulate_post(self.claims_path,
body='{"ttl": 100, "grace": 60}',
query_string='limit=21', headers=self.headers)
self.assertEqual(falcon.HTTP_400, self.srmock.status)
@ddt.data((-1, -1), (59, 60), (60, 59), (60, 43201), (43201, 60))
def test_unacceptable_ttl_or_grace(self, ttl_grace):
ttl, grace = ttl_grace
self.simulate_post(self.claims_path,
body=json.dumps({'ttl': ttl, 'grace': grace}),
headers=self.headers)
self.assertEqual(falcon.HTTP_400, self.srmock.status)
@ddt.data(-1, 59, 43201)
def test_unacceptable_new_ttl(self, ttl):
href = self._get_a_claim()
self.simulate_patch(href,
body=json.dumps({'ttl': ttl}),
headers=self.headers)
self.assertEqual(falcon.HTTP_400, self.srmock.status)
def test_default_ttl_and_grace(self):
self.simulate_post(self.claims_path,
body='{}', headers=self.headers)
self.assertEqual(falcon.HTTP_201, self.srmock.status)
body = self.simulate_get(self.srmock.headers_dict['location'],
headers=self.headers)
claim = jsonutils.loads(body[0])
self.assertEqual(self.default_claim_ttl, claim['ttl'])
def _get_a_claim(self):
doc = '{"ttl": 100, "grace": 60}'
self.simulate_post(self.claims_path, body=doc, headers=self.headers)
return self.srmock.headers_dict['Location']
def test_lifecycle(self):
doc = '{"ttl": 100, "grace": 60}'
# First, claim some messages
body = self.simulate_post(self.claims_path, body=doc,
headers=self.headers)
self.assertEqual(falcon.HTTP_201, self.srmock.status)
claimed = jsonutils.loads(body[0])['messages']
claim_href = self.srmock.headers_dict['Location']
message_href, params = claimed[0]['href'].split('?')
# No more messages to claim
self.simulate_post(self.claims_path, body=doc,
query_string='limit=3', headers=self.headers)
self.assertEqual(falcon.HTTP_204, self.srmock.status)
# Listing messages, by default, won't include claimed, will echo
body = self.simulate_get(self.messages_path,
headers=self.headers,
query_string="echo=true")
self.assertEqual(falcon.HTTP_200, self.srmock.status)
self._empty_message_list(body)
# Listing messages, by default, won't include claimed, won't echo
body = self.simulate_get(self.messages_path,
headers=self.headers,
query_string="echo=false")
self.assertEqual(falcon.HTTP_200, self.srmock.status)
self._empty_message_list(body)
# List messages, include_claimed, but don't echo
body = self.simulate_get(self.messages_path,
query_string='include_claimed=true'
'&echo=false',
headers=self.headers)
self.assertEqual(falcon.HTTP_200, self.srmock.status)
self._empty_message_list(body)
# List messages with a different client-id and echo=false.
# Should return some messages
headers = self.headers.copy()
headers["Client-ID"] = uuidutils.generate_uuid()
body = self.simulate_get(self.messages_path,
query_string='include_claimed=true'
'&echo=false',
headers=headers)
self.assertEqual(falcon.HTTP_200, self.srmock.status)
# Include claimed messages this time, and echo
body = self.simulate_get(self.messages_path,
query_string='include_claimed=true'
'&echo=true',
headers=self.headers)
listed = jsonutils.loads(body[0])
self.assertEqual(falcon.HTTP_200, self.srmock.status)
self.assertEqual(len(claimed), len(listed['messages']))
now = timeutils.utcnow() + datetime.timedelta(seconds=10)
timeutils_utcnow = 'oslo_utils.timeutils.utcnow'
with mock.patch(timeutils_utcnow) as mock_utcnow:
mock_utcnow.return_value = now
body = self.simulate_get(claim_href, headers=self.headers)
claim = jsonutils.loads(body[0])
self.assertEqual(falcon.HTTP_200, self.srmock.status)
self.assertEqual(100, claim['ttl'])
# NOTE(cpp-cabrera): verify that claim age is non-negative
self.assertThat(claim['age'], matchers.GreaterThan(-1))
# Try to delete the message without submitting a claim_id
self.simulate_delete(message_href, headers=self.headers)
self.assertEqual(falcon.HTTP_403, self.srmock.status)
# Delete the message and its associated claim
self.simulate_delete(message_href,
query_string=params, headers=self.headers)
self.assertEqual(falcon.HTTP_204, self.srmock.status)
# Try to get it from the wrong project
headers = {
'Client-ID': uuidutils.generate_uuid(),
'X-Project-ID': 'bogusproject'
}
self.simulate_get(message_href, query_string=params, headers=headers)
self.assertEqual(falcon.HTTP_404, self.srmock.status)
# Get the message
self.simulate_get(message_href, query_string=params,
headers=self.headers)
self.assertEqual(falcon.HTTP_404, self.srmock.status)
# Update the claim
new_claim_ttl = '{"ttl": 60, "grace": 60}'
creation = timeutils.utcnow()
self.simulate_patch(claim_href, body=new_claim_ttl,
headers=self.headers)
self.assertEqual(falcon.HTTP_204, self.srmock.status)
# Get the claimed messages (again)
body = self.simulate_get(claim_href, headers=self.headers)
query = timeutils.utcnow()
claim = jsonutils.loads(body[0])
message_href, params = claim['messages'][0]['href'].split('?')
self.assertEqual(60, claim['ttl'])
estimated_age = timeutils.delta_seconds(creation, query)
self.assertGreater(estimated_age, claim['age'])
# Delete the claim
self.simulate_delete(claim['href'], headers=self.headers)
self.assertEqual(falcon.HTTP_204, self.srmock.status)
# Try to delete a message with an invalid claim ID
self.simulate_delete(message_href,
query_string=params, headers=self.headers)
self.assertEqual(falcon.HTTP_400, self.srmock.status)
# Make sure it wasn't deleted!
self.simulate_get(message_href, query_string=params,
headers=self.headers)
self.assertEqual(falcon.HTTP_200, self.srmock.status)
# Try to get a claim that doesn't exist
self.simulate_get(claim['href'], headers=self.headers)
self.assertEqual(falcon.HTTP_404, self.srmock.status)
# Try to update a claim that doesn't exist
self.simulate_patch(claim['href'], body=doc,
headers=self.headers)
self.assertEqual(falcon.HTTP_404, self.srmock.status)
def test_post_claim_nonexistent_queue(self):
path = self.url_prefix + '/queues/nonexistent/claims'
self.simulate_post(path,
body='{"ttl": 100, "grace": 60}',
headers=self.headers)
self.assertEqual(falcon.HTTP_204, self.srmock.status)
def test_get_claim_nonexistent_queue(self):
path = self.url_prefix + '/queues/nonexistent/claims/aaabbbba'
self.simulate_get(path, headers=self.headers)
self.assertEqual(falcon.HTTP_404, self.srmock.status)
# NOTE(cpp-cabrera): regression test against bug #1203842
def test_get_nonexistent_claim_404s(self):
self.simulate_get(self.claims_path + '/a', headers=self.headers)
self.assertEqual(falcon.HTTP_404, self.srmock.status)
def test_delete_nonexistent_claim_204s(self):
self.simulate_delete(self.claims_path + '/a',
headers=self.headers)
self.assertEqual(falcon.HTTP_204, self.srmock.status)
def test_patch_nonexistent_claim_404s(self):
patch_data = json.dumps({'ttl': 100})
self.simulate_patch(self.claims_path + '/a', body=patch_data,
headers=self.headers)
self.assertEqual(falcon.HTTP_404, self.srmock.status)
class TestClaimsFaultyDriver(base.V2BaseFaulty):
config_file = 'wsgi_faulty.conf'
def test_simple(self):
self.project_id = '480924abc_'
self.headers = {
'Client-ID': uuidutils.generate_uuid(),
'X-Project-ID': self.project_id
}
claims_path = self.url_prefix + '/queues/fizbit/claims'
doc = '{"ttl": 100, "grace": 60}'
self.simulate_post(claims_path, body=doc, headers=self.headers)
self.assertEqual(falcon.HTTP_503, self.srmock.status)
self.simulate_get(claims_path + '/nichts', headers=self.headers)
self.assertEqual(falcon.HTTP_503, self.srmock.status)
self.simulate_patch(claims_path + '/nichts', body=doc,
headers=self.headers)
self.assertEqual(falcon.HTTP_503, self.srmock.status)
self.simulate_delete(claims_path + '/foo', headers=self.headers)
self.assertEqual(falcon.HTTP_503, self.srmock.status)
|
"""
Longest Word
Have the function LongestWord(sen) take the sen parameter being passed and
return the largest word in the string. If there are two or more words that are
the same length, return the first word from the string with that length.
Ignore punctuation and assume sen will not be empty.
Examples
Input: "fun&!! time"
Output: time
Input: "I love dogs"
Output: love
"""
import re
def LongestWord(sen):
res = ''
    # Split on any non-alphanumeric character so punctuation is ignored.
    for i in re.split('[^a-zA-Z0-9]', sen):
if len(i) > len(res):
res = i
return res
if __name__ == '__main__':
input = "fun&!! time"
print(LongestWord(input))
#Output: time
input = "I love dogs"
print(LongestWord(input))
#Output: love
input = "0123456789 123456"
print(LongestWord(input))
#Output: 0123456789
|
import PREFS
if __name__ == "__main__":
print(PREFS.read_prefs_file("folder/theme.prefs", verbose=True))
|
# -*- coding: <UTF-8> -*-
import pickle as picklerick
import lib.main_storage as ms
from ._globals import base, index_base
class PTrieNode:
def __init__(self):
self.key = None
self.value = 0
self.hasNext = False
        # 45 child slots: indices 0-25 for a-z, 26-35 for 0-9, and the tail slots
        # (reached via negative indices) for ':', '.', ',', "'", ' ' and unrecognized characters.
self.ptrs = [None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None, None, None, None, None, None]
def next(self, char):
self.hasNext = True
index = char_to_index(char)
if self.ptrs[index] is None:
self.ptrs[index] = PTrieNode()
return self.ptrs[index]
def set(self, string, value):
self.key = string
self.value = value
def _print(self, level):
print('key=' + str(self.key) + ', value=' + str(self.value) + ', lvl=' + str(level))
for x in self.ptrs:
if x is not None:
x._print(level+1)
def extract(self):
out = []
if self.key is not None:
out.append((self.key, self.value))
for node in self.ptrs:
if node is not None:
out += node.extract()
return out
class PatriciaTrie:
def __init__(self):
self.root = PTrieNode()
self.root.value = 0
@staticmethod
def propagate_to_branches(node_list):
out = []
for node in node_list:
out += node.extract()
return out
def _insert(self, string, value, node=None, start_ind=0):
node = self.root if node is None else node
for i in range(start_ind, len(string)):
if node.key is None:
if node.hasNext:
node = node.next(string[i])
else:
node.set(string, value)
return
else:
if len(node.key) == i:
node = node.next(string[i])
else:
string2 = node.key
value2 = node.value
if string == string2:
print('register with key=' + string2 + ' already exists')
return
else:
node.key, node.value = None, 0
j = 0
for c1, c2 in zip(string, string2):
if c1 == c2:
j += 1
else:
break
if j == len(string2):
string, string2 = string2, string
value, value2 = value2, value
if j == len(string): # in case one string is the prefix of the other
for a in range(i, j):
node = node.next(string[a])
self._insert(string, value, node, j-1)
node = node.next(string2[j])
self._insert(string2, value2, node, j)
else:
for a in range(i, j):
node = node.next(string[a])
self._insert(string, value, node.next(string[j]), j)
self._insert(string2, value2, node.next(string2[j]), j)
return
node.set(string, value) # got to the end of the string, so it belongs to this node
return
def insert(self, movie, position=None, db_filepath='lpmdb.bin', λ=lambda x: x.title):
position = ms.getMoviePositionByID(db_filepath, movie.lpmdb_id) if position is None else position
self._insert(λ(movie), position)
def save(self, filepath, is_filepath=False):
"""saves the patricia trie to the database. It assumes the filepath recieved is actually a field name and
saves the file with a special name derived from it. If is_filepath=True, it doesn't do that conversion"""
if not is_filepath:
filepath += '_ptrie.bin'
with open(index_base+filepath, 'wb') as file:
file.write(picklerick.dumps(self))
@classmethod
def load(cls, field, suffix=False):
"""given a field, converts it to the equivalent filename and opens it"""
extra = '_suf' if suffix else ''
return cls.read(field+extra+'_ptrie.bin')
@staticmethod
def read(filepath):
try:
with open(index_base+filepath, 'rb') as file:
return picklerick.loads(file.read())
except FileNotFoundError:
return None
@staticmethod
def create_patricia_trie(db_filename, λ=lambda mv: mv.title.lower()):
pt = PatriciaTrie()
pt._createPatriciaTrie(db_filename, λ)
return pt
def _createPatriciaTrie(self, db_filename, λ):
with open(base+db_filename, 'rb') as file:
position = file.tell()
movie = ms.readNext(file)
while movie is not None:
self.insert(movie, position, db_filename, λ)
position = file.tell()
movie = ms.readNext(file)
def print(self):
self.root._print(0)
def searchExactMatch(self, string):
node = self.root
for letter in string:
if node.hasNext:
node = node.next(letter)
else:
break
        return (node.key, node.value) if node is not None else None
def prefixSearch(self, string):
node = self.root
for letter in string:
if node is not None and node.hasNext:
node = node.ptrs[char_to_index(letter)]
else:
break
return [node]
def _infixSearch(self, infix, _i, node):
out = []
if node is None:
return []
else:
if _i == len(infix):
return [node]
elif node.hasNext:
if node.key == "caacbo":
print(node.key)
print(_i)
index = char_to_index(infix[_i])
index_str0 = char_to_index(infix[0])
for i, _node in enumerate(node.ptrs):
if _node is not None:
if i == index:
out += self._infixSearch(infix, _i+1, _node)
elif i == index_str0:
out += self._infixSearch(infix, 1, _node)
else:
out += self._infixSearch(infix, 0, _node)
return out
elif infix in node.key:
return [node]
else:
return []
def infixSearch(self, infix):
return self._infixSearch(infix, 0, self.root)
def testFill(self, lista):
x = 0
for s in lista:
            self._insert(s, x)
x += 10
def char_to_index(char):
if char >= 'a':
index = ord(char) - ord('a')
elif char >= '0' and char <= '9':
index = ord(char) - ord('0') + 26
elif char == ':':
index = -6
elif char == '.':
index = -5
elif char == ',':
index = -4
elif char == "'":
index = -3
elif char == ' ': # char == ' '
index = -2
else:
index = -1
        print('unidentified char: ' + char)
return index
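# Worked example (restating the mapping above): char_to_index('a') == 0,
# char_to_index('z') == 25, char_to_index('0') == 26, char_to_index('9') == 35,
# and the special characters use negative indices into the 45-slot ptrs list,
# e.g. char_to_index(':') == -6 (slot 39) and char_to_index(' ') == -2 (slot 43).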
# @ test
# ptrie = PatriciaTrie()
# ptrie.createPatriciaTrie('db.bin')
# x = 0
# lista = ['caca', 'macarroni', 'acbolado', 'acb', 'acd', 'caacbo', 'ccaa']
# lista = ['ar', 'args']
# ptrie.testFill(lista)
# ptrie.print()
# print(ptrie.root.ptrs[char_to_index('a')].ptrs[(char_to_index('g'))].ptrs[(char_to_index('o'))].key)
# print(rt.ptrs[char_to_index('a')].ptrs[char_to_index('a')].ptrs[char_to_index('b')].key)#.ptrs[char_to_index('a')].key)
# ptrie.print()
# print(ptrie.searchExactMatch('agon'))
# print(ptrie.prefixSearch('ab')[0].ptrs[char_to_index('b')].key)
# l = ptrie.infixSearch('cb')
# print(l)
# print(l[0].ptrs[1].key)
# print(l[0].ptrs[3].key)
# print(l[0].key)
# print(l[0].ptrs[char_to_index('o')].key)
# print(l[1].key)
# a = ptrie.genericSearch('wi', ptrie.infixSearch)
# print(len(a)) |
# -*- coding: utf-8 -*-
from classy.base import View
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
class FormView(View):
form = None
redirect = None
def get_form(self, request, **kwargs):
if not self.form:
raise ImproperlyConfigured("'%s' must provide a form." % self.__class__.__name__)
return self.form(**kwargs)
def get(self, request, **kwargs):
form = self.get_form(request)
return {'form':form}
def get_redirect(self, request, form, **kwargs):
if self.redirect:
return HttpResponseRedirect(reverse(self.redirect, kwargs=kwargs))
def post(self, request, **kwargs):
form = self.get_form(request, data=request.POST, files=request.FILES, **kwargs)
if form.is_valid():
self.process_form(request, form, **kwargs)
redirect = self.get_redirect(request, form, **kwargs)
if redirect:
return redirect
return {'form':form}
def process_form(self, request, form, **kwargs):
form.save()
if hasattr(form, 'save_m2m'):
form.save_m2m()
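# Usage sketch (hedged; ContactForm and the 'contact-done' URL name are hypothetical,
# not part of this module):
#
#     class ContactView(FormView):
#         form = ContactForm
#         redirect = 'contact-done'
#
# GET renders an unbound form in the context; POST validates it, calls
# process_form() (which saves the form) and then redirects to the named URL.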
|
import os
import re
import psycopg2
import psycopg2.extras
import redis
from dotenv import load_dotenv
from lua_scripts import lua_1, lua_2
load_dotenv()
class CockroachHandler:
"""
This class implements a handler for the CockroachDB database.
"""
def __init__(self):
"""
Initializes this handler. The initialization uses the DATABASE_URL env variable
"""
try:
# Connect to cluster
self.connection = psycopg2.connect(
os.getenv('DATABASE_URL', ''), cursor_factory=psycopg2.extras.DictCursor)
self.connection.set_session(autocommit=True)
self.cur = self.connection.cursor()
self.r = redis.Redis(host=os.getenv('REDIS_HOST', '155.207.19.237'), port=os.getenv('REDIS_PORT', 6379),
db=os.getenv('REDIS_DB', 0), password=os.getenv('REDIS_PASSWORD', ''))
self.update_running_values = self.r.register_script(lua_1)
# Historical aggregates Lua script
self.update_historical_aggregates = self.r.register_script(lua_2)
except Exception as e:
raise e
def __del__(self):
self.cur.close()
self.connection.close()
self.r.connection_pool.disconnect()
def create_table(self, table_name, column_specs):
"""
Registers a new table at the database
:param table_name: the name of the table
:param column_specs: An array of objects, each containing the column specifications
Example object:
            {
"name": "name_of_column",
"type": "type_of_column",
"primary_key": "yes"
}
"""
column_declarator = "("
for column in column_specs:
column_declarator += '"' + column["name"] + '" ' + column["type"]
if "primary_key" in column:
column_declarator += " PRIMARY KEY"
column_declarator += ', '
column_declarator = column_declarator[:-2] + ")"
try:
self.cur.execute(
f"CREATE TABLE IF NOT EXISTS {table_name} {column_declarator}")
self.cur.execute(
f"CREATE INDEX IF NOT EXISTS timestamp_index ON {table_name} (cenote$timestamp)")
except Exception as e:
return {"response": 400, "exception": e}
return {"response": 201}
def alter_table(self, table_name, column_specs):
"""
Alters a pre-existing table in the database
:param table_name: the name of the table
:param column_specs: An array of objects, each containing the column specifications
Example object:
            {
"name": "name_of_column",
"type": "type_of_column",
"primary_key": "yes"
}
"""
try:
for column in column_specs:
column_declarator = column["name"] + ' ' + column["type"]
if "primary_key" in column:
column_declarator += " PRIMARY KEY"
self.cur.execute(
f"ALTER TABLE IF EXISTS {table_name} ADD COLUMN IF NOT EXISTS {column_declarator}")
except Exception as e:
return {"response": 400, "exception": e}
return {"response": 201}
def describe_table(self, table_name):
self.cur.execute(f"SELECT * FROM {table_name} LIMIT 1")
return self.cur.fetchone()
def write_data(self, table_name, data_instance_array):
"""
Writes data into a certain table
:param table_name: the name of the table
:param data_instance_array: array of data_instance
data_instance: An array of objects that contain the values to be inserted in each column
Example object:
            {
"column": "name_of_column",
"value": "the_value_to_be_inserted",
"built_in_function": "current_timestamp()"
}
The data registration process supports two types:
1) value: Contains the raw value to be inserted into the table
2) built_in_function: Provides the name of the built-in function to be used for generating the value
"""
# Get info from first event only
first_event = data_instance_array[0]
column_list = "("
pattern = re.compile(r'\'')
for value_descriptor in first_event:
column_list += '"' + value_descriptor["column"] + '", '
column_list = column_list[:-2] + ")"
all_values_to_write = []
all_column_names = [value_descriptor["column"]
for value_descriptor in first_event]
redis_fail = None
for data_instance in data_instance_array:
values_list = "("
for column_name in all_column_names:
value_descriptor = [
x for x in data_instance if x["column"] == column_name]
if len(value_descriptor) > 0:
if 'value' in value_descriptor[0]:
if type(value_descriptor[0]["value"]) is str:
values_list += "'" + \
pattern.sub(
"''", str(value_descriptor[0]["value"])) + "'"
else:
values_list += str(value_descriptor[0]["value"])
else:
values_list += value_descriptor[0]["built_in_function"]
else:
values_list += 'NULL'
values_list += ', '
values_list = values_list[:-2] + ")"
all_values_to_write.append(values_list)
redis_fail = None
for vd in data_instance:
if 'value' in vd and not vd["column"].startswith("cenote") and (
type(vd["value"]) is int or type(vd["value"]) is float):
try:
with self.r.pipeline() as pipe:
while True:
try:
pipe.watch(f"{table_name}_{vd['column']}")
self.update_running_values(keys=[f"{table_name}_{vd['column']}"],
args=[
vd['value']],
client=pipe)
pipe.execute()
break
except redis.WatchError:
continue
except Exception as e:
redis_fail = e
# Historical aggregates
            if 'installations' in table_name:
for vd in data_instance:
if vd["column"] == 'cenote$timestamp':
split = vd['value'].split(':')
date = split[0].split('T')[0]
month = date[:7]
hour = split[0].split('T')[1]
redis_fail = None
for vd in data_instance:
if 'value' in vd and vd["column"] == "active" and (type(vd["value"]) is int or type(vd["value"]) is float):
try:
with self.r.pipeline() as pipe:
while True:
try:
pipe.watch(
f"{table_name}_{vd['column']}_hist")
self.update_historical_aggregates(
keys=[
f"{table_name}_{vd['column']}_hist"],
args=[vd['value'],
date, month, hour],
client=pipe)
pipe.execute()
break
except redis.WatchError:
continue
except Exception as e:
redis_fail = e
query = f"INSERT INTO {table_name} {column_list} VALUES {','.join(map(str, all_values_to_write))}"
try:
self.cur.execute(query)
except Exception as e:
return {"response": 400, "exception": e}
return {"response": 400, "exception": repr(redis_fail)} if redis_fail else {"response": 201}
|
from .wolfram import Wolfram
def setup(bot):
bot.add_cog(Wolfram(bot))
|
import os
import imp
import configparser
from pp_utils import Monitor
class IOPluginManager(object):
plugins=[]
def __init__(self):
self.mon=Monitor()
def init(self,pp_dir,pp_profile,widget,callback,pp_home):
self.pp_dir=pp_dir
self.pp_profile=pp_profile
self.pp_home=pp_home
IOPluginManager.plugins=[]
if os.path.exists(self.pp_profile+os.sep+'pp_io_config'):
            # read the .cfg files in /pp_io_config in the profile, registering each I/O plugin
for cfgfile in os.listdir(self.pp_profile+os.sep+'pp_io_config'):
if cfgfile in ('screen.cfg','osc.cfg'):
continue
cfgfilepath = self.pp_profile+os.sep+'pp_io_config'+os.sep+cfgfile
status,message=self.init_config(cfgfile,cfgfilepath,widget,callback)
if status == 'error':
return status,message
        # read the .cfg files in /pipresents/pp_io_config; if a file is not present in the profile, use this one
for cfgfile in os.listdir(self.pp_dir+os.sep+'pp_io_config'):
if cfgfile in ('screen.cfg','osc.cfg'):
continue
if not os.path.exists(self.pp_profile+os.sep+'pp_io_config'+os.sep+cfgfile):
cfgfilepath=self.pp_dir+os.sep+'pp_io_config'+os.sep+cfgfile
status,message=self.init_config(cfgfile,cfgfilepath,widget,callback)
if status == 'error':
return status,message
# print IOPluginManager.plugins
return 'normal','I/O Plugins registered'
def init_config(self,cfgfile,cfgfilepath,widget,callback):
# print cfgfile,cfgfilepath
reason,message,config=self._read(cfgfile,cfgfilepath)
if reason =='error':
self.mon.err(self,'Failed to read '+cfgfile + ' ' + message)
return 'error','Failed to read '+cfgfile + ' ' + message
if config.has_section('DRIVER') is False:
self.mon.err(self,'No DRIVER section in '+cfgfilepath)
return 'error','No DRIVER section in '+cfgfilepath
entry = dict()
#read information from DRIVER section
entry['title']=config.get('DRIVER','title')
if config.get('DRIVER','enabled')=='yes':
if config.has_option('DRIVER','driver-ref'):
entry['driver-ref']=config.get('DRIVER','driver-ref')
else:
entry['driver-ref']=''
driver_name=config.get('DRIVER','module')
driver_path=self.pp_dir+os.sep+'pp_io_plugins'+os.sep+driver_name+'.py'
if not os.path.exists(driver_path):
self.mon.err(self,driver_name + ' Driver not found in ' + driver_path)
return 'error',driver_name + ' Driver not found in ' + driver_path
instance = self._load_plugin_file(driver_name,self.pp_dir+os.sep+'pp_io_plugins')
reason,message=instance.init(cfgfile,cfgfilepath,widget,self.pp_dir,self.pp_home,self.pp_profile,callback)
if reason=='warn':
self.mon.warn(self,message)
return 'error',message
if reason=='error':
self.mon.warn(self,message)
return 'error',message
entry['instance']=instance
self.mon.log(self,message)
IOPluginManager.plugins.append(entry)
return 'normal','I/O Plugins registered'
def start(self):
for entry in IOPluginManager.plugins:
plugin=entry['instance']
if plugin.is_active() is True:
plugin.start()
def terminate(self):
for entry in IOPluginManager.plugins:
plugin=entry['instance']
if plugin.is_active() is True:
plugin.terminate()
self.mon.log(self,'I/O plugin '+entry['title']+ ' terminated')
def get_input(self,key,driver_ref=''):
for entry in IOPluginManager.plugins:
plugin=entry['instance']
# print ('trying ',entry['title'],plugin.is_active())
if plugin.is_active() is True and driver_ref == entry['driver-ref']:
# need to test found in plugin to allow key to match if driver-ref not used
found,value = plugin.get_input(key)
if found is True:
return found,value
# key not found in any plugin
return False,None
def handle_output_event(self,name,param_type,param_values,req_time):
for entry in IOPluginManager.plugins:
plugin=entry['instance']
# print ('trying ',entry['title'],name,param_type,plugin.is_active())
if plugin.is_active() is True:
# print (name,param_type,param_values,req_time)
reason,message= plugin.handle_output_event(name,param_type,param_values,req_time)
if reason == 'error':
# self.mon.err(self,message)
return 'error',message
else:
self.mon.log(self,message)
return 'normal','output scan complete'
def _load_plugin_file(self, name, driver_dir):
fp, pathname,description = imp.find_module(name,[driver_dir])
module_id = imp.load_module(name,fp,pathname,description)
plugin_class = getattr(module_id,name)
return plugin_class()
def _read(self,filename,filepath):
if os.path.exists(filepath):
config = configparser.ConfigParser(inline_comment_prefixes = (';',))
config.read(filepath)
self.mon.log(self,filename+" read from "+ filepath)
return 'normal',filename+' read',config
else:
return 'error',filename+' not found at: '+filepath,None
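
# Usage sketch (hedged): the expected lifecycle when embedding the manager.
# 'pp_dir', 'pp_profile', 'pp_home', 'widget' and 'callback' are placeholders
# supplied by the host application, not values defined in this module.
#
# iopm = IOPluginManager()
# status, message = iopm.init(pp_dir, pp_profile, widget, callback, pp_home)
# if status == 'normal':
#     iopm.start()
#     found, value = iopm.get_input('some-key')
#     iopm.terminate()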
|
"""
Selects recurrent neural network based on the name.
Author: Mateusz Malinowski
Email: mmalinow@mpi-inf.mpg.de
"""
from keras.layers.recurrent import GRU
from keras.layers.recurrent import LSTM
from keras.layers.recurrent import SimpleRNN
#from keras.layers.recurrent import JZS1
#from keras.layers.recurrent import JZS2
#from keras.layers.recurrent import JZS3
select = {
'lstm':LSTM,
'gru':GRU,
'simpleRNN':SimpleRNN,
#'mut1':JZS1,
#'mut2':JZS2,
#'mut3':JZS3,
#'jzs1':JZS1,
#'jzs2':JZS2,
#'jzs3':JZS3
}
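
# Usage sketch (hedged): look up a recurrent layer class by its name and build
# it like any other Keras layer; the 'output_dim' value below is illustrative.
#
# rnn_builder = select['lstm']
# layer = rnn_builder(output_dim=512, return_sequences=True)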
|
"""A simple example of how to access the Google Analytics API."""
import argparse
import oauth2client
from apiclient.discovery import build
from oauth2client.service_account import ServiceAccountCredentials
import httplib2
from oauth2client import client
from oauth2client import file
from oauth2client import tools
import urllib2
def get_service(api_name, api_version, scope, key_file_location,
service_account_email):
"""Get a service that communicates to a Google API.
Args:
api_name: The name of the api to connect to.
api_version: The api version to connect to.
scope: A list auth scopes to authorize for the application.
key_file_location: The path to a valid service account p12 key file.
service_account_email: The service account email address.
Returns:
A service that is connected to the specified API.
"""
    credentials = ServiceAccountCredentials.from_p12_keyfile(
        service_account_email, key_file_location, scopes=scope)
http = credentials.authorize(httplib2.Http())
# Build the service object.
service = build(api_name, api_version, http=http)
return service
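
# Usage sketch (hedged): with the service built, the existing user links on an
# account can be listed through the Management API before inserting new ones;
# 'ACCOUNT_ID' is a placeholder.
#
# links = service.management().accountUserLinks().list(accountId='ACCOUNT_ID').execute()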
# Insert user emails here
users = {'', ''}
# Insert list of GA account IDs
account = {'', ''}
def link_user(service):
for u in users :
for x in account :
try:
service.management().accountUserLinks().insert(
accountId='%s' % x,
body={
'permissions': {
'local': [
'MANAGE_USERS',
'EDIT',
'READ',
'COLLABORATE',
'ANALYZE'
]},
'userRef': {
'email': '%s'% u }
}
).execute()
print "%s::%s::added" % (u, x)
except TypeError, error:
# Handle errors in constructing a query.
print 'There was an error in constructing your query : %s' % error
except urllib2.HTTPError, error:
# Handle API errors.
print ('There was an API error : %s : %s' %
(error.resp.status, error.resp.reason))
# print "%s::added::user::%s" % (account[0], u)
def main():
scope = ['https://www.googleapis.com/auth/analytics.manage.users']
# service account email and relative location of your key file.
service_account_email = ''
key_file_location = 'C:/users/trenouf/documents/mystuffpython/gap/client_secrets.p12'
# Authenticate and construct service.
service = get_service('analytics', 'v3', scope, key_file_location,
service_account_email)
link_user(service)
if __name__ == '__main__':
main() |