Columns:
query: string, lengths 9 to 3.4k
document: string, lengths 9 to 87.4k
metadata: dict
negatives: sequence, lengths 4 to 101
negative_scores: sequence, lengths 4 to 101
document_score: string, lengths 3 to 10
document_rank: string, 102 distinct values
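The columns above describe a retrieval-style dataset: a natural-language query, a positive document, and scored negative candidates. A minimal way to load and inspect one row with the Hugging Face `datasets` library is sketched below; the repository id is a placeholder, since the actual dataset name is not given here.

```python
# Minimal sketch for loading and inspecting one row.
# "org/code-retrieval-triplets" is a placeholder id (assumption), not the real repository.
from datasets import load_dataset

ds = load_dataset("org/code-retrieval-triplets", split="train")
row = ds[0]

print(row["query"][:80])             # docstring-style query
print(row["document"][:80])          # matching function definition (the positive)
print(len(row["negatives"]))         # 4 to 101 negative code snippets
print(len(row["negative_scores"]))   # one score per negative
print(row["document_score"], row["document_rank"])
```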
Defines the observations provided by the environment. May use a subclass of `ArraySpec` that specifies additional properties such as min and max bounds on the values.
def observation_spec(self) -> types.NestedArraySpec:
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def observation_spec(self):\n return ArraySpec(shape=(23,), dtype=np.float32)", "def __init__(self, pathspec, properties={}):\n import numpy\n self.pathspec = pathspec\n super(ArraySpec,self).__init__(numpy.ndarray)\n self.properties = OrderedDict(properties)", "def observation_spec(self):\r\n pass", "def __init__(self, env):\n gym.ObservationWrapper.__init__(self, env)\n self.width = 84\n self.height = 84\n self.observation_space = spaces.Box(low=0, high=255,\n shape=(self.height, self.width, 1), dtype=np.uint8)", "def __init__(self, env):\n lab.ObservationWrapper.__init__(self, env)\n self.width = 80\n self.height = 80\n self.observation_space = spaces.Box(low=0, high=255,\n shape=(self.height, self.width, 1),\n dtype=np.uint8)", "def __init__(self, env):\n gym.ObservationWrapper.__init__(self, env)\n self.width = 84\n self.height = 84\n self.observation_space = gym.spaces.Box(\n low=0, high=255, shape=(self.height, self.width, 1), dtype=np.uint8\n )", "def __init__(self, env):\n\n gym.ObservationWrapper.__init__(self, env)\n self.width = 84\n self.height = 84\n self.observation_space = spaces.Box(low=0, high=255,\n shape=(self.height, self.width, 1), dtype=np.uint8)", "def __init__(self, env):\n gym.ObservationWrapper.__init__(self, env)\n self.width = 84\n self.height = 84\n self.observation_space = gym.spaces.Box(low=0, high=255,\n shape=(self.height, self.width, 1), dtype=np.uint8)", "def __init__(self, env, width=84, height=84):\n super().__init__(env)\n self.width = width\n self.height = height\n self.observation_space = gym.spaces.Box(low=0, high=255, \n shape=(self.height, self.width, 1),\n dtype=env.observation_space.dtype)", "def __init__(self, env, width=84, height=84, grayscale=True):\n gym.ObservationWrapper.__init__(self, env)\n self.width = width\n self.height = height\n self.grayscale = grayscale\n if self.grayscale:\n self.observation_space = spaces.Box(low=0, high=255,\n shape=(self.height, self.width, 1), dtype=np.uint8)\n else:\n self.observation_space = spaces.Box(low=0, high=255,\n shape=(self.height, self.width, 3), dtype=np.uint8)", "def __init__(self, env):\n gym.ObservationWrapper.__init__(self, env)\n self.width = 84\n self.height = 84\n self.observation_space = gym.spaces.Box(low=0, high=255, shape=(self.height, self.width, 3),\n dtype=env.observation_space.dtype)", "def __init__(self, env):\n gym.ObservationWrapper.__init__(self, env)\n self.width = 84\n self.height = 84\n self.observation_space = gym.spaces.Box(low=0, high=255, shape=(self.height, self.width, 1),\n dtype=env.observation_space.dtype)", "def __init__(self, env, width=100, height=100, grayscale=False):\r\n gym.ObservationWrapper.__init__(self, env)\r\n self.grayscale = grayscale\r\n if self.grayscale:\r\n self.num_colors = 1\r\n else:\r\n self.num_colors = 3\r\n self.width = width\r\n self.height = height\r\n self.crop = int(height/5)\r\n self.observation_space = Box(low=0, high=255, shape=(self.height-self.crop, self.width, self.num_colors),\r\n dtype=env.observation_space.dtype)", "def __init__(self, dtype, unit='', max_shape=None, min_shape=None,\n ndim=1, max_value=None, min_value=None, depends_on=None):\n if max_shape and min_shape and len(max_shape) != len(min_shape):\n raise ValueError('Shape constraints must have the same length.')\n\n self.dtype = dtype\n self.unit = unit\n self.max_shape = max_shape\n self.min_shape = min_shape\n if max_shape or min_shape:\n self.ndim = len(max_shape or min_shape)\n else:\n self.ndim = ndim\n self.max_value = max_value\n self.min_value = 
min_value\n if isinstance(depends_on, str):\n self.depends_on = (depends_on,)\n else:\n self.depends_on = depends_on\n if self.depends_on and len(self.depends_on) != self.ndim:\n raise ValueError('Number of independent variables must be equal '\n 'to the number of array dimensions.')", "def setup(self):\n self.testInst = pysat.Instrument('pysat', 'testing_xarray',\n clean_level='clean')\n self.bounds1 = (dt.datetime(2008, 1, 1), dt.datetime(2008, 1, 3))\n self.bounds2 = (dt.datetime(2009, 1, 1), dt.datetime(2009, 1, 2))\n\n self.long_bins = [0., 360., 24]\n self.mlt_bins = [0., 24., 24]\n self.auto_bin = True\n\n return", "def observation_spec(self) -> Dict[str, Any]:", "def __init__(self):\n super(INumpyArrayMetric, self).__init__()\n self.metric = 'INumpyArrayMetric'\n self.ground_truth = None # np.ndarray\n self.segmentation = None # np.ndarray", "def _default_specs(self):\n # Spectrometer specs\n self.model = \"Flame-S\" # Spectrometer model\n self.fov = None # Field of view fo spectrometer\n self.ILS = None # Number array holding instrument line shape (possibly don't hold this here?)\n self.pix_num = 2048 # Number of pixels\n self.bit_depth = 16 # Bit depth of spectrometer detector\n\n # File information\n self.file_ext = '.npy' # Spectra saved as numpy array\n self.file_ss = '{}ss' # Shutter speed format spec\n self.file_spec_type = {'meas': 'Plume', 'dark': 'Dark', 'cal': 'ppmm', 'clear': 'Clear'}\n self.file_datestr = \"%Y-%m-%dT%H%M%S\" # Date/time format spec in filename\n\n\n\n # Acquisition settings\n self.start_int_time = 100 # Starting integration time\n self.start_coadd = 5 # Number of spectra to coadd\n self.framerate = 1 # Framerate of acquisitions (Hz)\n self.wavelengths = None # Wavelengths (nm)\n self.spectrum = None # Spectrum\n self.spectrum_filename = None # Filename for spectrum\n\n self.auto_int = True # Bool for requesting automated integration time adjustment\n self.min_saturation = 0.5 # Minimum saturation accepted before adjusting shutter speed (if auto_ss is True)\n self.max_saturation = 0.9 # Maximum saturation accepted before adjusting shutter speed (if auto_ss is True)\n self.saturation_range = [320, 330] # Range of wavelengths used in checking integration time\n\n # Predefined list of integration times for automatic exposure adjustment\n self.int_list = np.concatenate((np.arange(1, 10, 1),\n np.arange(10, 50, 5),\n np.arange(50, 100, 10),\n np.arange(100, 500, 50),\n np.arange(500, 1000, 100),\n np.arange(10 ** 3, 10 ** 4, 500),\n np.array([10 ** 4])))", "def __init__(self, env, skip=4):\n gym.Wrapper.__init__(self, env)\n # most recent raw observations (for max pooling across time steps)\n self._obs_buffer = np.zeros((2,)+env.observation_space.shape, dtype=np.uint8)\n self._skip = skip", "def __init__(self, env, skip=4):\n gym.Wrapper.__init__(self, env)\n # most recent raw observations (for max pooling across time steps)\n self._obs_buffer = np.zeros((2,)+env.observation_space.shape, dtype=np.uint8)\n self._skip = skip", "def __init__(self, env, color=False):\r\n ObservationWrapper.__init__(self, env)\r\n\r\n self.color = color\r\n self.img_size = (1, 64, 64)\r\n self.observation_space = Box(0.0, 1.0, (1, 64, 64))", "def setup(self):\n self.testInst = pysat.Instrument('pysat', 'testing_xarray',\n clean_level='clean')\n self.bounds1 = (dt.datetime(2008, 1, 1), dt.datetime(2008, 1, 3))\n self.bounds2 = (dt.datetime(2009, 1, 1), dt.datetime(2009, 1, 2))\n\n return", "def state_array_spec(self) -> Dict[str, Any]:", "def autoset_numerical_parameters():\n 
testenv = env(\n Ndim=N_DIMS,\n lambda_over_dx=LAMBDA_OVER_DX,\n R_dt=R_DT,\n norm_Poisson=NORM_POISSON,\n Ngrid=N_GRID,\n Nhits=N_HITS,\n dummy=True,\n )\n if STOP_t is None:\n if N_DIMS == 1:\n stop_t = int(round(4 * testenv.N))\n else:\n if testenv.mu0_Poisson < 1e-3:\n stop_t = 10 * testenv.N ** N_DIMS\n elif testenv.mu0_Poisson < 1:\n stop_t = int(round(5 * 10 ** N_DIMS * LAMBDA_OVER_DX / np.sqrt(testenv.mu0_Poisson)))\n else:\n stop_t = int(round(5 * 10 ** N_DIMS * LAMBDA_OVER_DX))\n else:\n stop_t = STOP_t\n\n if N_RUNS is None:\n # predefined for REL_TOL = 0.01\n if N_DIMS == 1:\n Nruns = 16000\n elif N_DIMS == 2:\n Nruns = 6400\n elif N_DIMS == 3:\n Nruns = 25600\n elif N_DIMS == 4:\n Nruns = 102400\n else:\n raise Exception(\"Nruns not pre-defined for N_DIMS > 4\")\n Nruns = int(Nruns * (0.01 / REL_TOL) ** 2)\n else:\n Nruns = N_RUNS\n\n if MAX_N_RUNS is None:\n max_Nruns = MAX_N_RUNS\n else:\n max_Nruns = 10 * Nruns\n\n if ADAPTIVE_N_RUNS or WITH_MPI:\n Nruns = int(N_PARALLEL * (np.ceil(Nruns / N_PARALLEL))) # make it multiple of N_PARALLEL\n max_Nruns = int(N_PARALLEL * (np.ceil(max_Nruns / N_PARALLEL))) # make it multiple of N_PARALLEL\n\n return testenv.N, testenv.Nhits, stop_t, Nruns, max_Nruns, testenv.mu0_Poisson", "def __init__(self, env, skip=4):\n gym.Wrapper.__init__(self, env)\n # most recent raw observations (for max pooling across time steps)\n self._obs_buffer = np.zeros((2,) + env.observation_space.shape, dtype=np.uint8)\n self._skip = skip", "def __init__(self, env, skip=4):\n gym.Wrapper.__init__(self, env)\n # most recent raw observations (for max pooling across time steps)\n self._obs_buffer = np.zeros((2,) + env.observation_space.shape, dtype='uint8')\n self._skip = skip", "def __init__(self, env, skip=4):\n gym.Wrapper.__init__(self, env)\n # most recent raw observations (for max pooling across time steps)\n self._obs_buffer = np.zeros((2,) + env.observation_space.shape,\n dtype=np.uint8)\n self._skip = skip", "def __init__(self, env, skip=4):\r\n gym.Wrapper.__init__(self, env)\r\n # most recent raw observations (for max pooling across time steps)\r\n self._obs_buffer = np.zeros(\r\n (2,) + env.observation_space.shape, dtype=np.uint8)\r\n self._skip = skip", "def __init__(self, data_array):\n self._data_array = data_array\n self._units = self._data_array.attrs.get('units', 'dimensionless')", "def __init__(self, env):\n self.env = env\n # set up observation space\n high = np.inf\n low = -high\n\n obs_spec = env.observation_spec()\n\n space_spec = {}\n\n for k,v in obs_spec.items():\n space_spec[k]=spaces.Box(low=low,high=high, shape=v)\n\n\n self.observation_space = spaces.Dict(space_spec)\n\n # setup action space\n low, high = self.env.action_spec\n self.action_space = spaces.Box(low=low, high=high)\n\n self.reward_range = self.env.reward_range", "def __init__(self):\n self.temperature = np.array([])\n self.salinity = np.array([])\n self.date = np.array([])\n self.temperatureQF = np.array([])\n self.salinityQF = np.array([])", "def __init__(self):\n super(ExponentialInputs, self).__init__()\n self.init_pop_size = pd.Series([], dtype=\"float\")\n self.growth_rate = pd.Series([], dtype=\"float\")\n self.time_steps = pd.Series([], dtype=\"float\")", "def set_derived_configs(self):\n if 'dim' in self.config and self.config['dim'] <= 0:\n self.config['dim'] = self.descriptors['input']['dim']", "def __init__(self, dtype, unit='', max_value=None, min_value=None):\n self.dtype = dtype\n self.unit = unit\n self.max_value = max_value\n self.min_value = min_value", 
"def setup(self):\n self.testInst = pysat.Instrument('pysat', 'testing2D_xarray',\n clean_level='clean')\n self.testInst.bounds = (dt.datetime(2008, 1, 1),\n dt.datetime(2008, 2, 1))\n self.dname = 'variable_profiles'\n self.test_val_length = 15\n\n return", "def __init__(self):\n self.state_dim = 12\n self.measurement_dim = 6", "def observation_space(self):\n raise NotImplementedError", "def setUp(self):\n self.dimension = 5\n self.array = np.asarray([0, 0, 0, 0, 0])\n self.array2 = np.asarray([1, 1, 1, 1, 1])\n self.array3 = np.asarray([420.968746, 420.968746, 420.968746, 420.968746, 420.968746])\n self.array4 = np.asarray([-2.903534, -2.903534])\n self.array5 = np.asarray([-0.5, -0.5, -0.5, -0.5, -0.5])\n self.array6 = np.asarray([-1, -1, -1, -1, -1])\n self.array7 = np.asarray([2, 2, 2, 2, 2])\n self.array8 = np.asarray(\n [7.9170526982459462172, 7.9170526982459462172, 7.9170526982459462172, 7.9170526982459462172,\n 7.9170526982459462172])\n self.array9 = np.asarray([-5.12, -5.12, -5.12, -5.12, -5.12])\n self.array10 = np.asarray([1, 2, 3, 4, 5])", "def __init__(self, values=np.r_[[]], coords=[], dims=[], attrs={}, procList=[]):\n super().__init__(values, dims, coords, attrs)\n self.version = version\n self.proc_attrs = []\n self.max_print_attrs = 5\n self.print_values = False", "def observation_space(self):\n pass", "def __init__(self, env, skip=4, blend=4):\n gym.Wrapper.__init__(self, env)\n # most recent raw observations (for max pooling across time steps)\n self._obs_buffer = np.zeros((blend,) + env.observation_space.shape, dtype=env.observation_space.dtype)\n self._skip = skip\n self._blend = blend", "def __init__(self, low, high):\n\n\t\tself.__low = np.array(low).reshape(1, -1)\n\t\tself.__high = np.array(high).reshape(1, -1)\n\t\tself.__ndims = self.__low.size\n\t\tself.__range = self.__high - self.__low", "def __init__(self):\n self.configuration = None\n self.initial_guess = self.default_initial_guess.copy()\n self.bounds = self.default_bounds.copy()\n self.fit_for = None\n self.has_converged = False\n self.data_unit = units.Unit(\"count\")\n self.use_points = 0\n self.uniform_weights = False\n self.el_range = Range()\n self.parameters = None\n self.errors = None\n self.rms = np.nan\n self.fitted_values = None\n self.elevation = None\n self.data = None\n self.sigma = None\n self.p_opt = None\n self.p_cov = None", "def _init_special_vars(self, T_start=None, T_end=None):\n self.min_energy = np.min(self.event_list_T[1][T_start:T_end])\n self.max_energy = np.max(self.event_list_T[1][T_start:T_end])\n self.min_time = np.min(self.event_list_T[0][T_start:T_end])\n self.max_time = np.max(self.event_list_T[0][T_start:T_end])", "def __init__(self, synthetic_spectrum):\n self.synth_spec = synthetic_spectrum\n self._buildArray()", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Rdy[1.,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rbk[1.,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rqcd_emu[1,0.0,10.0]\");\n self.modelBuilder.doSet(\"POI\",\"Rbk,Rdy,Rqcd_emu\")", "def _init_obs_space(self):\n\n # Get observation shape\n example_obs = self._process_img(self.ds_dict['initial'][0][0])\n obs_shape = np.shape(example_obs)\n\n # Get range\n if self.scale_observation:\n obs_low = 0\n obs_high = 1\n else:\n obs_low = 0\n obs_high = 255\n\n # Construct space\n obs_space = gym.spaces.Box(\n low=obs_low,\n high=obs_high,\n shape=obs_shape,\n dtype=np.float32\n )\n\n return obs_space", "def __init__(self, valuesArray):\n\n self.valuesArray = np.array(valuesArray).flatten()\n\n self.min = 
self.valuesArray.min()\n self.max = self.valuesArray.max()\n\n self.mean = self.valuesArray.mean()\n self.std = self.valuesArray.std()", "def buildObservationSpace(self):\n self.observation_space = Dict({\n \"system_action\": Discrete(len(self.AGENT_TYPES)), \n \"user_action\": Discrete(len(self.USER_TYPES)), \n \"function_specified\": Discrete(2),\n \"dont_know\": Discrete(2),\n # \"command_ignored\": Discrete(2),\n \"turns\": Discrete(self.MAX_TURNS+1),\n \"results\": Box(low=np.zeros(self.dataset.getDatabaseSize()), high=np.ones(self.dataset.getDatabaseSize())),\n })\n self.observation_space.shape = (flatdim(self.observation_space),)", "def setup(self):\n if self.minimizer == \"shgo\":\n self._maxiter = 100\n else:\n self._maxiter = 1000\n if self.value_ranges is None or np.any(np.isinf(self.value_ranges)):\n raise MissingBoundsError(\n \"SciPy GO requires finite bounds on all parameters\")", "def __init__(self, xList:list, constant=0, etype='main'):\n self.dimensions = len(xList)\n self.variables = np.array(xList)\n self.constant = constant\n self.type = etype", "def __init__(\n self,\n env_obs_shape: List[int],\n multitask_cfg: ConfigType,\n *args,\n **kwargs,\n ):\n super().__init__()\n self.multitask_cfg = multitask_cfg", "def __init__(self, allowable_min, allowable_max):\n # Will call reset() and set the _found variables to be the right size\n super(StatsTrackerArray, self).__init__(allowable_min, allowable_max)", "def __init__(self, env, skip=4):\n gym.Wrapper.__init__(self, env)\n \n # create the buffer of two frame sizes\n self._obs_buffer = np.zeros((2,) + env.observation_space.shape, dtype=np.uint8)\n self._skip = skip", "def __init__(self):\n self.eps = 1e-5\n self.use_global_stats = True\n self.workspace = 512\n self.units = (3, 4, 23, 3) # use for 101\n self.filter_list = [256, 512, 1024, 2048]", "def _setup(self):\n numerator = np.arange(1, MAX_NUMERATOR, dtype=float)\n denominator = np.arange(1, MAX_DENOMINATOR, dtype=float)\n outer = np.outer(numerator, 1/denominator)\n self.ratios = np.unique(outer[outer!=1])\n\n self.known_periods, self.known_dms, self.known_ras, self.known_decls = \\\n np.loadtxt(KNOWNPSR_FILENM, usecols=(0,1,2,3), unpack=True)", "def __init__(self, rows, cols, default_val=0):\n self.num_rows = rows\n self.num_cols = cols\n\n # Initialize the 2-dimensional array\n self.rows = [[default_val] * cols for _ in xrange(rows)]", "def interpret_parameters(self) :\n\n if hasattr(self,'exposure_schedule') and self.exposure_schedule is not None :\n if isinstance(self.exposure_schedule,float) :\n self.exposure_schedule = [np.repeat(self.exposure_schedule,24)]\n\n elif isinstance(self.exposure_schedule,int) :\n temp = self.exposure_schedule\n self.exposure_schedule = [np.zeros(24)]\n self.exposure_schedule[0][temp] = 1\n\n elif isinstance(self.exposure_schedule,dict) :\n temp = self.exposure_schedule\n self.exposure_schedule = [np.zeros(24)]\n for x in temp.items() :\n self.exposure_schedule[0][int(x[0])] = x[1] \n\n elif isinstance(self.exposure_schedule,np.ndarray) :\n if len(np.shape(self.exposure_schedule)) == 1 and np.shape(self.exposure_schedule)[0] == 24 :\n self.exposure_schedule = [self.exposure_schedule]\n elif len(np.shape(self.exposure_schedule)) == 2 and np.shape(self.exposure_schedule)[1] == 24 :\n # split an array of multiple schedules into a list of single schedule arrays\n self.exposure_schedule = np.split(self.exposure_schedule,np.shape(self.exposure_schedule)[0])\n else :\n raise ValueError(\"Exposure schedule not a comprehensible numpy array, 
\" +\n \"must be length 24 in first or second dimension\")\n\n elif isinstance(self.exposure_schedule,list) :\n if len(self.exposure_schedule) == 24 and all(isinstance(x,(int,float)) for x in self.exposure_schedule) :\n self.exposure_schedule = [np.array(self.exposure_schedule)]\n \n for i in range(len(self.exposure_schedule)) :\n if isinstance(self.exposure_schedule[i],float) :\n self.exposure_schedule[i] = np.repeat(self.exposure_schedule[i],24)\n\n elif isinstance(self.exposure_schedule[i],int) :\n temp = self.exposure_schedule[i]\n self.exposure_schedule[i] = np.zeros(24)\n self.exposure_schedule[i][temp] = 1\n\n elif isinstance(self.exposure_schedule[i],dict) :\n temp = self.exposure_schedule[i]\n self.exposure_schedule[i] = np.zeros(24)\n for x in temp.items() :\n self.exposure_schedule[i][int(x[0])] = x[1] \n\n elif isinstance(self.exposure_schedule[i],np.ndarray) :\n if not (len(np.shape(self.exposure_schedule[i])) == 1 \n and np.shape(self.exposure_schedule[i])[0] == 24 ):\n raise ValueError(\"Exposure schedule list contains an incomprehensible entry, \" + \n \"a numpy array that is not length 24\")\n \n elif isinstance(self.exposure_schedule[i],list) :\n if len(self.exposure_schedule[i]) == 24 :\n self.exposure_schedule[i] = np.array(self.exposure_schedule[i])\n else :\n raise ValueError(\"Exposure schedule list contains an incomprehensible entry, \" + \n \"a list that is not length 24\")\n \n else :\n raise TypeError(\"Exposure schedule list contains an incomprehensible entry\")\n\n else :\n raise TypeError(\"Exposure schedule must be a list of length-24 numpy arrays or similar\")\n ###################################################################################################### \n if hasattr(self,'year_selection') and self.year_selection is not None :\n if isinstance(self.year_selection,int) :\n if self.year_selection==0:\n self.year_selection = [np.array([x]) for x in self.dataset_years]\n else:\n self.year_selection = [np.array([self.year_selection])]\n elif isinstance(self.year_selection,np.ndarray) :\n if len(np.shape(self.year_selection)) == 1 :\n self.year_selection = [self.year_selection]\n else :\n raise ValueError(\"Year selection should be a list of numpy arrays, \" +\n \"provided numpy array has incomprehensible shape\")\n elif isinstance(self.year_selection,list) :\n if all([isinstance(x,int) for x in self.year_selection]) and all(x!=0 for x in self.year_selection) :\n self.year_selection = [np.array(self.year_selection)]\n else :\n i=0\n for k in range(len(self.year_selection)) :\n if isinstance(self.year_selection[i],int) :\n if self.year_selection[i] == 0 :\n temp = self.year_selection[0:i] + [np.array([x]) for x in self.dataset_years]\n if i != len(self.year_selection)-1 : \n temp = temp + self.year_selection[i+1:]\n self.year_selection = temp\n i = i + len(self.dataset_years) - 1\n else :\n self.year_selection[i] = np.array([self.year_selection[i]])\n elif isinstance(self.year_selection[i],list) :\n self.year_selection[i] = np.array(self.year_selection[i])\n elif not isinstance(self.year_selection[i],np.ndarray) :\n raise TypeError(\"Year selection list must contain ints, lists, or numpy arrays\")\n i=i+1\n else :\n raise TypeError(\"Year selection must be an int, numpy array, or list of numpy arrays\")\n\n for i in range(len(self.year_selection)) :\n if all(self.year_selection[i] == 0) :\n self.year_selection[i] = np.array(self.dataset_years)\n #####################################################################################################\n if 
hasattr(self,'units') and self.units is not None :\n if isinstance(self.units,str) :\n self.units = [self.units]\n elif isinstance(self.units,list) :\n if not all(isinstance(x,str) for x in self.units) :\n raise TypeError(\"Units input must be a list of strings\")\n else :\n raise TypeError(\"Units input must be a list of strings\")\n\n for i in range(len(self.units)) :\n if not isinstance(self.units[i],str) :\n raise TypeError(\"Units input must be a list of strings\")\n if self.units[i] not in [\"SED\",\"UVIh\",\"UVI\",\"J m-2\",\"W m-2\",\"mW m-2\"] :\n raise ValueError(\"Units input must be list of accepted unit strings, \" +\n \"those being SED, UVIh, J m-2, UVI, W m-2, or mW m-2\")\n\n\n if hasattr(self,'bin_width') :\n if self.bin_width is None :\n self.bin_width = []\n for unit in self.units :\n self.bin_width.append({\n \"SED\" : 0.1, \n \"J m-2\" : 10, \n \"UVI\" : 0.1, \n \"W m-2\" : 0.0025, \n \"mW m-2\" : 2.5\n }[unit])\n elif isinstance(self.bin_width,(int,float)) :\n self.bin_width = [self.bin_width]\n\n\n return self", "def test_30_test_init_array(self, persons_gi):\n example = Example(groups=7, origins=5)\n\n example.init_array('param_g', 7)\n assert example.param_g.shape == (7, )", "def __init__(self, area_extents: np.ndarray):\n self.area_extents = area_extents", "def setUp(self):\n shape = RNG.integers(5, 50)\n periods = self.periods = RNG.normal() * 3\n freq = periods / shape\n amp = RNG.normal()\n offset = RNG.normal()\n phase = (RNG.normal() - 1 / 2) * 5 / 3 * np.pi\n p_gt = self.p_gt = (amp, freq, phase, offset)\n x = self.x = np.arange(shape)\n self.data = sine(x, *p_gt)", "def __init__(self, xRange, yData):\n\n self.xRange = np.array(xRange)\n self.yData = np.array(yData)", "def setup(self):\n self.testInst = pysat.Instrument('pysat', 'testing2D_xarray',\n clean_level='clean')\n self.testInst.bounds = (dt.datetime(2008, 1, 1),\n dt.datetime(2008, 2, 1))\n self.dname = 'images'\n self.test_val_length = 17\n\n return", "def __init__(self, env, padded_shape, center=True):\n super(ObservationPadEnv, self).__init__(env)\n self._padded_shape = padded_shape\n self._center = center\n old_space = self.observation_space\n self.observation_space = gym.spaces.Box(low=self.observation(old_space.low),\n high=self.observation(old_space.high),\n dtype=old_space.dtype)", "def __init__(self, variables, dims):\n self._variables = variables\n self._dims = dims", "def set_bounds_atom(self,bounds):\n assert bounds.shape == (2,self.Phi.d)\n self.bounds = bounds # data bounds\n self.bounds_atom = bounds.T.tolist()\n for i in range(self.Phi.d): # bounds for the variance in each dimension\n max_variance_this_dimension = (bounds[1][i]-bounds[0][i])**2\n self.bounds_atom.append([self.variance_relative_lowerbound*max_variance_this_dimension,\n self.variance_relative_upperbound*max_variance_this_dimension])", "def _default_specs(self):\n # Spectrometer specs\n self.model = \"Flame-S\" # Spectrometer model\n self.fov = 1 # Field of view fo spectrometer (radius of FOV)\n self.ILS = None # Number array holding instrument line shape (possibly don't hold this here?)\n self.fiber_diameter = 1e-3 # Diameter of optical fiber\n self.pix_num = 2048 # Number of pixels\n self.bit_depth = 16 # Bit depth of spectrometer detector\n\n # File information\n self.file_ext = '.npy' # Spectra saved as numpy array\n self.file_ss = '{}ss' # Shutter speed format spec\n self.file_ss_loc = 1 # Shutter speed location in filename\n self.file_spec_type = {'meas': 'Plume', 'dark': 'Dark', 'cal': 'ppmm', 'clear': 'Clear'}\n 
self.file_datestr = \"%Y-%m-%dT%H%M%S\" # Date/time format spec in filename\n self.file_datestr_loc = 0\n self.plume_params_file = 'plume_params.txt'\n self.plume_speed_id = 'plume_speed='\n self.plume_dist_id = 'plume_distance='\n\n # File which flags that a scan is complete. The file will be empty, just its presence is required\n self.scan_complete = 'complete.txt'\n\n # Acquisition settings\n self.start_int_time = 100 # Starting integration time\n self.start_coadd = 1 # Number of spectra to coadd\n self.framerate = 1 # Framerate of acquisitions (Hz)\n self.wavelengths = None # Wavelengths (nm)\n self.spectrum = None # Spectrum\n self.spectrum_filename = None # Filename for spectrum\n\n self.auto_int = True # Bool for requesting automated integration time adjustment\n self.min_saturation = 0.6 # Minimum saturation accepted before adjusting shutter speed (if auto_ss is True)\n self.max_saturation = 0.9 # Maximum saturation accepted before adjusting shutter speed (if auto_ss is True)\n self.saturation_range = [300, 335] # Range of wavelengths used in checking integration time\n self.saturation_pixels = 2 # Number of pixels to check\n\n # Predefined list of integration times for automatic exposure adjustment\n self.int_list = np.concatenate((np.arange(0.1, 0.5, 0.05),\n np.arange(0.5, 1, 0.1),\n np.arange(1, 5, 0.5),\n np.arange(5, 10, 1),\n np.arange(10, 50, 5),\n np.arange(50, 100, 10),\n np.arange(100, 500, 50),\n np.arange(500, 1000, 100),\n np.arange(10 ** 3, 10 ** 4, 500),\n np.array([10 ** 4])))", "def observation(self):\n return {\n name: np.asarray(\n self._env.observation(name), self._observation_spec[name].dtype)\n for name in self._obs_names\n }", "def set_parameters_kernel(self):\n\n prior_parameters_values = self.get_values_parameters_from_data(\n self.kernel_values, self.mean_value, self.var_noise_value, self.type_kernel,\n self.dimensions, **self.additional_kernel_parameters)\n\n if not self.noise or self.data.get('var_noise') is not None:\n self.mean_value = [0.0]\n self.var_noise_value = [0.0]\n\n parameters_priors = prior_parameters_values['kernel_values']\n\n parameters_priors = parameters_kernel_from_list_to_dict(parameters_priors, self.type_kernel,\n self.dimensions)\n\n if self.kernel_values is None:\n self.kernel_values = list(\n get_default_values_kernel(self.type_kernel, self.dimensions, **parameters_priors))\n\n if self.mean_value is None:\n self.mean_value = list(prior_parameters_values['mean_value'])\n\n if self.var_noise_value is None:\n self.var_noise_value = list(prior_parameters_values['var_noise_value'])\n\n if self.noise and self.data.get('var_noise') is None:\n self.mean = ParameterEntity(\n MEAN_NAME, np.array(self.mean_value), GaussianPrior(1, self.mean_value[0], 1.0))\n else:\n self.mean = ParameterEntity(\n MEAN_NAME, np.array([0.0]), Constant(1, 0.0))\n\n if self.noise and self.data.get('var_noise') is None:\n self.var_noise = ParameterEntity(\n VAR_NOISE_NAME, np.array(self.var_noise_value),\n NonNegativePrior(1, HorseShoePrior(1, self.var_noise_value[0])),\n bounds=[(SMALLEST_POSITIVE_NUMBER, None)])\n else:\n self.var_noise = ParameterEntity(\n VAR_NOISE_NAME, np.array([0.0]),\n Constant(1, 0.0),\n bounds=[(0.0, 0.0)])\n\n self.kernel = get_kernel_default(self.type_kernel, self.dimensions, self.bounds,\n np.array(self.kernel_values), parameters_priors,\n **self.additional_kernel_parameters)\n\n self.dimension_parameters = self.kernel.dimension_parameters + 2\n\n if self.type_kernel[0] == PRODUCT_KERNELS_SEPARABLE:\n self.kernel_dimensions = 
[self.kernel.dimension]\n if len(self.type_kernel) > 1:\n for name in self.kernel.names:\n self.kernel_dimensions.append(self.kernel.kernels[name].dimension)\n\n # I think that this is only useful for the product of kernels.\n self.number_parameters = [get_number_parameters_kernel(\n self.type_kernel, self.dimensions, **self.additional_kernel_parameters)]\n if len(self.dimensions) > 1:\n for type_k, dim in zip(self.type_kernel[1:], self.dimensions[1:]):\n self.number_parameters.append(\n get_number_parameters_kernel([type_k], [dim],\n **self.additional_kernel_parameters))\n\n self.length_scale_indexes = self.get_indexes_length_scale()", "def __init__(self, dimensions, lower=0.0, upper=1.0, name=None):\n super().__init__((dimensions,), name=name)\n self.dimensions = dimensions\n self.lower = lower\n self.upper = upper", "def __init__(self, xs, ys, gauge_length, sample_width, sample_thickness, name=None):\n assert len(xs) == len(ys)\n\n self.xs = np.array(xs)\n self.ys = np.array(ys)\n self.gauge_length = gauge_length # m\n self.sample_width = sample_width # m\n self.sample_thickness = sample_thickness # m\n self.name = name", "def discount_spec(self) -> types.NestedArraySpec:\n return array_spec.BoundedArraySpec(\n shape=(), dtype=np.float32, minimum=0.0, maximum=1.0, name='discount'\n )", "def __init__(\n self,\n env_obs_shape: List[int],\n multitask_cfg: ConfigType,\n feature_dim: int,\n # num_layers: int = 2,\n # num_filters: int = 32,\n ):\n super().__init__(env_obs_shape=env_obs_shape, multitask_cfg=multitask_cfg)\n\n assert len(env_obs_shape) == 1\n # assert num_layers == 0\n # assert num_filters == 0\n # self.feature_dim = obs_shape[0]\n self.feature_dim = feature_dim", "def setup(self):\n\n self.testInst = pysat.Instrument('pysat', 'testing2D_xarray',\n clean_level='clean')\n self.testInst.bounds = (dt.datetime(2008, 1, 1),\n dt.datetime(2008, 2, 1))\n self.dname = 'profiles'\n self.test_val_length = 15\n\n return", "def __init__(self):\n\n self.check_nans = False\n self.debug_force_memmap = False\n\n # Implementations must initialise the dtype so that feature arrays can be created with correct type:\n self.dtype = None", "def configure(self,\n instrument=None,\n obs_duration=None,\n lst=None):\n obs_plan = {}\n # subarray specific setup options\n if instrument is not None and len(instrument) > 0:\n obs_plan[\"instrument\"] = instrument\n # set observation duration if specified\n if obs_duration is not None:\n obs_plan[\"durations\"] = {\"obs_duration\": obs_duration}\n # LST times only HH:MM in OPT\n start_lst = Observatory().start_obs(self.target_list, str_flag=True)\n start_lst = \":\".join(start_lst.split(\":\")[:-1])\n end_lst = Observatory().end_obs(self.target_list, str_flag=True)\n end_lst = \":\".join(end_lst.split(\":\")[:-1])\n if lst is None:\n lst = \"{}-{}\".format(start_lst, end_lst)\n # observational setup\n obs_plan[\"observation_loop\"] = [{\"lst\": lst,\n \"target_list\": self.target_list}]\n self.configuration = obs_plan\n return obs_plan", "def get_training_array(self, window_size: str = None):\n self.window_size = window_size or self.window_size\n y_index = {\n 'A': 0, \n 'C': 1,\n 'G': 2, \n 'T': 3,\n '0/1': 4, '1/0': 4, # heterozygous\n '1/1': 5, # homozygous\n '0/0': 6, # non-variant :: assigned where alignments are not found to be variants. 
Need to finish populating with bam file.\n # 7 :: complex/non-snp :: assigned to be a variant that is an indel, but not an SNP\n }\n y = [0, 0, 0, 0, 0, 0, 0, 0] # ['A', 'C', 'T', 'G', hom-ref, het, hom-alt, complex-dump]\n Y = {}\n X_initial = []\n Y_initial = []\n position_array = []\n left_offset = math.floor(self.window_size / 2)\n right_offset = math.ceil(self.window_size / 2)\n if not self.window_size % 2: print('shit man, the window needs to be odd; needs to have a middle position')\n if self.bed_file:\n focus_regions = pd.read_csv(self.bed_file, delimiter='\\t', header=None)[[1, 2]].apply(tuple, axis=1).tolist()\n focus_regions = pd.arrays.IntervalArray.from_tuples(focus_regions, closed='both')\n count = 0\n too_complex = set()\n self.variants_called = set()\n if self.vcf_file:\n vcf = pd.read_vcf(self.vcf_file) # Should only have one sample\n if len(vcf.columns) > 10:\n exit(f'ERROR :: VCF file has too many samples')\n vpos = -float('inf')\n for row in vcf.itertuples():\n # if not self.variant_calls.get(row.POS):\n # continue\n if self.bed_file: \n if not any(focus_regions.contains(row.POS-1)): # bed file 0-index\n count += 1\n continue\n y_vec = y[:] # ['A', 'C', 'T', 'G', het, hom, non, complex]\n # get genotype call. default to non-variant\n genotype = row[-1]['GT'].replace('|', '/')\n genotype_index = y_index.get(genotype)\n if not genotype_index:\n continue\n try:\n # HETEROZYGOUS\n if genotype_index == 4:\n y_vec[y_index[row.REF[0]]] = .5\n y_vec[y_index[row.ALT[0]]] = .5\n # y_vec[y_index[row.REF[0]]] = 1\n # y_vec[y_index[row.ALT[0]]] = 1\n # HOMOZYGOUS\n elif genotype_index == 5:\n y_vec[y_index[row.ALT[0]]] = 1\n # y_vec[y_index[row.ALT[0]]] = 1\n # NON-VARIANT\n elif genotype_index == 6:\n y_vec[y_index[row.REF[0]]] = 1\n # y_vec[y_index[row.REF[0]]] = 1\n # COMPLEX\n elif genotype_index == 7:\n # todo: this shouldnt be always in favor of alt\n y_vec[y_index[row.ALT[0]]] = 1 # todo: maybe take avgs if this messes with the output\n # makes sure we get the proper het base call before changing the gt to complex.\n if len(row.REF) > 1 or len(row.ALT) > 1:\n genotype_index = 7\n except:\n # TODO: iupac not supported yet, too much of a slow down.\n continue\n # if abs(row.POS - vpos) < self.minimum_variant_radius:\n # genotype_index = 7\n # try:\n # Y[vpos][4] = 0\n # Y[vpos][5] = 0\n # Y[vpos][6] = 0\n # Y[vpos][7] = 1\n # except:\n # pass\n # if len(row.REF) > 5 or len(row.ALT) > 5:\n # too_complex.add(row.POS)\n # vpos = row.POS\n # continue\n vpos = row.POS\n y_vec[genotype_index] = 1\n Y[row.POS] = y_vec \n self.variants_called.add(row.POS)\n count = 0\n for position in sorted(Y):\n if self.bed_file: \n if not any(focus_regions.contains(position)): # bed file 0-index\n count += 1\n continue\n tp = position - self.contig_start - 1\n if tp < 0: # calls before contig :: incase a bed file was used \n continue\n tensor_stack = np.stack([tensor[tp-left_offset:tp+right_offset] for tensor in self.tensors], axis=2)\n if tensor_stack.size == 0: # calls after contig :: incase a bed file was used\n break \n position_array.append(position)\n X_initial.append(tensor_stack)\n Y_initial.append(Y[position])\n # print('vc skipped', count)\n # false_positives = sorted(set(self.variant_calls) - (set(Y) | too_complex))\n # self.false_positives = false_positives\n # ref_seq_seg = self.ref_seq[self.contig_start-1:self.contig_end]\n # print('false-p', len(false_positives))\n # for position in false_positives[:]:\n else:\n outside, size_catch, fp, amb_base,total=0,0,0,0,0\n for position in 
sorted(set(self.variant_calls) - self.variants_called):\n total+=1\n p = position - self.contig_start - 1 # numpy array 0-index\n if self.bed_file: \n if not any(focus_regions.contains(position)): # bed file 0-index \n outside+=1\n continue\n y = [0, 0, 0, 0, 0, 0, 1, 0]\n # TODO\n # base_position = y_index.get(self.variant_calls[position]['ref_base'])\n base_position = y_index.get(str(self.ref_seq[position-1])) # bypthon 0-index\n # p = position + self.contig_start\n if base_position != None:\n if p - left_offset < 0: # TODO: circularize if for plasmids\n print('wall hit!')\n continue\n tensor_stack = np.stack([tensor[p-left_offset:p+right_offset] for tensor in self.tensors], axis=2)\n vec = np.transpose(tensor_stack, axes=(0,2,1))\n # if sum(vec[7,:,0]) < 5:\n # size_catch+=1\n # continue\n if tensor_stack.size == 0:\n print(position, 'WARNING ::: contig past end; this should not happen!')\n break\n y[base_position] = 1\n fp+=1\n position_array.append(position)\n Y_initial.append(y) # like this incase we want to modify the base \n X_initial.append(tensor_stack)\n else:\n amb_base += 1\n # print(position, base_position, str(self.ref_seq[position-1]))\n # print('ambygous base catches:', amb_base)\n # print('bed catches:', outside)\n # print('size catches', size_catch)\n # print('fp total', fp)\n # print('total', total)\n Xarray = np.stack(X_initial).astype('float64')\n Yarray = np.stack(Y_initial).astype('float64')\n return Xarray, Yarray, position_array # Xarray, Yarray", "def __init__(self, shape, dtype = 'd'):\n self.shape = shape\n self.dtype = dtype\n \n ncell = int(np.prod(self.shape))\n self.shared_array_base = Array(dtype, ncell,lock=False) \n pass", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Rdy[1.,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rqcd[1,0.0,10.0]\");\n self.modelBuilder.doSet(\"POI\",\"Rdy,Rqcd\")", "def define_range():\n\n def_range = {'lt': [0.0, 24.0],\n 'lon': [0.0, 360.0],\n 'angle': [0.0, 2.0 * np.pi]}\n\n return def_range", "def __init__(self, spec_type, values):\r\n self._values = values\r\n self._spec_type = spec_type", "def __init__(self, name, data):\n super(PoissonEM, self).__init__(name)\n\n self._register_variable('mock_data', differentiable=True)\n self.data = data\n self.update_var_param_types(mock_data=ArrayParameter)\n self._set_original_variables()", "def __init__(self, env, n_frames):\n gym.Wrapper.__init__(self, env)\n self.n_frames = n_frames\n self.frames = deque([], maxlen=n_frames)\n shp = env.observation_space.shape\n self.observation_space = gym.spaces.Box(low=0, high=255, shape=(shp[0], shp[1], shp[2] * n_frames),\n dtype=env.observation_space.dtype)", "def _create_parameter_study(self):\n samples = self._create_parameter_array(self._samples, name=\"samples\")\n if hasattr(self, _quantiles_attribute_key):\n quantiles = self._create_parameter_array(self._quantiles, name=\"quantiles\")\n self.parameter_study = xarray.concat([quantiles, samples],\n xarray.DataArray([\"quantiles\", \"samples\"], dims=\"data_type\")).to_dataset(\"parameters\")\n else:\n self.parameter_study = samples.to_dataset(\"parameters\").expand_dims(data_type=[\"samples\"])\n self._merge_parameter_set_names_array()\n self.parameter_study = self.parameter_study.swap_dims({_hash_coordinate_key: _set_coordinate_key})", "def create_observation(self):", "def create_observation(self):", "def observation_space():", "def __init__(self, time_drop_width, time_stripes_num, freq_drop_width, \n freq_stripes_num):\n\n super(SpecAugmentation, self).__init__()\n\n 
self.time_dropper = DropStripes(dim=2, drop_width=time_drop_width, \n stripes_num=time_stripes_num)\n\n self.freq_dropper = DropStripes(dim=3, drop_width=freq_drop_width, \n stripes_num=freq_stripes_num)", "def __init__(self):\n self.counts = [0] * 10\n self.values = [2000] * 10\n self.epsilon = 0.1", "def __init__(self, env: gym.Env, info_obs_weights: dict):\n super().__init__(env)\n self.info_obs_weights = info_obs_weights", "def set_parameters(self):\n params = {}\n if self.modelname == 'SI':\n # N1: Pop 1 size after split\n # N2: Pop 2 size after splot\n # Ts: Time from split to present, in 2*Na generation units\n names = ['N1', 'N2', 'Ts']\n values = [1, 1, 1]\n upper_bounds = [20, 20, 10]\n lower_bounds = [0.01, 0.01, 0]\n elif self.modelname == 'IM':\n # N1: Pop 1 size after split\n # N2: Pop 2 size after split\n # m21: Migration from 1 to 2 (2*Na*mm21)\n # m12: Migration from 2 to 1 (2*Na*m12)\n # Ts: Time from split to present, in 2*Na generations\n names = ['N1', 'N2', 'm21', 'm12', 'Ts']\n values = [1, 1, 1, 1, 1]\n upper_bounds = [20, 20, 20, 20, 10]\n lower_bounds = [0.01, 0.01, 0, 0, 0]\n elif self.modelname == 'AM':\n # N1: Pop 1 size after split\n # N2: Pop 2 size after split\n # m21: Migration from 1 to 2 (2*Na*mm21)\n # m12: Migration from 2 to 1 (2*Na*m12)\n # Tam: Time from end of anc migration to split, in 2*Na gens\n # Ts: Time from split to present, in 2*Na generations\n names = ['N1', 'N2', 'm21', 'm12', 'Tam', 'Ts']\n values = [1, 1, 1, 1, 0.1, 1]\n upper_bounds = [20, 20, 20, 20, 2, 10]\n lower_bounds = [0.01, 0.01, 0, 0, 0, 0]\n elif self.modelname == 'SC':\n # N1: Pop 1 size after split\n # N2: Pop 2 size after split\n # m21: Migration from 1 to 2 (2*Na*mm21)\n # m12: Migration from 2 to 1 (2*Na*m12)\n # Ts: Time from split to secondary contact, in 2*Na generations\n # Tsc: Time from secondary contact to presesnt, in 2*Na gens\n names = ['N1', 'N2', 'm21', 'm12', 'Ts', 'Tsc']\n values = [1, 1, 1, 1, 1, 0.1]\n upper_bounds = [20, 20, 20, 20, 10, 2]\n lower_bounds = [0.01, 0.01, 0, 0, 0, 0]\n elif self.modelname == 'IM2M':\n # N1: Pop 1 size after split\n # N2: Pop 2 size after split\n # m21: Migration from 1 to 2 (2*Na*mm21)\n # m12: Migration from 2 to 1 (2*Na*m12)\n # mi21: Migration from 1 to 2 in \"islands\" (2*Na*mi21)\n # mi12: Migration from 1 to 2 in \"islands\" (2*Na*mi12)\n # Ts: Time from split to present, in 2*Na generations\n # p: Porpotion of genome evoloving in \"islands\"\n names = ['N1', 'N2', 'm21', 'm12', 'mi21', 'mi12', 'Ts', 'p']\n values = [1, 1, 5, 5, 0.5, 0.5, 1, 0.5]\n upper_bounds = [20, 20, 30, 30, 5, 5, 10, 0.95]\n lower_bounds = [0.01, 0.01, 0, 0, 0, 0, 0, 0.05]\n elif self.modelname == 'AM2M':\n # N1: Pop 1 size after split\n # N2: Pop 2 size after split\n # m21: Migration from 1 to 2 (2*Na*mm21)\n # m12: Migration from 2 to 1 (2*Na*m12)\n # mi21: Migration from 1 to 2 in \"islands\" (2*Na*mi21)\n # mi12: Migration from 1 to 2 in \"islands\" (2*Na*mi12)\n # Tam: Time from end of anc migration to split, in 2*Na gens\n # Ts: Time from split to present, in 2*Na generations\n # p: Porpotion of genome evoloving in \"islands\"\n names = ['N1', 'N2', 'm21', 'm12', 'mi21', 'mi12', 'Tam', 'Ts', 'p']\n values = [1, 1, 5, 5, 0.5, 0.5, 0.1, 1, 0.5]\n upper_bounds = [20, 20, 30, 30, 5, 5, 2, 10, 0.95]\n lower_bounds = [0.01, 0.01, 0, 0, 0, 0, 0, 0, 0.05]\n elif self.modelname == 'SC2M':\n # N1: Pop 1 size after split\n # N2: Pop 2 size after split\n # m21: Migration from 1 to 2 (2*Na*mm21)\n # m12: Migration from 2 to 1 (2*Na*m12)\n # mi21: 
Migration from 1 to 2 in \"islands\" (2*Na*mi21)\n # mi12: Migration from 1 to 2 in \"islands\" (2*Na*mi12)\n # Ts: Time from split to secondary contact, in 2*Na generations\n # Tsc: Time from secondary contact to presesnt, in 2*Na gens\n # p: Porpotion of genome evoloving in \"islands\"\n names = ['N1', 'N2', 'm21', 'm12', 'mi21', 'mi12', 'Ts', 'Tsc', 'p']\n values = [1, 1, 5, 5, 0.5, 0.5, 1, 0.1, 0.5]\n upper_bounds = [20, 20, 30, 30, 5, 5, 10, 2, 0.95]\n lower_bounds = [0.01, 0.01, 0, 0, 0, 0, 0, 0, 0.05]\n params['Names'] = names\n params['Values'] = values\n params['Upper'] = upper_bounds\n params['Lower'] = lower_bounds\n return params", "def default_config(cls):\n return {\n \"observation\": {\n \"type\": \"TimeToCollision\"\n },\n \"policy_frequency\": 1, # [Hz]\n \"other_spacecrafts_type\": \"space_env.spacecraft.behavior.IDMspacecraft\",\n \"screen_width\": 600, # [px]\n \"screen_height\": 600, # [px]\n \"centering_position\": [0.3, 0.5],\n \"show_trajectories\": False\n }", "def setup(self):\n\n self.testInst = pysat.Instrument('pysat', 'testing',\n clean_level='clean')\n self.testInst.bounds = (dt.datetime(2008, 1, 1),\n dt.datetime(2008, 1, 3))\n\n self.bounds1 = (dt.datetime(2008, 1, 1), dt.datetime(2008, 1, 3))\n self.bounds2 = (dt.datetime(2009, 1, 1), dt.datetime(2009, 1, 2))\n\n self.long_bins = np.linspace(0., 360., 25)\n self.mlt_bins = np.linspace(0., 24., 25)\n\n self.auto_bin = False\n\n return", "def assign_model_parameters(self,xmax,zmax,dh,duration):\n self.model_parameters['xmax']=xmax\n self.model_parameters['zmax']=zmax\n self.model_parameters['dh']=dh\n self.model_parameters['duration']=duration", "def __init__(self, min_val, max_val):\n self.values = (min_val, max_val)", "def __init__(self, min_val, max_val):\n self.values = (min_val, max_val)", "def __init__(self, min_val, max_val):\n self.values = (min_val, max_val)", "def __init__(self, min_val, max_val):\n self.values = (min_val, max_val)", "def __init__(self, max_sigma, min_sigma, end_episode):\n self.max_sigma = max_sigma\n self.min_sigma = min_sigma\n self.end_episode = end_episode", "def __init__(self, env, skip=4):\n super().__init__(env)\n # most recent raw observations (for max pooling across time steps)\n self._skip = skip" ]
[ "0.6746986", "0.60315347", "0.59593797", "0.5834228", "0.58072776", "0.5796935", "0.5789936", "0.5784668", "0.5759724", "0.5755014", "0.5729606", "0.57125753", "0.56525546", "0.5553753", "0.55418384", "0.5525847", "0.55208904", "0.5515985", "0.5496002", "0.5496002", "0.5478125", "0.54709435", "0.5465005", "0.5436884", "0.54293215", "0.5408327", "0.54014426", "0.5372527", "0.5330303", "0.5319486", "0.52919894", "0.5259575", "0.52440387", "0.52333015", "0.52320856", "0.5231467", "0.5215636", "0.5215332", "0.5196398", "0.5192295", "0.518876", "0.51850516", "0.51561826", "0.5151868", "0.51482576", "0.5144117", "0.51347065", "0.5127737", "0.5087044", "0.50788605", "0.5076584", "0.50645703", "0.50506413", "0.50506073", "0.5046609", "0.5035146", "0.50277376", "0.5027283", "0.5024513", "0.50205916", "0.5017861", "0.5015636", "0.50045156", "0.49968296", "0.49952936", "0.49937314", "0.49917302", "0.49905288", "0.4989925", "0.49803203", "0.4976973", "0.49766672", "0.49762633", "0.49677074", "0.49532562", "0.4951299", "0.4950527", "0.495046", "0.49472424", "0.4944879", "0.49413633", "0.49406818", "0.4939608", "0.4936516", "0.49312174", "0.49312174", "0.4923344", "0.49185738", "0.49073327", "0.4899797", "0.4898644", "0.48888782", "0.48765624", "0.48741052", "0.48726904", "0.48726904", "0.48726904", "0.48726904", "0.4862363", "0.48588273" ]
0.60313916
2
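The metadata field above declares a triplet objective over (query, document, negatives), so each row can be expanded into contrastive training triplets. The sketch below assumes a row shaped like the record just shown; treating higher negative scores as harder negatives is an assumption, not something stated in the data.

```python
# Sketch: turning one row into (anchor, positive, negative) training triplets.
def make_triplets(row, max_negatives=8):
    anchor = row["query"]        # natural-language docstring
    positive = row["document"]   # the matching function definition
    scored = sorted(
        zip(row["negatives"], row["negative_scores"]),
        key=lambda pair: float(pair[1]),  # scores may be stored as strings
        reverse=True,
    )
    return [(anchor, positive, negative) for negative, _ in scored[:max_negatives]]
```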
Defines the actions that should be provided to `step()`. May use a subclass of `ArraySpec` that specifies additional properties such as min and max bounds on the values.
def action_spec(self) -> types.NestedArraySpec:
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def action_spec(self):\n min_Kp_Kd = np.zeros(12)\n max_Kp_Kd = np.ones(12)*np.inf\n min_pos = np.array([-0.6, -1.7, -0.45, -0.6, -1.7, -0.45,\n -0.6, -1.7, -0.45, -0.6, -1.7, -0.45])\n max_pos = np.array([0.5, 1.7, 1.6, 0.5, 1.7, 1.6,\n 0.5, 1.7, 1.6, 0.5, 1.7, 1.6])\n return BoundedArraySpec(shape=(3, 12), dtype=np.float32,\n minimum=[min_Kp_Kd, min_Kp_Kd, min_pos],\n maximum=[max_Kp_Kd, max_Kp_Kd, max_pos],\n name=\"action\")", "def action_array_spec(self) -> Dict[str, Any]:", "def _step(self, action: types.NestedArray) -> ts.TimeStep:", "def step(self, action: np.ndarray) -> 'EnvStep':\n ...", "def step(self, action: Union[np.ndarray, torch.Tensor]):\n if type(action) == torch.Tensor:\n action = action.squeeze().numpy()\n\n if not type(action) is np.ndarray:\n raise Exception(\"The action must be a Numpy array but is of type %s (value = %s)\" % (type(action), action))\n\n if self.increment_actions and not self.action_space.contains(action):\n action = action.clip(self.action_space.low, self.action_space.high)\n\n # Additionally, we must make sure the value will stay in the range\n # min <= x + action <= max\n if self.increment_actions:\n current_values = self.x[np.array([0, 1, 3, 5])]\n new_flow_values = current_values + action\n else:\n new_flow_values = action\n\n new_flow_values = np.clip(new_flow_values, self.flows_lower_bounds, self.flows_upper_bounds)\n self.update_all_flows(new_flow_values)\n\n if any([x < 0 for x in self.x]):\n pass\n # TODO: should I clip the actions to ensure the flows are always positive?\n # raise Exception(f\"Negative flows! x = {[round(x, 4) for x in self.x]}\")\n\n self.update_fitness()\n\n self.step_number += 1\n\n # reward = self.fitness - self.previous_fitness\n reward = self.fitness\n observation = self.get_observation()\n\n done = (self.step_number == self.total_number_of_episodes)\n info = {}\n return observation, reward, done, info", "def _setVals(self, step=0):\n self.step = step", "def default_action(self):\n print ([ -0.75, 0.5, 0.5])\n self._step((0, [ -0.75, 0.5, 0.5], None, None))\n print ([ -0.75, 0.5, 0.5])\n self._step((0, [ -0.75, 0.5, 0.5], None, None))\n print ([ -0.3, 0.55, 0.5])\n self._step((0, [ -0.3, 0.55, 0.5], None, None))\n # self._step((0, [ -0.2, 0.15, 0.5], None, None))\n # self._step((0, [ -0.5, 0.0, 0.5], None, None))", "def initializeActions_(self, opts):\n\n for opt in opts.keys():\n \n val = opts[opt]\n \n \n if ( opt == '-create' ):\n ncjobs = 0\n if val:\n if ( isInt(val) ):\n ncjobs = int(val)\n elif ( val == 'all'):\n ncjobs = val\n else:\n msg = 'Bad creation bunch size <'+str(val)+'>\\n'\n msg += ' Must be an integer or \"all\"'\n msg += ' Generic range is not allowed\"'\n raise SkimException(msg)\n pass\n else: ncjobs = 'all'\n\n if ncjobs != 0:\n # Instantiate Creator object\n self.creator = Creator(self.cfg_params,\n ncjobs)\n self.actions[opt] = self.creator\n pass\n pass\n\n elif ( opt == '-submit' ):\n\n self.actions[opt] = Submitter(self.cfg_params)\n\n return", "def action_spec(self) -> Union[dm_env.specs.Array, Dict[str, Any]]:", "def step(\n self, actions: ActionDict\n ) -> tuple[\n ObsDict, dict[str, float], dict[str, bool], dict[str, bool], dict[str, dict]\n ]:\n raise NotImplementedError", "def _set_steps(self, bounds, steps):\n if type(steps) == int:\n self.steps = [np.linspace(b1,b2,steps) for b1,b2 in bounds]\n elif type(steps) == list and type(steps[0]) == int:\n self.steps = [np.linspace(b1, b2, s) for (b1, b2), s in zip(bounds, steps)]\n else:\n self.steps = steps.copy()", "def _step(self, 
action: np.ndarray):\n self.robot.step({\n 'dkitty': action,\n })", "def step(self, action):\n pass", "def step(self, action):\n pass", "def action_spec(self, physics):\n # one hot corner + action\n if self._random_location:\n return specs.BoundedArray(\n shape=(3,), dtype=np.float, minimum=[-1.0] * 3, maximum=[1.0] * 3)\n else:\n return specs.BoundedArray(\n shape=(7,), dtype=np.float, minimum=[-1.0] * 7, maximum=[1.0] * 7\n )", "def step(self, action):\n raise NotImplementedError", "def _step(self, action: np.ndarray):\n # TODO: How do deal with goal changing?\n denormalize = False if self.use_raw_actions else True\n current_pos = self.sim.data.mocap_pos.copy()\n meanval = (self.mocap_pos_clip_upper + self.mocap_pos_clip_lower)/2.0\n rng = (self.mocap_pos_clip_upper - self.mocap_pos_clip_lower)/2.0\n new_pos = action[:3]*rng + meanval #current_pos + action[:3]*self.range\n # new_pos = current_pos + action[:3]*self.range\n new_pos = np.clip(new_pos, self.mocap_pos_clip_lower, self.mocap_pos_clip_upper)\n self.sim.data.mocap_pos[:] = new_pos.copy()\n self.robot.step({\n 'gripper': action[-2:]\n }, denormalize)", "def step_async(self, actions: np.ndarray) -> None:\n raise NotImplementedError()", "def _step_dynamics(self, act: np.ndarray):\n raise NotImplementedError", "def step(self, action):\n # Implement your step method here\n # return (observation, reward, done, info)\n self._state = self._state + action\n # print('Step state:', self._state)\n x, y = self._state\n reward = - (x ** 2 + y ** 2) ** 0.5\n done = abs(x) < 0.01 and abs(y) < 0.01\n next_observation = np.copy(self._state)\n return Step(observation=next_observation, reward=reward, done=done)", "def step(self, action):\n raise NotImplementedError()", "def step(self, action):", "def __init__(self, low, high, step_name, variable_name):\n super().__init__(step_name, variable_name, list(range(low, high + 1)))\n self.low = min(low, high)\n self.high = max(low, high)", "def __init__(self, step, offset):\n if step <= 0:\n raise ValueError(\"'step' must be positive\")\n self.step = step\n self._offset = abs(offset)", "def do_set_opt_scan_range(self, val):\n hwp_factor = self._half_noof_points\\\n * self.get_conversion_factor('half') #noof_steps * deg/steps\n qwp_factor = self._quarter_noof_points\\\n * self.get_conversion_factor('quarter') #noof_steps * deg/steps \n\n if np.size(val) == 2:\n self._half_stepsize = val[0]/hwp_factor\n self._quarter_stepsize = val[1]/qwp_factor\n else:\n raise ValueError('Input size must be 2, but has size %d'%size(val))", "def step(self, action):\n self.timestep += 1\n self.actions = action.ravel()\n\n # Figure out which action was taken\n self.acted = False\n self.eat = False\n self.discard = False\n if action[0] > .5:\n self.eat = True\n self.acted = True\n elif action[1] > .5:\n self.discard = True\n self.acted = True\n\n # Check whether the appropriate action was taken, and assign reward.\n # There is a small punishment for doing nothing.\n self.reward = -.1\n if ((self.eat and self.edible) or\n (self.discard and not self.edible)):\n self.reward = 1.\n elif ((self.eat and not self.edible) or\n (self.discard and self.edible)):\n self.reward = -.9\n\n if self.acted:\n self.grab_fruit()\n\n return self.sensors, self.reward", "def action_spec(self):\r\n pass", "def setValuesInStep(\n self, stepName: str, interactionProperty: str = \"\", contactControls: str = \"\"\n ):\n pass", "def setValuesInStep(\n self, stepName: str, interactionProperty: str = \"\", contactControls: str = \"\"\n ):\n pass", "def 
step(self, actions):\r\n # Run actions\r\n actions = [np.argmax((action_scores+.0001) * mask) for action_scores, mask in zip(actions, self.get_avail_actions())]\r\n reward, terminated, info = self.env.step(actions)\r\n\r\n # Get updated state\r\n self.state = self.env.get_state()\r\n\r\n # Return arrays for each agent\r\n reward_n = [reward / self.n for _ in range(self.n)]\r\n terminated_n = [terminated for _ in range(self.n)]\r\n info_n = [info for _ in range(self.n)]\r\n observation_n = self.env.get_obs()\r\n\r\n return observation_n, reward_n, terminated_n, info_n", "def step(self,\n actn: int) -> Tuple[np.array, float, bool]:\n raise NotImplementedError", "def __init__(self,start,step,n_up=3,n_down=1,harder=-1,ub=1,lb=0):\n \n self.value = start\n self.n_up = n_up\n self.step = step\n self.n = 0 #This is what will be compared to n_up for udpating.\n self.harder = np.sign(harder) #Make sure that this is only -1 or 1.\n self.record = [start]\n self.correct = []\n self.ub = ub\n self.lb = lb", "def perform_step(self, step, batch):\n\n # perform a min step\n self.__max_step(batch)\n self.__min_step(batch)\n\n l_suff_stats = self.shuffle_up_stats(batch)\n obj = self.main_obj(batch, l_suff_stats)\n constraints = self.main_constraints(batch, l_suff_stats)\n\n self.cli.print(self.fmtstr % (step, obj.detach().numpy(), *[item for sublist in zip(self.lagrange_mults, constraints) for item in sublist]))", "def step(self, observation):\n action, value = self(observation)\n\n return {\n 'actions': action,\n 'values': value\n }", "def step(self, step=None):\n pass", "def get_step_actions(self):\n return self.actor(tf.numpy_function(self.get_states, [], self.states[0].dtype))", "def do_step(self, action_ind):\n action_ind = action_ind.item()\n if len(self.last_actions) < self.last_action_capacity:\n self.last_actions.append(action_ind)\n self.last_actions[self.last_action_ind] = action_ind\n self.last_action_ind = (\n self.last_action_ind + 1) % self.last_action_capacity\n robot_max_vel = self.sim.getAgentMaxSpeed(self.robot_num)\n # Decode the action selection:\n # 0 => do nothing\n # 1-16 => set velocity to `robot_max_vel/2` at angle\n # `(action_ind-1) * 2pi/16`\n # 17-32 => velocity to `robot_max_vel` at angle\n # `(action_ind-17) * 2pi/16`\n # 33-34 => change heading by\n # else => do nothing\n vel = (0, 0)\n angle = self.headings[self.robot_num]\n if 1 <= action_ind <= 16:\n angle += (action_ind - 1)*(math.pi / 8)\n vel = (\n (robot_max_vel/2) * math.cos(angle),\n (robot_max_vel/2) * math.sin(angle)\n )\n elif 17 <= action_ind <= 32:\n angle += (action_ind - 17)*(math.pi / 8)\n vel = (\n robot_max_vel * math.cos(angle),\n robot_max_vel * math.sin(angle)\n )\n elif action_ind == 33:\n self.headings[self.robot_num] += self.rot_speed\n elif action_ind == 34:\n self.headings[self.robot_num] -= self.rot_speed\n self.headings[self.robot_num] = normalize(self.headings[\n self.robot_num])\n # Set the robot's goal given the action that was selected\n ts = self.sim.getTimeStep()\n pos = self.sim.getAgentPosition(self.robot_num)\n self.goals[self.robot_num] = (\n pos[0] + vel[0] * ts, pos[1] + vel[1] * ts\n )\n self.advance_simulation()", "def step(self, action):\n self.steps += 1\n self.robots[0].setAction(action)\n for i in range(self.num_agents):\n if i != 0 and self.policies[i:i+1]: # self.policies[0] is dummy\n self.robots[i].setAction(self.policies[i](self.robots[i].getObservation()))\n # rewards = [ -1.0 * self.num_foods / self.max_steps for _ in range(self.num_agents) ] # so agent needs to eat foods 
quickly\n rewards = [ 0.0 for _ in range(self.num_agents) ]\n for i in range(self.BULLET_STEPS):\n p.stepSimulation()\n rewards = [ rewards[i]+self._getReward(self.robots[i]) for i in range(self.num_agents) ]\n self.episode_rewards = [ self.episode_rewards[i]+rewards[i] for i in range(self.num_agents) ]\n obs = self.robots[0].getObservation()\n done = self._isDone()\n info = { 'steps': self.steps }\n if done:\n # TODO\n info['episode'] = { 'r': self.episode_rewards[0], 'l': self.steps, 'r_all': self.episode_rewards }\n # print(self.episode_rewards, self.steps)\n return obs, rewards[0], done, info", "def steps(self, steps):\n\n self._steps = steps", "def __init__(self, max_row, max_col):\n self.action_set = default_action_set\n self.option_set = []\n self.default_max_actions = len(self.action_set) # will stay fixed\n self.max_actions = len(self.action_set) # can increase\n\n self.Q = np.zeros((max_row, max_col, self.default_max_actions))\n self.states_rc = [(r, c) for r in range(max_row)\n for c in range(max_col)]\n\n self.last_state, self.last_action = -1, -1\n self.steps = 0\n self.max_row, self.max_col = max_row, max_col\n\n\n self.is_following_option = False\n self.option_number = -1", "def arange(self, start: float, stop: float, step: float = 1.0) -> None:\n self.values = []\n assert step != 0.0\n while abs(start) < abs(stop):\n self.values.append(start)\n start += step", "def test_stepint(self):\n fun = get_problem('stepint', self.dimension)\n self.assertEqual(fun(self.array9), -5.0)", "def step(self, action) -> (list, float, bool):\n if len(self.curr_stim) == 0:\n self.curr_stim += [action[0]] * action[1] + [-action[0]] * action[1]\n self.system_step()\n self.frame += 1 / self.config[\"Fs\"]\n self.history.append(self.x2-self.x1)\n return self.get_state(), 0, False", "def make_step(self):\n self.step_vals = np.cumsum(self.vals)", "def step(self, action):\n observation, reward, done, _ = self.env.step(action)\n return np.array(observation), reward, done", "def __init__(self, start_threshold, end_threshold, step, method=\"linear\"):\n if start_threshold < 0 or end_threshold < 0 or step < 0:\n raise ValueError(\"Thresholds must be positive.\")\n\n if start_threshold < end_threshold:\n raise ValueError(\"Start threshold must be bigger than end \"\n \"threshold.\")\n\n if method == \"exponential\" and step > 1:\n raise ValueError(\"For exponential updating, the step parameter \"\n \"must not be explosive.\")\n\n self._start_threshold = start_threshold\n self._end_threshold = end_threshold\n self._step = step\n self._method = method\n\n self._threshold = start_threshold", "def test_chain(self):\n self._test_chain(self.array_dense,\n ['min-max', 'pca', 'min-max', 'rbf', 'svm'],\n [{'feature_range': (-5, 5)}, {},\n {'feature_range': (0, 1)}, {}, {}],\n y=self.labels)", "def setStepSize(self, step_size):\n assert isinstance(step_size, int)\n self.step_size = step_size\n self.step_directions = [np.array([i[0], i[1]]) for i in [(0,0),\n (0,step_size),\n (0,-step_size),\n (step_size, 0),\n (-step_size,0)]]", "def __init__(self, initial_value, n_values, schedule):\n self.step = 0.\n self.initial_value = initial_value\n self.nvalues = n_values\n self.schedule = SCHEDULES[schedule]", "def step(self, action):\n # TODO: code here\n y, x = self.state\n dy, dx = self.moves[action]\n next_x, next_y = x+dx, y+dy\n\n next_x = np.clip(next_x, 0, self.width-1) # clip the values to the world\n next_y = np.clip(next_y, 0, self.height-1) # clip the values to the world\n\n if next_y == 1:\n rand = 
np.random.uniform()\n if rand < 0.2:\n next_x += 1\n elif rand < 0.7:\n next_x += 2\n else:\n next_x += 3\n\n next_x = np.clip(next_x, 0, self.width - 1)\n\n if next_x == 4 and next_y == 1:\n reward = -1\n done = True\n elif next_x == 4 and next_y == 2:\n reward = 1\n done = True\n else:\n reward = 0\n done = False\n\n next_state = (next_y, next_x)\n self.state = next_state\n\n return next_state, reward, done, {}", "def test_step(self):\n fun = get_problem('step', self.dimension)\n self.assertEqual(fun(self.array), 0.0)", "def step(self, action: ActionType) -> None:\n raise NotImplementedError", "def step(self, actions):\n \n lastidx = 0\n for _i in range(self.nbehavior):\n action_tuple = ActionTuple()\n action_tuple.add_discrete(actions[lastidx:lastidx + self.n_each_agent[_i], :])\n self.env.set_actions(behavior_name=self.behavior_names[_i], action=action_tuple)\n lastidx = self.n_each_agent[_i]\n\n self.env.step()\n self.decision_steps = []\n self.terminal_steps = []\n\n for _i in range(self.nbehavior):\n d_s, t_s = self.env.get_steps(self.behavior_names[_i])\n self.decision_steps.append(d_s)\n self.terminal_steps.append(t_s)\n\n obs = []\n reward = []\n done = []\n info = {}\n\n for _i in range(self.nbehavior):\n _j = 0\n for o in self.reshape_obs(self.decision_steps[_i]):\n obs.append(o)\n reward.append(self.decision_steps[_i].reward[_j])\n done.append(False)\n _j += 1\n\n return obs, reward, done, info", "def __call__(self, new_val, previous_val, step):\n\t\treturn", "def act(self, env: FakeEnv, s: ActorStrategy):\n action = env.action_space.sample()\n print(f\"Sampled action shape : {action.shape}\")\n env.step(action)", "def step(self, action):\n self.steps += 1\n in_var = self.state[:4]\n\n # Increase or decrease the 4 input values\n new_var = in_var+ action \n\n #If the agent tries to exceed the range of the mins & maxes, this sets them to the max. 
\n for i,temp_i in enumerate(new_var):\n if (temp_i <= self.mins[i]):\n new_var[i] = self.mins[i]\n elif (temp_i >= self.maxes[i]): \n new_var[i] = self.maxes[i]\n\n in_var = new_var\n\n # Get all the new outputs:\n self.ins = in_var\n out_flow = self.temp_func(var=self.O_CH4_flow_uniformity)\n out_frac = self.temp_func(var=self.O_CH4_mol_frac)\n out_temp = self.temp_func(var=self.O_t)\n\n #check that this is a viable output; if not, reject the action\n #is this temp change viable?\n \n MSE1 = (self.goals[0]-out_flow)**2\n MSE2 = (self.goals[1]-out_frac)**2\n MSE3 = (self.goals[2]-out_temp)**2\n\n MSE = MSE1 + MSE2 + MSE3\n\n # Update your state:\n state_new = np.append(self.ins,[out_flow,out_frac,out_temp] )\n self.state =np.append(state_new,self.goals)\n\n done = ((MSE1 <= self.MSE_thresh1) & (MSE2 <= self.MSE_thresh2) & (MSE3 <= self.MSE_thresh3))\n done = bool(done)\n\n # Get the corresponding reward:\n reward = 0\n if done:\n reward += self.rew_goal\n else: \n reward -= MSE *cfg['MSE_scale']\n\n self.reward = reward\n self.tot_rew += reward\n self.done = done\n\n return (self.state, reward, done, {'MSE thresh': self.MSE_thresh1})", "def step(self, **kwargs):\n pass", "def set_options(self, options):\n self._set_steps(options.get('bounds', [(0,1)]), options.get('steps',2))", "def __init__(self, *args):\n \n self.steps = args", "def step(self):\n return _uhd_swig.meta_range_t_step(self)", "def test_default_step(self):\r\n start, stop = iscalars('start', 'stop')\r\n out = arange(start, stop)\r\n f = function([start, stop], out)\r\n\r\n if config.cast_policy == 'custom':\r\n assert out.dtype == start.type.dtype\r\n elif config.cast_policy in ('numpy', 'numpy+floatX'):\r\n assert out.dtype == numpy.arange(numpy.int32(0),\r\n numpy.int32(1)).dtype\r\n else:\r\n raise NotImplementedError(config.cast_policy)\r\n assert numpy.all(f(0, 5) == numpy.arange(0, 5))\r\n assert numpy.all(f(-5, 1) == numpy.arange(-5, 1))\r\n assert numpy.all(f(0, 0) == numpy.arange(0, 0))\r\n\r\n dstart, dstop = dscalars('start', 'stop')\r\n dout = arange(dstart, dstop)\r\n df = function([dstart, dstop], dout)\r\n\r\n assert dout.dtype == dstart.type.dtype\r\n #print df(0.2, 5.3)\r\n #print numpy.arange(0.2, 5.3)\r\n assert numpy.all(df(0.2, 5.3) == numpy.arange(0.2, 5.3))\r\n assert numpy.all(df(0.8, 5.3) == numpy.arange(0.8, 5.3))\r\n assert numpy.all(df(-0.7, 5.3) == numpy.arange(-0.7, 5.3))", "def step(self, action):\n total_reward = 0.0\n done = False\n obs_list = []\n for i in range(self._skip):\n obs, reward, done, info = self.env.step(action)\n obs_list.append(obs)\n total_reward += reward\n if done:\n break\n # Note that the observation on the done=True frame\n # doesn't matter\n max_frame = np.max(obs_list[-2:], axis=0)\n return max_frame, total_reward, done, info", "def test_Op_integers(self):\r\n start, stop, step = iscalars('start', 'stop', 'step')\r\n out = ARange(start.type.dtype)(start, stop, step)\r\n f = function([start, stop, step], out)\r\n\r\n assert numpy.all(f(0, 5, 1) == numpy.arange(0, 5, 1))\r\n assert numpy.all(f(2, 11, 4) == numpy.arange(2, 11, 4))\r\n assert numpy.all(f(-5, 1, 1) == numpy.arange(-5, 1, 1))\r\n assert numpy.all(f(10, 2, -2) == numpy.arange(10, 2, -2))\r\n assert numpy.all(f(10, 2, 2) == numpy.arange(10, 2, 2))\r\n assert numpy.all(f(0, 0, 1) == numpy.arange(0, 0, 1))", "def __init__(self, state_size, action_size, action_low, action_high):\n self.state_size = state_size\n self.action_size = action_size\n self.action_low = action_low\n self.action_high = action_high\n 
self.action_range = self.action_high - self.action_low\n\n ###\n\n self.build_model()", "def step(self, actions):\n assert (len(actions) == self.num_actions)\n actions = np.around(actions)\n actions = np.clip(actions, 0, 1)\n self.done = self.network.perform_actions(actions)\n self.cur_pos = self._get_current_pos_in_1d()\n self.reward = self.network.get_reward()\n\n return self.cur_pos, self.reward, self.done, {}", "def test_mujoco_action_dimensions(env_spec: EnvSpec):\n env = env_spec.make(disable_env_checker=True)\n env.reset()\n\n # Too few actions\n with pytest.raises(ValueError, match=\"Action dimension mismatch\"):\n env.step(env.action_space.sample()[1:])\n\n # Too many actions\n with pytest.raises(ValueError, match=\"Action dimension mismatch\"):\n env.step(np.append(env.action_space.sample(), 0))\n\n # Too few dimensions\n with pytest.raises(ValueError, match=\"Action dimension mismatch\"):\n env.step(0.1)\n\n # Too many dimensions\n with pytest.raises(ValueError, match=\"Action dimension mismatch\"):\n env.step(np.expand_dims(env.action_space.sample(), 0))\n\n # Incorrect shape\n with pytest.raises(ValueError, match=\"Action dimension mismatch\"):\n env.step(np.expand_dims(env.action_space.sample(), 1))\n\n env.close()", "def perform_step(self, action):\n pass", "def getSteps():", "def step(self, action_idx):\n action = Actions(action_idx)\n reward, done = self._state.step(action)\n obs = self._state.to_numpy_array()\n info = {\"instrument\": self._instrument, \"offset\": self._state._offset} # might be an error (private member use)\n return obs, reward, done, info", "def action(self, action):\n action = (action + 1) / 2 # [-1, 1] => [0, 1]\n action *= (self.action_space.high - self.action_space.low)\n action += self.action_space.low\n return action", "def __init__(self, *, center: int = 10, step: int = 2, **kwargs):\n self.center = center\n self.step = step\n\n super().__init__(**kwargs)", "def test_discrete_actions_out_of_bound(env: gym.Env):\n assert isinstance(env.action_space, spaces.Discrete)\n upper_bound = env.action_space.start + env.action_space.n - 1\n\n env.reset()\n with pytest.raises(Exception):\n env.step(upper_bound + 1)\n\n env.close()", "def step(self, s, a):\n raise NotImplementedError", "def __init__(self, start_epsilon, end_epsilon, decay_steps, decay_schedule='const',\n min_action=None, max_action=None):\n self.min_action = min_action\n self.max_action = max_action\n self.epsilon_updater = common.ParameterUpdater(\n start_epsilon, end_epsilon, decay_steps, decay_schedule)", "def arange(start=0, stop=None, step=None):\n raise NotImplementedError", "def __init__ ( self , func , step = 0 , order = 2 , err = False ) :\n assert is_integer ( order ) and 0 <= order, \"Invalid ``order''-parameter!\"\n \n self.__func = func\n self.__step = float( step ) \n self.__order = int ( order )\n self.__err = True if err else False", "def step(self, action):\n self.t += 1\n if self.use_run_time_assurance:\n probe_state, unsafe = self.probe_step(action)\n # switch to safe controller if unsafe\n if unsafe:\n x, x_dot, theta, theta_dot = probe_state\n # go right\n if x <= -self.x_threshold: # go right\n action = 1\n elif x>= self.x_threshold: # go left\n action = 0 \n \n state, reward, done, info = self.env.step(action)\n # Could make a custom reward here if you want\n if self.use_action_masking:\n self.update_avail_actions(state)\n obs = {\n \"action_mask\": self.action_mask,\n \"actual_obs\": state,\n }\n else:\n obs = state\n \n if self.t >= 200: # ------ change if using v1\n 
done = True\n return obs, reward, done, info", "def __init__(self, start, step, size, unit='SECOND'):\n self.unit = unit\n self.start = start\n self.step = step\n self.size = size", "def step(self):\n return _uhd_swig.range_t_step(self)", "def step(self,action):\n observation, reward, done, info = self.env.step(action)\n if info[\"health\"] <= 0 or info[\"enemy_health\"] <= 0:\n self.player_hp = 120\n self.enemy_hp = 120\n reward = 0\n else:\n self.player_hp = info['health']\n self.enemy_hp = info[\"enemy_health\"]\n reward = self.player_hp - self.enemy_hp\n\n\n if info[\"enemy_rounds_won\"] == 2 or info[\"rounds_won\"] == 2:\n self.player_hp = 120\n self.enemy_hp = 120\n reward = 0\n done = True\n\n obs = self.observation(observation)\n if self.current_frame_number == self.frame_skipping:\n self.q.append(obs)\n self.current_frame_number = 0 \n self.current_frame_number += 1\n reward = reward / 120 +1\n return np.array(list(self.q)), reward, done, info", "def step(self, action: np.ndarray) -> Tuple[np.ndarray, np.float64, bool, dict]:\n next_state, reward, done, info = self.env.step(action)\n return next_state, reward, done, info", "def scale_actions(self, actions):", "def step(self):\r\n raise NotImplementedError", "def get_step_actions(self):\n actions = self.actor(tf.numpy_function(self.get_states, [], tf.float64))\n actions += tf.random.normal(\n shape=(self.n_envs, self.n_actions), stddev=self.step_noise_coef\n )\n return tf.clip_by_value(actions, -1, 1)", "def set_parameter_and_step(self, pname, value, nstep=2, warning_action=\"default\"):\n setattr(self.p, pname, value)\n with warnings.catch_warnings():\n warnings.simplefilter(warning_action)\n for _ in range(nstep):\n self.step()", "def _step(self, action):\n\n # action is generated from the action_policy (external to the environment)\n if len(action) == 4:\n object_index, new_location, action_means, action_stds = action\n if len(action) == 2:\n \"\"\"\n Action is not generated from a Gaussian distribution\n \"\"\"\n object_index, new_location = action\n action_means = action_stds = None\n \n position = new_location[:2]\n rotation = new_location[2]\n\n prev_transform = self.e.objects[object_index].transform\n\n if len(self.action_storage) > 0:\n last_progress = self.action_storage[-1][4]\n else:\n last_progress = 0\n\n info = {}\n if self.e.act(object_index, Command(position, rotation)):\n # print ('Action accepted')\n cur_transform = self.e.objects[object_index].transform\n # I need to call self.action_storage.append before get_observation_and_progress\n self.action_storage.append( [object_index, prev_transform, cur_transform, None, None, True, action_means, action_stds] )\n observation, progress = self.get_observation_and_progress()\n self.action_storage[-1][3:5] = [observation, progress]\n\n info['action_accepted'] = True\n else:\n \"\"\"\n Action failed\n We can reduce the progress to avoid falling out of the table\n \"\"\"\n if len(self.action_storage) > 0:\n # Just return observation and progress of last action\n _, _, _, observation, progress, _, _, _ = self.action_storage[-1]\n progress -= self.config.failed_action_penalty\n else:\n # First action failed\n observation, _ = self.get_observation_and_progress()\n progress = -self.config.failed_action_penalty\n \n self.action_storage.append( [object_index, prev_transform, prev_transform, observation, progress, False, action_means, action_stds] )\n\n \n info['action_accepted'] = False\n\n # Typical threshold approach\n if progress > self.progress_threshold:\n # Finish action\n 
done = True\n else:\n done = False\n \n reward = progress - last_progress\n #print ('Progress = %.2f ; reward = %.2f' % (progress, reward))\n\n return (observation, reward, done, info)", "def __init__(self, action_size: int, epsilon: float, max_policy) -> None:\n self._action_size = action_size\n self._epsilon = epsilon\n self._max_policy = max_policy", "def step(self, action: list) -> None:\n self._input = np.array(\n [self._thrust_surge(action[0]), self._moment_steer(action[1])]\n )\n w, q = odesolver45(\n self._state_dot, self._state, self.config.simulation.t_step_size\n )\n\n self._state = q\n self._state[2] = geom.princip(self._state[2])\n\n self._prev_states = np.vstack([self._prev_states, self._state])\n self._prev_inputs = np.vstack([self._prev_inputs, self._input])\n\n self._step_counter += 1", "def act(self, q_values, *args, **kwargs):\n if np.random.binomial(1, p=self.epsilon_updater.cur_value):\n action = np.array([np.random.choice(range(len(q_values)))])\n else:\n action = np.array([np.argmax(q_values)])\n self.epsilon_updater.update()\n return action", "def getInputSpecification(cls):\n ## This will replace the lines above\n inSpec= super(ValueDuration, cls).getInputSpecification()\n inSpec.addSub(InputData.parameterInputFactory('target',\n contentType=InputTypes.StringListType,\n strictMode=True))\n inSpec.addSub(InputData.parameterInputFactory('bins',\n contentType=InputTypes.IntegerType))\n return inSpec", "def actions(self):\n return {0, 1, 2, 3, 4, 5, 11, 12}", "def set_default_behavior(self):\n min_, max_ = self.minimum(), self.maximum()\n\n self.setTickInterval((max_ - min_)/50.)\n self.setSingleStep((max_ - min_)/100.)\n self.setPageStep((max_ - min_)/50.)", "def regular(step, start=0.):\n\n def output(low, high):\n newstart = math.ceil((low - start)/step) * step + start\n return numpy.arange(newstart, high, step, dtype=numpy.float)\n output.func_name = \"regular(%g, start=%g)\" % (step, start)\n return output", "def act(self, action_values: Tensor) -> Tensor:\n ...", "def __init__(self, min_val, max_val):\n self.values = (min_val, max_val)", "def __init__(self, min_val, max_val):\n self.values = (min_val, max_val)", "def __init__(self, min_val, max_val):\n self.values = (min_val, max_val)", "def __init__(self, min_val, max_val):\n self.values = (min_val, max_val)", "def step(self, action):\n\n # handling error\n \n assert (action < self.NumofBandits)#, \"[ERROR] un-identified arm\"\n\n # return reward via sampling from normal distribution\n return np.random.normal(self.MeanList[action], self.sigma, 1)[0]", "def set_up_continuous_action_space(self):\n self.action_space = gym.spaces.Box(shape=(self.action_dim,),\n low=-1.0,\n high=1.0,\n dtype=np.float32)\n self.action_high = self.torque * np.ones([self.action_dim])\n self.action_low = -self.action_high" ]
[ "0.6053397", "0.59856373", "0.5846451", "0.5825244", "0.5739144", "0.57092017", "0.5685348", "0.5438964", "0.5409966", "0.5284483", "0.5272299", "0.5256275", "0.524941", "0.524941", "0.52339363", "0.522407", "0.5194787", "0.5182742", "0.51721895", "0.51688147", "0.514305", "0.5124584", "0.51207817", "0.5109548", "0.50846547", "0.50414044", "0.50353223", "0.49906597", "0.49906597", "0.49876294", "0.4986241", "0.49755394", "0.49444854", "0.49279755", "0.49209172", "0.49182847", "0.49152374", "0.48923305", "0.4875389", "0.48454762", "0.48405454", "0.48375618", "0.48351613", "0.48252556", "0.48203933", "0.48028892", "0.47992295", "0.4795926", "0.47927395", "0.47898385", "0.47874993", "0.4774956", "0.47692436", "0.4765465", "0.4760971", "0.47550923", "0.47477514", "0.47448567", "0.47441304", "0.47381178", "0.47351876", "0.47310328", "0.47246212", "0.4714181", "0.47093093", "0.47084025", "0.4697964", "0.46917027", "0.4690102", "0.46886197", "0.46858412", "0.4680028", "0.4676432", "0.4674636", "0.46690983", "0.46550855", "0.46526137", "0.46515882", "0.4646021", "0.4645748", "0.4634963", "0.46321723", "0.46198288", "0.46185097", "0.4615384", "0.46142912", "0.46134123", "0.46132973", "0.4602679", "0.4601203", "0.45993522", "0.4597221", "0.45878062", "0.45853817", "0.45844266", "0.45844266", "0.45844266", "0.45844266", "0.45840776", "0.45828927" ]
0.55081457
7
Defines the rewards that are returned by `step()`. Override this method to define an environment that uses non-standard reward values, for example an environment with array-valued rewards.
def reward_spec(self) -> types.NestedArraySpec: return array_spec.ArraySpec(shape=(), dtype=np.float32, name='reward')
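A minimal sketch, assuming the TF-Agents array_spec API that the snippet above appears to use, of how an array-valued (non-standard) reward spec could be declared and checked; the two reward channels shown are hypothetical:

import numpy as np
from tf_agents.specs import array_spec

# Scalar default, matching the base implementation above.
scalar_reward_spec = array_spec.ArraySpec(shape=(), dtype=np.float32, name='reward')

# Hypothetical non-standard alternative: two reward channels per step,
# e.g. (task_reward, energy_penalty). A subclass would return this spec from
# its overridden reward_spec() method, and its step() would then have to emit
# rewards with this shape and dtype.
vector_reward_spec = array_spec.ArraySpec(shape=(2,), dtype=np.float32, name='reward')

reward = np.array([1.0, -0.1], dtype=np.float32)
assert reward.shape == vector_reward_spec.shape
assert reward.dtype == vector_reward_spec.dtype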
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_rewards(self, step_reward, goal_reward, bad_state_reward=None, restart_state_reward = None):\n self.r_step = step_reward\n self.r_goal = goal_reward\n self.r_bad = bad_state_reward\n self.r_restart = restart_state_reward", "def __init__(self, env):\n gym.RewardWrapper.__init__(self, env)", "def reward(self, env):\n del env\n return 1", "def __init__(self, env=None, tilesEnv=False):\n super(MarioEnv, self).__init__(env)\n self.resetCount = -1\n # reward is distance travelled. So normalize it with total distance\n # https://github.com/ppaquette/gym-super-mario/blob/master/ppaquette_gym_super_mario/lua/super-mario-bros.lua\n # However, we will not use this reward at all. It is only for completion.\n self.maxDistance = 3000.0\n self.tilesEnv = tilesEnv", "def test_noop(self):\n base_env = _DiscreteEnvironmentOneReward(\n action_dtype=np.int64,\n reward_spec=specs.Array(dtype=np.float32, shape=()))\n wrapped_env = wrappers.DelayedRewardWrapper(base_env, accumulation_period=1)\n base_episode_reward = _episode_reward(base_env)\n wrapped_episode_reward = _episode_reward(wrapped_env)\n self.assertEqual(base_episode_reward, wrapped_episode_reward)", "def add_env_args(parser):\n # sawyer\n parser.add_argument(\n \"--reward_type\",\n type=str,\n default=\"dense\",\n choices=[\"dense\", \"sparse\"],\n help=\"reward type\",\n )\n parser.add_argument(\n \"--distance_threshold\",\n type=float,\n default=0.06,\n help=\"distance threshold for termination\",\n )\n parser.add_argument(\n \"--max_episode_steps\",\n type=int,\n default=70,\n help=\"maximum timesteps in an episode\",\n )\n parser.add_argument(\n \"--camera_name\",\n type=str,\n default=\"visview\",\n help=\"camera name in an environment\",\n )\n\n # observations\n parser.add_argument(\n \"--frame_skip\", type=int, default=1, help=\"Numer of skip frames\"\n )\n parser.add_argument(\n \"--action_repeat\", type=int, default=1, help=\"number of action repeats\"\n )\n parser.add_argument(\n \"--ctrl_reward_coef\", type=float, default=0, help=\"control reward coefficient\"\n )\n\n parser.add_argument(\n \"--kp\", type=float, default=40.0, help=\"p term for a PID controller\"\n ) # 150.)\n parser.add_argument(\n \"--kd\", type=float, default=8.0, help=\"d term for a PID controller\"\n ) # 20.)\n parser.add_argument(\n \"--ki\", type=float, default=0.0, help=\"i term for a PID controller\"\n )\n parser.add_argument(\n \"--frame_dt\", type=float, default=0.15, help=\"delta t between each frame\"\n ) # 0.1)\n parser.add_argument(\n \"--use_robot_indicator\",\n type=eval,\n default=False,\n help=\"enable visualization of robot indicator for motion planner\",\n )\n parser.add_argument(\n \"--use_target_robot_indicator\",\n type=eval,\n default=False,\n help=\"enable visualization of robot indicator for target position of motion planner\",\n )\n parser.add_argument(\n \"--success_reward\", type=float, default=150.0, help=\"completion reward\"\n )\n parser.add_argument(\n \"--contact_threshold\",\n type=float,\n default=-0.002,\n help=\"depth thredhold for contact\",\n )\n parser.add_argument(\n \"--joint_margin\", type=float, default=0.001, help=\"marin of each joint\"\n )\n parser.add_argument(\"--task_level\", type=str, default=\"easy\")\n parser.add_argument(\n \"--step_size\",\n type=float,\n default=0.02,\n help=\"step size for invalid target handling\",\n )\n # puck\n parser.add_argument(\"--puck_friction\", type=float, default=2.0)\n parser.add_argument(\"--puck_mass\", type=float, default=0.01)\n 
parser.add_argument(\"--source_env_puck_friction\", type=float, default=2.0)\n parser.add_argument(\"--source_env_puck_mass\", type=float, default=0.01)\n parser.add_argument(\"--target_env_puck_friction\", type=float, default=2.0)\n parser.add_argument(\"--target_env_puck_mass\", type=float, default=0.01)\n\n parser.add_argument(\"--env_ob_source\", type=str2bool, default=False)\n parser.add_argument(\"--end_effector\", type=str2bool, default=True)\n parser.add_argument(\"--ik_target\", type=str, default=\"grip_site\")\n parser.add_argument(\n \"--action_range\", type=float, default=0.1, help=\"range of radian\"\n )\n parser.add_argument(\"--dr\", type=str2bool, default=False)\n parser.add_argument(\"--dr_params_set\", type=str, default=\"IP_large_range\")\n\n parser.add_argument(\"--mod_env_params\", type=str2bool, default=False)\n parser.add_argument(\"--param_mod_instructions\", type=eval, default=[])\n\n parser.add_argument(\"--unity\", type=str2bool, default=False)\n parser.add_argument(\"--unity_editor\", type=str2bool, default=False)\n parser.add_argument(\"--virtual_display\", type=str, default=\":1\")\n parser.add_argument(\"--port\", type=int, default=4000)\n\n # FetchReach action\n parser.add_argument(\"--action_rotation_degrees\", type=float, default=0.0)\n parser.add_argument(\"--action_z_bias\", type=float, default=0.0)", "def test_full_preprocessing_rewards(self):\n env = DummyEnv()\n env_wrapped = generic_preprocess(env, max_n_noops=0, clip_rewards=False)\n env_wrapped.reset()\n _, r1, _, _ = env_wrapped.step(0)\n _, r2, _, _ = env_wrapped.step(0)\n _, r3, _, _ = env_wrapped.step(0)\n # MaxWrapper skips the first step after reset (which gives reward 2)\n # FrameStackWrapper does another 3 steps after reset, each of which\n # does 4 steps in the raw environment because of FrameSkipWrapper.\n # Step 1: 3, 4, 5, 6\n # Step 2: 7, 8, 9, 10\n # Step 3: 11, 12, 13, 14\n # The first step we do should get rewards 15, 16, 17 18, summed by\n # FrameSkipWrapper.\n self.assertEqual(r1, 66)\n # Then 19 + 20 + 21 + 22.\n self.assertEqual(r2, 82)\n # Then 23 + 24 + 25 + 27.\n self.assertEqual(r3, 98)", "def hard_reset(self):\n self._hard_reset()\n rewards = np.zeros((self.n_envs, self.n_players))\n return self.obs, rewards, self.agent_dones, self.info_dict", "def __init__(self, environment):\n self.env = environment\n self.cumreward = 0 # tracking cumulative reward\n self.samples = 0 # tracking the number of samples\n\n self.sensor_limits = None\n self.actor_limits = None\n self.clipping = True\n\n self.current_action = 0 # Saving current action\n self.prev_action = -1 # Saving previous action", "def run(self):\n\t\tep_rewards = [0.0]\n\t\tavg_rewards = []\n\t\tobs = self.env.reset()\n\t\tstep_counter = 0\n\n\t\tself.mylogger.info('Task: {}, epochs: {}, batch size: {}'.format(self.env.unwrapped.spec.id, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t self.epochs,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t self.batch_size\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t ))\n\n\t\tfor epoch in range(self.epochs):\n\t\t\tfor step in range(self.batch_size):\n\t\t\t\tstep_counter += 1\n\n\t\t\t\tself.observations[step] = obs.copy()\n\t\t\t\tself.actions[step], self.values[step] = self.model.action_value(obs[None, :])\n\t\t\t\tobs, self.rewards[step], self.dones[step], _ = self.env.step(self.actions[step])\n\t\t\t\tep_rewards[-1] += self.rewards[step]\n\n\t\t\t\tif step_counter % self.log_step == 0:\n\t\t\t\t\tlog_msg = 'global_step: {}, obs: {}, act: {}, reward: {}'.format(step_counter,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 
obs, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t self.actions[step], \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t self.rewards[step]\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t )\n\t\t\t\t\tself.mylogger.info(log_msg)\n\t\t\t\t\tself.mylogger.info(\"prev episode reward: {}\".format(ep_rewards[-2]))\n\n\t\t\t\tif self.dones[step]:\n\t\t\t\t\twith self.summary_writer.as_default():\n\t\t\t\t\t\ttf.summary.scalar('episode reward', ep_rewards[-1], step=step_counter)\n\t\t\t\t\tep_rewards.append(0.0)\n\t\t\t\t\tobs = self.env.reset()\n\n\t\t\t_, next_value = self.model.action_value(obs[None, :])\n\t\t\treturns, advs = self._returns_advantages(self.rewards, self.dones, self.values, next_value)\n\t\t\t# A trick to input actions and advantages through same API.\n\t\t\tacts_and_advs = np.concatenate([self.actions[:, None], advs[:, None]], axis=-1)\n\n\t\t\t# update weights \n\t\t\tlosses = self.model.train_on_batch(self.observations, [acts_and_advs, returns])\n\n\t\t\twith self.summary_writer.as_default():\n\t\t\t\ttf.summary.scalar('policy loss', losses[1], step=step_counter)\n\t\t\t\ttf.summary.scalar('value loss', losses[2], step=step_counter)", "def update_rewards(self, arm: int, reward: float):\r\n if self.rewards[arm][0] == self.optim_c:\r\n self.rewards[arm] = np.array(reward)\r\n else:\r\n self.rewards[arm] = np.append(self.rewards[arm], reward)", "def fit(self, env, num_iterations, max_episode_length=None):\n print ('initializing replay memory...')\n sys.stdout.flush()\n self.mode = 'init'\n self.memory.clear()\n self.preprocessor.reset()\n self.num_steps = 0\n num_updates = 0\n num_episodes = 0\n while num_updates < num_iterations:\n state = env.reset()\n self.preprocessor.reset()\n num_episodes += 1\n t = 0\n total_reward = 0\n while True:\n self.num_steps +=1\n t += 1\n action, _ = self.select_action(state)\n next_state, reward, is_terminal, debug_info = env.step(action)\n\n reward = self.preprocessor.process_reward(reward)\n total_reward += reward\n\n preprocessed_state = self.preprocessor.process_state_for_memory(state)\n\n self.memory.append(preprocessed_state, action, reward, is_terminal)\n\n if self.num_steps > self.num_burn_in:\n if self.mode != 'train':\n print('Finish Burn-in, Start Training!')\n\n self.mode = 'train'\n if self.num_steps % self.train_freq == 0:\n self.update_predict_network()\n num_updates += 1\n if num_updates % 10000 == 0:\n self.q_network.save_weights('%s/model_weights_%d.h5' % (self.save_path, num_updates // 10000))\n \n if is_terminal or (max_episode_length is not None and t > max_episode_length):\n break\n \n state = next_state\n #print ('episode %d ends, lasts for %d steps (total steps:%d), gets $d reward. 
(%d/%d updates.)' % (num_episodes, t, self.))", "def __generate_reward_function(self):\n K = -3\n self.reward = np.array([[10, 0, K],\n [0, 2, 0],\n [K, 0, 10]])", "def reset(self):\n\n # initialize gym env variables\n self.finish = False\n self.curr_step = -1\n self.curr_episode += 1\n\n # initialize target position\n self.target = np.random.uniform(-10.0,10.0,size=(2))\n\n # initialize sheep positions\n if self.fixed_reset:\n init_sheep_pose = np.array([75.0, 75.0])\n self.sheep_poses = (np.random.uniform(-50.0, 50.0, \n size=(self.num_sheep,2))) + init_sheep_pose[None,:]\n else:\n init_sheep_pose = np.random.uniform(-self.init_sheep_root, \n self.init_sheep_root, size=(2))\n self.sheep_poses = (np.random.uniform(-self.init_sheep_range, \n self.init_sheep_range, size=(self.num_sheep,2))) \\\n + init_sheep_pose[None,:]\n self.sheep_com = self.sheep_poses.mean(axis=0)\n\n # get the farthest sheep and radius of the sheep\n dist_to_com = np.linalg.norm((self.sheep_poses - self.sheep_com[None,:]), axis=1)\n self.farthest_sheep = self.sheep_poses[np.argmax(dist_to_com),:]\n self.radius_sheep = np.array([np.max(dist_to_com)])\n\n # update distance to target\n self.target_distance = np.linalg.norm(self.target - self.sheep_com)\n\n # initialize values for reward estimation\n self.init_radius_sheep = self.radius_sheep\n self.init_target_distance = self.target_distance\n\n # initialize dog position\n if self.fixed_reset:\n init_dog_pose = np.array([0.0,75.0])\n else:\n init_theta = np.random.uniform(-np.pi,np.pi)\n init_dog_pose = init_sheep_pose + self.init_dog_distance*np.array([np.cos(init_theta), \n np.sin(init_theta)])\n self.dog_pose = init_dog_pose\n\n # initialize inertia\n self.inertia = np.ones((self.num_sheep, 2))\n\n # initialize episode reward and length\n self.episode_reward = 0\n self.episode_length = 0\n\n # get the state, reward, finish, info\n state = self._get_state()\n \n return state", "def env_runner(env, policy, num_local_steps, summary_writer):\n last_state = env.reset()\n last_features = policy.get_initial_features()\n length = 0\n rewards = np.zeros(5)\n \n while True:\n terminal_end = False\n rollout = PartialRollout()\n\n for local_step in range(num_local_steps):\n features = policy.features(last_state, *last_features)\n state, reward, terminal, info = env.step(None)\n\n if len(np.shape(terminal)) > 0:\n reward = np.sum(reward, axis=0) / len(terminal)\n state = state[-1]\n terminal = terminal[-1]\n # total_reward = np.zeros_like(reward[0])\n\n # for i, t in enumerate(terminal[:-1]):\n # if t:\n # total_reward += reward[i]\n # else:\n # total_reward += policy.value(state[i], *features)\n\n # if terminal[-1]:\n # total_reward += reward[-1]\n # total_reward /= len(terminal)\n # else:\n # total_reward /= len(terminal[:-1])\n # likelihood *= 1. 
/ len(terminal)\n\n # reward = total_reward\n # state = state[-1]\n # terminal = terminal[-1]\n\n # collect the experience\n # note that the deepcopies seem to be necessary\n rollout.add(\n copy.deepcopy(last_state), \n copy.deepcopy(reward), \n info['weight'], \n terminal, \n copy.deepcopy(last_features))\n \n length += 1\n rewards += reward\n\n last_state = state\n last_features = features\n\n if info:\n summary = tf.Summary()\n for k, v in info.items():\n summary.value.add(tag=k, simple_value=float(v))\n summary_writer.add_summary(summary, policy.global_step.eval())\n summary_writer.flush()\n\n timestep_limit = env.spec.tags.get(\n 'wrapper_config.TimeLimit.max_episode_steps')\n if terminal or length >= timestep_limit:\n terminal_end = True\n if length >= timestep_limit or not env.metadata.get('semantics.autoreset'):\n last_state = env.reset()\n last_features = policy.get_initial_features()\n print(\"Episode finished. Sum of rewards: {}. Length: {}\".format(rewards, length))\n length = 0\n rewards = 0\n break\n\n if not terminal_end:\n rollout.r = policy.value(last_state, *last_features)\n\n # once we have enough experience, yield it\n yield rollout", "def soft_reset(self):\n self._soft_reset()\n rewards = np.zeros((self.n_envs, self.n_players))\n return self.obs, rewards, self.agent_dones, self.info_dict", "def experiment(config):\n with tf.Session() as sess:\n\n seed = config.pop('seed')\n\n if seed:\n seed = int(seed)\n random.seed(seed)\n tf.set_random_seed(seed)\n np.random.seed(seed)\n\n env_id = config.pop('env_id')\n LOGGER.info('using {} env'.format(env_id))\n\n env = gym.make(env_id)\n\n global_rewards = []\n global_step, episode = 0, 0\n\n config['env'] = env\n config['env_repr'] = repr(env)\n config['sess'] = sess\n\n render = int(config.pop('render'))\n\n agent = Agent(**config)\n\n rl_writer = tf.summary.FileWriter('./results/rl')\n save_args(config, 'results/args.txt')\n\n while global_step < config['total_steps']:\n episode += 1\n done = False\n rewards, actions = [], []\n observation = env.reset()\n\n while not done:\n global_step += 1\n\n # if episode % 1 == render:\n env.render()\n action = agent.act(observation)\n next_observation, reward, done, info = env.step(action)\n agent.remember(observation, action, reward, next_observation, done)\n train_info = agent.learn()\n\n rewards.append(reward)\n actions.append(action)\n observation = next_observation\n\n ep_rew = sum(rewards)\n global_rewards.append(ep_rew)\n avg_reward = sum(global_rewards[-100:]) / len(global_rewards[-100:])\n\n if episode % 10 == 0:\n log_str =' step {:.0f} ep {:.0f} reward {:.1f} avg {:.1f}'\n logging.info(log_str.format(global_step,\n episode,\n ep_rew,\n avg_reward))\n\n summary = tf.Summary(value=[tf.Summary.Value(tag='episode_reward',\n simple_value=ep_rew)])\n rl_writer.add_summary(summary, episode)\n avg_sum = tf.Summary(value=[tf.Summary.Value(tag='avg_last_100_ep',\n simple_value=avg_reward)])\n rl_writer.add_summary(avg_sum, episode)\n rl_writer.flush()\n \n return config", "def __init__(self,\n debug=False,\n urdf_version=None,\n control_time_step=0.005,\n action_repeat=5,\n control_latency=0,\n pd_latency=0,\n on_rack=False,\n motor_kp=1.0,\n motor_kd=0.02,\n render=False,\n num_steps_to_log=2000,\n env_randomizer=None,\n log_path=None,\n signal_type='ik',\n target_position=None,\n backwards=None,\n gait_type='trot',\n terrain_type='plane',\n terrain_id='plane',\n mark='base',\n ):\n self.phase = 0\n\n self._gait_type = gait_type \n # for observation space bounding \n self.max_speed = 
1.0\n self.min_speed = 0.5 # change back to 0.2 for OLD TD3 model evaluation\n \n self.min_side_speed = 0.0\n self.max_side_speed = 0.0\n\n self.speed = np.random.uniform(self.min_speed, self.max_speed)\n self.side_speed = np.random.uniform(self.min_side_speed, self.max_side_speed)\n self.speed_des = [self.speed, self.side_speed]\n\n # Initialization variables for periodic reward sum composition\n self.theta_FL = phase_constants.PHASE_VALS[self._gait_type]['front_left']\n self.theta_FR = phase_constants.PHASE_VALS[self._gait_type]['front_right']\n self.theta_RL = phase_constants.PHASE_VALS[self._gait_type]['rear_left']\n self.theta_RR = phase_constants.PHASE_VALS[self._gait_type]['rear_right']\n\n self.min_swing_ratio = 0.6\n self.max_swing_ratio = 0.8\n self.ratio = np.random.uniform(self.min_swing_ratio, self.max_swing_ratio)\n\n super(rexPeriodicRewardEnv,\n self).__init__(urdf_version=urdf_version,\n accurate_motor_model_enabled=True,\n motor_overheat_protection=False,\n motor_kp=motor_kp,\n motor_kd=motor_kd,\n remove_default_joint_damping=False,\n control_latency=control_latency,\n pd_latency=pd_latency,\n on_rack=on_rack,\n render=render,\n num_steps_to_log=num_steps_to_log,\n env_randomizer=env_randomizer,\n log_path=log_path,\n control_time_step=control_time_step,\n action_repeat=action_repeat,\n target_position=target_position,\n signal_type=signal_type,\n backwards=backwards,\n debug=debug,\n terrain_id=terrain_id,\n terrain_type=terrain_type,\n mark=mark,\n ratio=self.ratio,\n forward_reward_cap=5\n )\n\n self.height_des = 0.206 # this is init standing height for rex\n\n self.cycle_complete = 0\n self.cycle_len = 1000 # this is L\n \n # vonmises variables\n self.kappa = phase_constants.VON_MISES_KAPPA\n\n rex_joints = p.getNumJoints(bodyUniqueId=self.rex.quadruped)\n link_name_to_ID = {}\n for i in range(rex_joints):\n name = p.getJointInfo(self.rex.quadruped, i)[12].decode('UTF-8')\n link_name_to_ID[name] = i\n\n self.link_name_to_ID = link_name_to_ID\n self.toe_pos_last = { 'front_left_toe_pos' : p.getLinkState(self.rex.quadruped, self.link_name_to_ID['front_left_toe_link'])[0],\n 'front_right_toe_pos' : p.getLinkState(self.rex.quadruped, self.link_name_to_ID['front_right_toe_link'])[0],\n 'rear_left_toe_pos' : p.getLinkState(self.rex.quadruped, self.link_name_to_ID['rear_left_toe_link'])[0],\n 'rear_right_toe_pos' : p.getLinkState(self.rex.quadruped, self.link_name_to_ID['rear_right_toe_link'])[0]\n\n } \n\n print('Using Periodic Reward Composition Rex Environment')", "def reward(self, value):\n self._custom_setter('reward', value)", "def eval_policy_on_env(self, eval_gym_env, eval_episodes=10, seed=None):\n if not seed:\n eval_gym_env.seed(seed)\n else:\n eval_gym_env.seed(int(time.time()))\n avg_reward = 0.\n for i in range(eval_episodes):\n state, done = eval_gym_env.reset(), False\n obs = self.observer(state)\n step = 0\n #while not done and step < max_steps:\n while not done:\n action = self.plan(np.array(obs))\n state, reward, done, _ = eval_gym_env.step(action)\n obs = self.observer(state)\n avg_reward += reward\n step += 1\n\n avg_reward /= eval_episodes\n return avg_reward", "def q_learning(env, learning, discount, epsilon, min_eps, episodes):\n # [18.00000072 14.00000006]\n num_states = (env.observation_space.high - env.observation_space.low) * \\\n np.array([10, 100]) # >> [18.00000072 14.00000006]\n num_states = np.round(num_states, 0).astype(int) + 1 # >> [19 15]\n\n # Initialize Q table\n # env.action_space.n return the number of action that our agent can make 
(here 3, left, cease, right)\n Q = np.random.uniform(low=-1, high=1, size=(num_states[0], num_states[1], env.action_space.n))\n\n # Initialize variable to track rewards\n reward_list = []\n ave_reward_list = []\n\n # Calculate episodic reduction in epsilon\n reduction = (epsilon - min_eps) / (episodes / 2)\n\n for i in range(episodes):\n # Initialize parameters\n done = False\n tot_reward, reward = 0, 0\n state = env.reset()\n\n # Discretize state\n state_adj = adjust_state(state)\n\n while done != True:\n # Render env for last five eps\n if i >= (episodes - 20):\n env.render()\n\n # Determine next action - epsilon greedy strategy\n if np.random.random() < 1 - epsilon:\n action = np.argmax(Q[state_adj[0], state_adj[1]])\n else:\n action = np.random.randint(0, env.action_space.n)\n\n # Get next state and reward\n state2, reward, done, info = env.step(action)\n\n # Discretize state2\n state2_adj = adjust_state(state2)\n\n # Allow for terminal states // .5 on env_space[0] represent the flag position\n if done and state2[0] >= .5:\n Q[state_adj[0], state_adj[1], action] = reward\n\n # adjust Q value for current state\n else:\n '''work on this, it's complicated but far from non-understandable'''\n delta = learning*(reward + discount*np.max(Q[state2_adj[0], state2_adj[1]]) -\n Q[state_adj[0], state_adj[1], action])\n Q[state_adj[0], state_adj[1], action] += delta\n\n tot_reward += reward\n state_adj = state2_adj\n\n # Decay epsilon\n if epsilon > min_eps:\n epsilon -= reduction\n\n # Track rewards\n reward_list.append(tot_reward)\n\n if (i+1) % 100 == 0:\n ave_reward = np.mean(reward_list)\n ave_reward_list.append(ave_reward)\n reward_list = []\n print(f'Episode {i+1} Average Reward: {ave_reward}')\n\n env.close()\n\n return ave_reward_list", "def __init__(self,\n sess,\n num_actions,\n observation_shape=base_dqn_agent.NATURE_DQN_OBSERVATION_SHAPE,\n gamma=0.99,\n update_horizon=1,\n min_replay_history=20000,\n update_period=4,\n target_update_period=8000,\n epsilon_fn=linearly_decaying_epsilon,\n epsilon_train=0.01,\n epsilon_eval=0.001,\n epsilon_decay_period=250000,\n tf_device='/cpu:*',\n use_staging=True,\n max_tf_checkpoints_to_keep=3,\n optimizer=tf.train.RMSPropOptimizer(\n learning_rate=0.00025,\n decay=0.95,\n momentum=0.0,\n epsilon=0.00001,\n centered=True),\n summary_writer=None,\n summary_writing_frequency=500,\n clip_reward=False):\n self._clip_reward = clip_reward\n self.intrinsic_model = intrinsic_rewards.RNDIntrinsicReward(\n sess=sess,\n tf_device=tf_device,\n summary_writer=summary_writer)\n super(RNDDQNAgent, self).__init__(\n sess=sess,\n num_actions=num_actions,\n observation_shape=observation_shape,\n gamma=gamma,\n update_horizon=update_horizon,\n min_replay_history=min_replay_history,\n update_period=update_period,\n target_update_period=target_update_period,\n epsilon_fn=epsilon_fn,\n epsilon_train=epsilon_train,\n epsilon_eval=epsilon_eval,\n epsilon_decay_period=epsilon_decay_period,\n tf_device=tf_device,\n use_staging=use_staging,\n optimizer=optimizer,\n summary_writer=summary_writer,\n summary_writing_frequency=summary_writing_frequency)", "def evaluate(env, model, num_env, iter_step):\n episode_rewards = []\n episode_reward = np.zeros((num_env))\n obs = env.reset()\n for _ in tqdm(range(iter_step)):\n action, _states = model.predict(obs)\n obs, reward, done, info = env.step(action)\n episode_reward += reward\n for i in range(num_env):\n if done[i]:\n episode_rewards.append(episode_reward[i])\n episode_reward[i] = 0\n return episode_rewards", "def 
reward_spec(self):\n task_reward_spec = self._task.get_reward_spec()\n if task_reward_spec is not None:\n return task_reward_spec\n else:\n return super(Environment, self).reward_spec()", "def evaluate(agent, env, n_games=1):\n # env.render()\n game_rewards = []\n for _ in range(n_games):\n states = env.reset()\n\n total_reward = 0\n i = 0\n while True:\n i += 1\n actions = agent.sample_actions(agent.step(states))\n states, rewards, dones, infos = env.step(actions)\n total_reward += sum(rewards)\n if dones[0]:\n break\n\n # We rescale the reward back to ensure compatibility\n # with other evaluations.\n game_rewards.append(total_reward / env.num_envs)\n # env.render('disable')\n return game_rewards", "def test_noop_composite_reward(self):\n base_env = _DiscreteEnvironmentOneReward(\n action_dtype=np.int64,\n reward_spec=specs.Array(dtype=np.float32, shape=(2, 1)))\n wrapped_env = wrappers.DelayedRewardWrapper(base_env, accumulation_period=1)\n base_episode_reward = _episode_reward(base_env)\n wrapped_episode_reward = _episode_reward(wrapped_env)\n self.assertTrue(\n _compare_nested_sequences(base_episode_reward, wrapped_episode_reward))", "def phase(self, phase, **kwargs):\n ## Rewards\n if phase == 'set_default_behavior':\n rewards = {\n 'swim': +0.1, # reward given for anything else\n 'skin': +0.5, # reward given when breathing through the skin\n 'pneumostome': +1. # reward given when breathing through the pneumostome\n #'miss': # to force some type of trial duration limit if implementing trials in this task ?\n }\n elif phase == 'training_session':\n rewards = {\n 'swim': +0.1,\n 'skin': +1.,\n 'pneumostome': -1.\n }\n elif phase == 'testing_session':\n rewards = {\n 'swim': 0.,\n 'skin': 0.,\n 'pneumostome': 0.\n }\n return rewards", "def q_human(episodes = 1000, fill_table = False):\n path = os.getcwd()\n grid = AdvGridworld()\n os.chdir(path)\n print(grid.name + ' with Q-Learning Human Agent')\n gamma = 0.95\n learning_rate = 0.01\n agent = QLearning(8, gamma, learning_rate)\n\n liRewards = []\n\n for i in range(episodes):\n grid.reset()\n state = grid.state\n width = grid.getBoardDim()[0]\n top_right_unex = True\n bottom_left_unex = True\n while not grid.isEnd:\n state_ind = state[1]*(width+1)+state[0]\n if fill_table:\n actions = [0, 1, 2, 3, 4, 5, 6, 7]\n action = np.random.choice(actions, 1)[0]\n else:\n action = agent.get_action(state_ind)\n next_state, reward, is_end = grid.step(action)\n next_state_ind = next_state[1]*(width+1)+next_state[0]\n #makes a 2 shape\n if (next_state_ind not in [0,1,2,3,10,17,24,23,22,21,28,35,42,43,44,45,46,47,48]):\n reward = -10\n\n agent.train(state_ind, action, reward, next_state_ind)\n state = next_state\n #print(i, grid.reward)\n liRewards.append(grid.reward)\n rewards = np.array(liRewards)\n print(\"Rewards Info for \", episodes, ' Episodes')\n print('Size', rewards.size)\n print('Mean', np.mean(rewards, axis=0))\n print('Stdev', np.std(rewards, axis=0))\n print('Min', np.min(rewards))\n print('Max', np.max(rewards))\n\n return rewards, agent", "def rl_alone(episodes = 1000, fill_table = False):\n path = os.getcwd()\n grid = AdvGridworld()\n os.chdir(path)\n print(grid.name + ' with Q-Learning Agent')\n gamma = 0.95\n learning_rate = 0.01\n agent = QLearning(8, gamma, learning_rate)\n\n liRewards = []\n\n #action_index = {\"up\":0,\"right\":1,\"down\":2,\"left\":3,\"upri\":4,\"dori\":5,\"dole\":6,\"uple\":7}\n\n for i in range(episodes):\n grid.reset()\n state = grid.state\n width = grid.getBoardDim()[0]\n while not grid.isEnd:\n state_ind = 
state[1]*(width+1)+state[0]\n #If we want a filled Q-tables, we can't let the agent be optimal\n #or else we wouldn't know the q-values for never reached state-action pairs\n if fill_table:\n actions = [0, 1, 2, 3, 4, 5, 6, 7]\n action = np.random.choice(actions, 1)[0]\n else:\n action = agent.get_action(state_ind)\n next_state, reward, is_end = grid.step(action)\n next_state_ind = next_state[1]*(width+1)+next_state[0]\n agent.train(state_ind, action, reward, next_state_ind)\n state = next_state\n\n liRewards.append(grid.reward)\n rewards = np.array(liRewards)\n print(\"Rewards Info for \", episodes, ' Episodes')\n print('Size', rewards.size)\n print('Mean', np.mean(rewards, axis=0))\n print('Stdev', np.std(rewards, axis=0))\n print('Min', np.min(rewards))\n print('Max', np.max(rewards))\n\n return rewards, agent", "def fit(self, env, num_iteration, do_train=False):\n\n #s, a, r, new_s, d = get_multi_step_sample(one_step_memory, self.gamma, self.num_step)\n #self.replay_memory.append((s, a, r, new_s, d))\n # epsilon update\n num_env = env.num_process\n env.reset()\n\n for t in range(0, num_iteration, num_env):\n self.global_step += 1\n #print(\"Global_step: {}\".format(self.global_step))\n old_state, action, reward, new_state, is_terminal = self.get_multi_step_sample(env)\n self.replay_memory.append(old_state, action, reward, new_state, is_terminal)\n\n \"\"\"\n Epsilon update\n epsilon begin 1.0, end up 0.1\n FIX\n \"\"\"\n\n self.epsilon = self.epsilon+ num_env*self.epsilon_increment if self.epsilon > EPSILON_END else EPSILON_END\n num_update = sum([1 if i%self.update_freq == 0 else 0 for i in range(t, t+num_env)])\n if do_train:\n for _ in range(num_update):\n\n if self.per == 1:\n (old_state_list, action_list, reward_list, new_state_list, is_terminal_list), \\\n idx_list, p_list, sum_p, count = self.replay_memory.sample(self.batch_size)\n else:\n old_state_list, action_list, reward_list, new_state_list, is_terminal_list \\\n = self.replay_memory.sample(self.batch_size)\n\n feed_dict = {self.target_s: new_state_list.astype(np.float32)/255. 
,\n self.s : old_state_list.astype(np.float32)/255.,\n self.a_ph: list(enumerate(action_list)),\n self.r_ph: np.array(reward_list).astype(np.float32),\n self.d_ph: np.array(is_terminal_list).astype(np.float32),\n }\n\n if self.double:\n action_chosen_by_online = self.sess.run(self.a,\n feed_dict={\n self.s: new_state_list.astype(np.float32)/255.})\n feed_dict[self.a_for_new_state_ph] = list(enumerate(action_chosen_by_online))\n\n if self.per == 1:\n # Annealing weight beta\n feed_dict[self.loss_weight_ph] = (np.array(p_list) * count / sum_p) ** (-self.beta)\n error, _ = self.sess.run([self.error_op, self.train_op], feed_dict=feed_dict)\n self.replay_memory.update(idx_list, error)\n\n else:\n self.sess.run(self.train_op, feed_dict=feed_dict)\n\n self.update_time += 1\n\n if self.beta < BETA_END:\n self.beta += self.beta_increment\n\n if (self.update_time)%self.target_update_freq == 0 :\n #print(\"Step: {} \".format(self.update_time) + \"target_network update\")\n self.sess.run([self.target_update])\n #print(\"Step: {} \".format(self.update_freq) + \"Network save\")\n self.save_model()", "def test_reward_valid(env_name, reward_type):\n venv = util.make_vec_env(env_name, n_envs=1, parallel=False)\n TRAJECTORY_LEN = 10\n obs = _sample(venv.observation_space, TRAJECTORY_LEN)\n actions = _sample(venv.action_space, TRAJECTORY_LEN)\n next_obs = _sample(venv.observation_space, TRAJECTORY_LEN)\n steps = np.arange(0, TRAJECTORY_LEN)\n\n with serialize.load_reward(reward_type, \"foobar\", venv) as reward_fn:\n pred_reward = reward_fn(obs, actions, next_obs, steps)\n\n assert pred_reward.shape == (TRAJECTORY_LEN,)\n assert isinstance(pred_reward[0], numbers.Number)", "def gae(done, rewards, values, n_envs, steps_per_env, gamma, gae_lambda, device):\n advantages = torch.zeros((n_envs, steps_per_env, 1), dtype=torch.float, device=device)\n last_advantage = 0\n for state in reversed(range(steps_per_env)):\n error = rewards[:, state] + gamma * values[:, state + 1] * (~done[:, state]) - values[:, state]\n last_advantage = (error + gamma * gae_lambda * last_advantage * (~done[:, state]))\n\n advantages[:, state] = last_advantage\n\n return advantages", "def set_reward(self, reward):\n self.reward = reward", "def reinforce(env, estimator_policy, estimator_value, num_episodes, discount_factor=1.0):\n\n # Keeps track of useful statistics\n stats = plotting.EpisodeStats(\n episode_lengths=np.zeros(num_episodes),\n episode_rewards=np.zeros(num_episodes)) \n \n Transition = collections.namedtuple(\"Transition\", [\"state\", \"action\", \"reward\", \"next_state\", \"done\"])\n \n for i_episode in range(num_episodes):\n # Reset the environment and pick the fisrst action\n state = env.reset()\n \n episode = []\n \n # One step in the environment\n for t in itertools.count():\n \n # Take a step\n #action_means = np.ndarray.flatten(estimator_policy.predict(state))\n #action = np.random.multivariate_normal(mean=action_means, cov=full_var)\n\n action = estimator_policy.predict(state)\n\n '''\n max_idx = np.argmax(np.abs(action))\n a_max = action[max_idx]\n\n if a_max > high_threshold or a_max < low_threshold: \n action_clipped = action / (10*np.abs(a_max))\n '''\n\n\n '''\n action_clipped = [np.max([np.min([action[0], high_threshold]), low_threshold]), \n np.max([np.min([action[1], high_threshold]),\n low_threshold])]\n '''\n\n next_state, reward, done, _ = env.step(action)\n\n '''\n if t > 50:\n done = True\n '''\n \n # Keep track of the transition\n episode.append(Transition(\n state=state, action=action, reward=reward, 
next_state=next_state, done=done))\n \n # Update statistics \n stats.episode_rewards[i_episode] += reward\n stats.episode_lengths[i_episode] = t\n \n '''\n # Print out which step we're on, useful for debugging.\n print(\"\\rStep {} @ Episode {}/{} ({})\".format(\n t, i_episode + 1, num_episodes, stats.episode_rewards[i_episode - 1]), end=\"\")\n # sys.stdout.flush()\n '''\n\n if done:\n break\n \n state = next_state\n \n monitor_epoch = 50\n if i_episode % monitor_epoch == 0 and i_episode > 0: \n print(\"avg reward : %f\" %(np.mean(stats.episode_rewards[i_episode-monitor_epoch:i_episode])))\n\n\n baseline_value = np.mean([cur_trans.reward for cur_trans in episode]) \n\n # Go through the episode and make policy updates\n for t, transition in enumerate(episode):\n # The return after this timestep\n total_return = sum(discount_factor**i * t.reward for i, t in enumerate(episode[t:]))\n \n advantage = total_return - baseline_value\n #advantage += np.max([0, baseline_value - v_prev])\n\n # Update our policy estimator\n estimator_policy.update(transition.state, advantage, transition.action)\n\n #print(p_action)\n\n if i_episode % 200 == 0 and i_episode > 0:\n plt.figure(1)\n plt.plot(transition.state[0], transition.state[1], 'bo')\n if t == len(episode) - 1:\n plt.show()\n \n \n return stats", "def train(self, env):\n\n\t\tmin_average_reward_for_stopping = 195\n\t\tconsecutive_successful_episodes_to_stop = 10\n\t\tlast_10_rewards = deque(maxlen=consecutive_successful_episodes_to_stop)\n\n\t\tnum_Episodes = []\n\t\tEpisode_Rewards = []\n\n\t\tfor episode in range(self.episodes):\n\t\t\tstate = env.reset()\n\t\t\tstate = np.reshape(state, [1, self.state_size])\n\t\t\tdone = False\n\t\t\ttotal_reward = 0\n\n\t\t\twhile not done:\n\t\t\t\taction = self.act(state)\n\t\t\t\tnext_state, reward, done, _ = env.step(action)\n\t\t\t\tnext_state = np.reshape(next_state, [1, self.state_size])\n\t\t\t\tself.remember(state, action, reward, next_state, done)\n\t\t\t\tstate = next_state\n\t\t\t\ttotal_reward += reward\n\n\t\t\tnum_Episodes.append(episode)\n\t\t\tEpisode_Rewards.append(total_reward)\n\t\t\tlast_10_rewards.append(total_reward)\n\t\t\tlast_10_avg_reward = np.mean(last_10_rewards)\n\t\t\tprint(episode, last_10_avg_reward)\n\n\t\t\t# call experience relay\n\t\t\tif len(self.memory) >= self.batch_size:\n\t\t\t\tself.replay(self.batch_size)\n\t\t\t# Stopping criteria\n\t\t\tif len(\n\t\t\t\t\tlast_10_rewards) == consecutive_successful_episodes_to_stop \\\n\t\t\t\t\tand last_10_avg_reward > min_average_reward_for_stopping:\n\t\t\t\tprint(\"Solved after {} epsiodes\".format(episode))\n\t\t\t\tbreak", "def __init__(self, env):\n\n self.env = env\n self.env_info = env.env_info\n\n self.best_policy = None\n self.best_score = 0\n\n # Time to run the environment for during training and evaluation\n self.max_t = MAX_T\n # Maximum possible reward given the environment\n if (self.env.max_reward_per_timestep is None\n and self.env.max_reward_per_episode is None):\n raise ValueError(\"Either max_reward_per_timestep or \"\n \"max_reward_per_episode needs to be set.\")\n elif (self.env.max_reward_per_timestep is None\n and self.env.max_reward_per_episode is None):\n raise ValueError(\"Either max_reward_per_timestep or \"\n \"max_reward_per_episode needs to be None.\")\n elif self.env.max_reward_per_timestep is not None:\n self.max_reward = self.env.max_reward_per_timestep * self.max_t\n else:\n self.max_reward = self.env.max_reward_per_episode", "def __init__(self, \n ns: str, \n reward_fnc: str, \n 
is_action_space_discrete, \n safe_dist: float = None, \n goal_radius: float = 0.1, \n max_steps_per_episode=100, \n train_mode: bool = True, \n debug: bool = False,\n task_mode: str = \"staged\",\n PATHS: dict = dict(),\n extended_eval: bool = False,\n *args, **kwargs):\n super(FlatlandEnv, self).__init__()\n\n self.ns = ns\n try:\n # given every environment enough time to initialize, if we dont put sleep,\n # the training script may crash.\n ns_int = int(ns.split(\"_\")[1])\n time.sleep(ns_int*2)\n except Exception:\n rospy.logwarn(f\"Can't not determinate the number of the environment, training script may crash!\")\n pass\n\n\n # process specific namespace in ros system\n self.ns_prefix = '' if (ns == '' or ns is None) else '/'+ns+'/'\n \n if not debug:\n if train_mode:\n rospy.init_node(f'train_env_{self.ns}', disable_signals=False)\n else:\n rospy.init_node(f'eval_env_{self.ns}', disable_signals=False)\n\n self._extended_eval = extended_eval\n self._is_train_mode = rospy.get_param(\"/train_mode\")\n self._is_action_space_discrete = is_action_space_discrete\n \n self.setup_by_configuration(PATHS['robot_setting'], PATHS['robot_as'])\n\n # set rosparam\n rospy.set_param(\"/laser_num_beams\", self._laser_num_beams)\n \n # observation collector\n self.observation_collector = ObservationCollector(\n self.ns, self._laser_num_beams, self._laser_max_range)\n self.observation_space = self.observation_collector.get_observation_space()\n\n # reward calculator\n if safe_dist is None:\n safe_dist = 1.6*self._robot_radius\n\n self.reward_calculator = RewardCalculator(\n robot_radius=self._robot_radius, safe_dist=1.6*self._robot_radius, goal_radius=goal_radius, \n rule=reward_fnc, extended_eval=self._extended_eval)\n\n # action agent publisher\n if self._is_train_mode:\n self.agent_action_pub = rospy.Publisher(f'{self.ns_prefix}cmd_vel', Twist, queue_size=1)\n else:\n self.agent_action_pub = rospy.Publisher(f'{self.ns_prefix}cmd_vel_pub', Twist, queue_size=1)\n\n # service clients\n if self._is_train_mode:\n self._service_name_step = f'{self.ns_prefix}step_world'\n self._sim_step_client = rospy.ServiceProxy(\n self._service_name_step, StepWorld)\n \n # instantiate task manager\n self.task = get_predefined_task(\n ns, mode=task_mode, start_stage=kwargs['curr_stage'], PATHS=PATHS)\n\n self._steps_curr_episode = 0\n self._max_steps_per_episode = max_steps_per_episode\n\n # for extended eval\n self._action_frequency = 1/rospy.get_param(\"/robot_action_rate\")\n self._last_robot_pose = None\n self._distance_travelled = 0\n self._safe_dist_counter = 0\n self._collisions = 0\n self._in_crash = False", "def __init__(self, limited_scenarios: bool = False, increment_actions: bool = False, bizarre_states: bool = False):\n super(WatershedEnv, self).__init__()\n\n self.relevant_q1 = limited_Q1 if limited_scenarios else all_Q1\n self.relevant_q2 = limited_Q2 if limited_scenarios else all_Q2\n self.relevant_s = limited_S if limited_scenarios else all_S\n self.number_of_scenarios = len(self.relevant_q1)\n\n self.bizarre_states = bizarre_states\n self.increment_actions = increment_actions\n\n # Internal details\n self.step_number = 0\n self.step_number_one_hot = None\n self.total_number_of_episodes = 100 if increment_actions else 50\n\n # Fitness\n self.previous_fitness = None\n self.fitness = 0\n\n # Objective function\n self.objective_scores = [0.0] * 6\n self.total_objective_score = 0\n\n # Violations\n self.constraint_values = [0.0] * 9\n self.violations = 0\n self.violation_penalty = 0\n self.num_violations = 0\n 
self.violations_multiplier = 100\n\n self.total_violations_sum = 0\n\n # Objective function coefficients\n self.a = [-0.20, -0.06, -0.29, -0.13, -0.056, -0.15]\n self.b = [6.0, 2.5, 6.28, 6.0, 3.74, 7.6]\n self.c = [-5.0, 0.0, -3.0, -6.0, -23.0, -15.0]\n\n # Used in verifying boundaries and computing constraints\n self.alpha = [12.0, 10.0, 8.0, 6.0, 15.0, 10.0]\n\n self.setup_environment_parameters()\n\n # Variables to be optimized\n self.x = np.zeros(6) # To make pylint happy and get better auto-completion\n self.reinitialise_state()", "def reset(self, **kwargs):\n\n # on a reset we set the health back to 120\n self.player_hp = 120\n self.enemy_hp = 120\n\n # reset the environment\n \n observation = self.env.reset(**kwargs)\n\n # we restarted inc the number\n self.num_resets += 1\n\n # the observation\n obs = self.observation(observation)\n self.current_frame_number = 0\n \n # fill up the queue\n for i in range(4):\n self.q.append(obs)\n \n return np.array(list(self.q))", "def __init__(self):\n self.last_reward_pos = 0\n super().__init__()\n self.TERRAIN_VARIANCE = 0.0\n self.stump_spacing = 4.0\n self.stump_height = 1.0\n self.my_init({'leg_length': 35, 'walker_type': 'default'})", "def reward(self, history_id, reward):\n pass", "def set_reward(self, rewards, trader):\n\n NAV_chg = float(trader.acc.nav - trader.acc.prev_nav)\n\n # maximize NAV\n #rewards[trader.ID] = NAV_chg\n\n # maximize NAV, minimize num of trades (more trades gets penalized).\n if NAV_chg >= 0:\n rewards[trader.ID] = NAV_chg / (trader.acc.num_trades + 1)\n else:\n rewards[trader.ID] = NAV_chg * (trader.acc.num_trades + 1)\n\n trader.acc.reward = rewards[trader.ID]\n\n return rewards", "def init_environment(env_name):\n env = gym.make(env_name)\n discrete = False\n if type(env.action_space) is gym.spaces.Discrete:\n discrete = True\n else:\n env = NormalizedActions(env)\n return env, discrete", "def get_reward(self, observations, actions):\n\n #initialize and reshape as needed, for batch mode\n self.reward_dict = {}\n if(len(observations.shape)==1):\n observations = np.expand_dims(observations, axis = 0)\n actions = np.expand_dims(actions, axis = 0)\n batch_mode = False\n else:\n batch_mode = True\n\n #get vars\n xvel = observations[:, 9].copy()\n body_angle = observations[:, 2].copy()\n front_leg = observations[:, 6].copy()\n front_shin = observations[:, 7].copy()\n front_foot = observations[:, 8].copy()\n zeros = np.zeros((observations.shape[0],)).copy()\n\n # ranges\n leg_range = 0.2\n shin_range = 0\n foot_range = 0\n penalty_factor = 10\n\n #calc rew\n self.reward_dict['run'] = xvel\n\n front_leg_rew = zeros.copy()\n front_leg_rew[front_leg>leg_range] = -penalty_factor\n self.reward_dict['leg'] = front_leg_rew\n\n front_shin_rew = zeros.copy()\n front_shin_rew[front_shin>shin_range] = -penalty_factor\n self.reward_dict['shin'] = front_shin_rew\n\n front_foot_rew = zeros.copy()\n front_foot_rew[front_foot>foot_range] = -penalty_factor\n self.reward_dict['foot'] = front_foot_rew\n\n # total reward\n self.reward_dict['r_total'] = self.reward_dict['run'] + self.reward_dict['leg'] + self.reward_dict['shin'] + self.reward_dict['foot']\n\n #return\n dones = zeros.copy()\n if(not batch_mode):\n return self.reward_dict['r_total'][0], dones[0]\n return self.reward_dict['r_total'], dones", "def update_policy(self, minibatch_size):\n \n steps = self.rewards.shape[0]\n batch_size = self.rewards.shape[0] * self.rewards.shape[1]\n #steps = 500\n #batch_size = 500\n #print(steps)\n #print(batch_size)\n \n # Compute advantages\n 
'''\n with torch.no_grad():\n if self.gae:\n advantages = torch.zeros_like(self.rewards).to(self.training_device)\n lastgaelam = 0\n for t in reversed(range(steps)):\n if t == steps - 1:\n nextnonterminal = 1.0 - self.dones[t]\n nextvalues = self.state_values[t]\n else:\n nextnonterminal = 1.0 - self.dones[t + 1]\n nextvalues = self.state_values[t + 1]\n delta = self.rewards[t] + self.gamma * nextvalues * nextnonterminal - self.state_values[t]\n advantages[t] = lastgaelam = delta + self.gamma * self.gae_lambda * nextnonterminal * lastgaelam\n returns = advantages + self.state_values\n else:\n returns = torch.zeros_like(self.rewards).to(self.training_device)\n for t in reversed(range(steps)):\n if t == steps - 1:\n nextnonterminal = 1.0 - self.dones[t]\n next_return = self.state_values[t]\n else:\n nextnonterminal = 1.0 - self.dones[t+1]\n next_return = returns[t+1]\n returns[t] = self.rewards[t] + self.gamma * nextnonterminal * next_return\n advantages = returns - self.state_values\n ''' \n returns = torch.zeros_like(self.rewards).to(self.training_device)\n for t in reversed(range(steps)):\n if t == steps - 1:\n nextnonterminal = 1.0 - self.dones[t]\n next_return = self.state_values[t]\n else:\n nextnonterminal = 1.0 - self.dones[t+1]\n next_return = returns[t+1]\n returns[t] = self.rewards[t] + self.gamma * nextnonterminal * next_return\n advantages = returns - self.state_values\n \n\n # flatten the batch\n #b_obs = self.states.reshape((-1,) + self.state_space)\n #print(self.states.shape)\n b_obs = self.states.reshape((-1,4)).detach()\n b_logprobs = self.action_probs.reshape(-1,1).detach()\n b_actions = self.actions.reshape((-1,)).detach()\n b_advantages = advantages.reshape(-1,1)\n b_returns = returns.reshape(-1,1)\n b_values = self.state_values.reshape(-1,1)\n \n # Optimize policy and value network for K epochs, run optimization in minibatches\n \n inds = np.arange(batch_size)\n for i_epoch_pi in range(self.epochs):\n np.random.shuffle(inds)\n for start in range(0, batch_size, minibatch_size):\n end = start + minibatch_size\n minibatch_ind = inds[start:end]\n mb_advantages = b_advantages[minibatch_ind]\n if self.norm_adv:\n mb_advantages = (mb_advantages - mb_advantages.mean()) / (mb_advantages.std() + 1e-8)\n \n #_, newlogproba, entropy = self.get_action(b_obs[minibatch_ind], b_actions[minibatch_ind])\n newlogproba, entropy = self.evaluate(b_obs[minibatch_ind], b_actions[minibatch_ind])\n #ratio = (newlogproba - b_logprobs[minibatch_ind]).exp()\n ratio = torch.exp((newlogproba - b_logprobs[minibatch_ind].detach()))\n \n # Stats\n approx_kl = (b_logprobs[minibatch_ind] - newlogproba).mean()\n\n # Policy loss\n pg_loss1 = -mb_advantages * ratio\n pg_loss2 = -mb_advantages * torch.clamp(ratio, 1 - self.clip_epsilon, 1 + self.clip_epsilon)\n pg_loss = torch.max(pg_loss1, pg_loss2).mean()\n entropy_loss = entropy.mean()\n\n # Value loss\n _, new_values = self.policy.forward(b_obs[minibatch_ind])\n if self.clip_vloss:\n \n v_loss_unclipped = self.MseLoss(new_values,b_returns[minibatch_ind])\n #v_loss_unclipped = ((new_values - b_returns[minibatch_ind]) ** 2)\n v_clipped = b_values[minibatch_ind] + torch.clamp(new_values - b_values[minibatch_ind],\n -self.clip_epsilon, self.clip_epsilon)\n #v_loss_clipped = (v_clipped - b_returns[minibatch_ind]) ** 2\n v_loss_clipped = self.MseLoss(v_clipped,b_returns[minibatch_ind])\n v_loss_max = torch.max(v_loss_unclipped, v_loss_clipped)\n #v_loss = 0.5 * v_loss_max.mean()\n v_loss = 0.5 * v_loss_max\n else:\n #v_loss = 0.5 * ((new_values - 
b_returns[minibatch_ind]) ** 2).mean()\n v_loss = self.MseLoss(new_values,b_returns[minibatch_ind])\n\n loss = pg_loss + v_loss * self.vf_coeff - self.ent_coeff * entropy_loss\n\n self.optimizer.zero_grad()\n loss.backward()\n torch.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)\n self.optimizer.step()\n # Copy new weights into old policy:\n self.old_policy.load_state_dict(self.policy.state_dict())", "def reset(self):\n # reset rewards\n self.reward = torch.zeros(self.batch_size, self.game_num_agents)\n\n self.has_been = torch.zeros(self.batch_size, self.nsteps, self.game_num_agents)\n\n self.terminal = torch.zeros(self.batch_size)\n\n self.step_counter = 1\n\n self.active", "def stored_reset(self):\r\n\t\tself.stored_reward = np.zeros((self.num_timesteps - self.first_considered_reward_step,))\r\n\t\tself.stored_optimum = np.zeros_like(self.stored_reward)", "def make_env(num_steps, stack=True, scale_rew=True):\n env = retro.make(game='SonicTheHedgehog-Genesis', state='GreenHillZone.Act1')\n env = gym.wrappers.TimeLimit(env, max_episode_steps=num_steps)\n env = SonicDiscretizer(env)\n env = AllowBacktracking(env)\n if scale_rew:\n env = RewardScaler(env, REWARD_RATE)\n env = WarpFrame(env)\n if stack:\n env = FrameStack(env, NUM_STATES)\n return env", "def evaluate(self, env, num_episode, epsilon):\n num_environment = env.num_process\n env.reset()\n reward_of_each_environment = np.zeros(num_environment)\n rewards_list = []\n\n num_finished_episode = 0\n\n while num_finished_episode < num_episode:\n old_state, action, reward, new_state, is_terminal = env.get_state()\n action = self.get_action(new_state, epsilon)\n env.take_action(action)\n for i, r, is_t in zip(range(num_environment), reward, is_terminal):\n if not is_t:\n reward_of_each_environment[i] += r\n else:\n rewards_list.append(reward_of_each_environment[i])\n reward_of_each_environment[i] = 0\n num_finished_episode += 1\n return np.mean(rewards_list), np.std(rewards_list), self.epsilon", "def interact(self, n_steps=100, verbose=False, add_last_observation=True):\n\n\n def env_step(i,action):\n \"\"\"environment reaction,\n :returns: observation, reward, is_alive, info\"\"\"\n\n if not self.just_ended[i]:\n new_observation, cur_reward,is_done,info = self.envs[i].step(action)\n if is_done:\n # game ends now, will finalize on next tick\n self.just_ended[i] = True\n new_observation = self.preprocess_observation(new_observation)\n\n #note: is_alive=True in any case because environment is still alive (last tick alive) in our notation\n return new_observation, cur_reward,True,info\n\n\n else:\n assert self.just_ended[i] == True\n\n # reset environment, get new observation to be used on next tick\n new_observation = self.preprocess_observation(self.envs[i].reset())\n\n #reset memory for new episode\n for m_i in range(len(new_memory_states)):\n new_memory_states[m_i][i] = 0\n\n if verbose:\n print(\"env %i reloaded\" % i)\n\n self.just_ended[i] = False\n\n return new_observation,0,False,{'end':True}\n\n\n history_log = []\n\n for i in range(n_steps - int(add_last_observation)):\n res = self.agent_step(self.prev_observations, *self.prev_memory_states)\n actions, new_memory_states = res[0],res[1:]\n\n new_observations, cur_rewards, is_alive, infos = \\\n zip(*map(env_step,range(len(self.envs)),actions))\n\n\n # append data tuple for this tick. 
Is alive is always True\n history_log.append((self.prev_observations, actions, cur_rewards, new_memory_states, is_alive, infos))\n\n self.prev_observations = new_observations\n self.prev_memory_states = new_memory_states\n\n if add_last_observation:\n fake_actions = np.array([env.action_space.sample() for env in self.envs])\n fake_rewards = np.zeros(shape=len(self.envs))\n is_fake_alive = np.ones(shape=len(self.envs))\n history_log.append((self.prev_observations,fake_actions,fake_rewards,self.prev_memory_states,\n is_fake_alive,[None]*len(self.envs)))\n\n # cast to numpy arrays\n observation_log, action_log, reward_log, memories_log, is_alive_log, info_log = zip(*history_log)\n\n # tensor dimensions\n # [batch_i, time_i, observation_size...]\n observation_log = np.array(observation_log).swapaxes(0, 1)\n\n # [batch, time, units] for each memory tensor\n memories_log = map(lambda mem: np.array(mem).swapaxes(0, 1), zip(*memories_log))\n\n # [batch_i,time_i]\n action_log = np.array(action_log).swapaxes(0, 1)\n\n # [batch_i, time_i]\n reward_log = np.array(reward_log).swapaxes(0, 1)\n\n # [batch_i, time_i]\n is_alive_log = np.array(is_alive_log).swapaxes(0, 1).astype('uint8')\n\n\n return observation_log, action_log, reward_log, memories_log, is_alive_log, info_log", "def __init__(\n self,\n *,\n demonstrations: base.AnyTransitions,\n demo_batch_size: int,\n venv: vec_env.VecEnv,\n gen_algo: base_class.BaseAlgorithm,\n reward_net: reward_nets.RewardNet,\n **kwargs,\n ):\n # Raw self._reward_net is discriminator logits\n reward_net = reward_net.to(gen_algo.device)\n # Process it to produce output suitable for RL training\n # Applies a -log(sigmoid(-logits)) to the logits (see class for explanation)\n self._processed_reward = RewardNetFromDiscriminatorLogit(reward_net)\n super().__init__(\n demonstrations=demonstrations,\n demo_batch_size=demo_batch_size,\n venv=venv,\n gen_algo=gen_algo,\n reward_net=reward_net,\n **kwargs,\n )", "def reset(self, evaluate=False):\n self.reset_env()\n for _ in range(20):\n self.__publish_cmd(0.0, 0.0)\n self.rate.sleep()\n self.__publish_cmd(1.0, 0.0)\n self.rate.sleep()\n self.prev_pos_index, _ = self.__find_closest_point(1088)\n self.prev_reward = 0.0\n \n \n # If this run isn't being evaluated, we need to add some randomness to the starting point or else the agents will not learn well\n # if not evaluate:\n # random_steps = np.random.randint(0, 20)\n # ref = self.ref_track\n # l = len(ref)\n # curr_pos_index, _ = self.__find_closest_point(self.prev_pos_index)\n # for _ in range(random_steps):\n # ranges = self.get_state()\n # max_range = np.max(ranges)\n # indices = np.where(ranges>=max_range)\n # target_index = np.mean(indices)\n # angle = ((2 * self.lidar_angle) / len(ranges)) * target_index - self.lidar_angle\n \n # # future_pos_index = (curr_pos_index + 20) % l\n # # goal_point = ref[future_pos_index]\n # # print(goal_point)\n # # curr_pos = copy.copy(self.pos) # [x, y, yaw, speed]\n # # dx = goal_point[0] - curr_pos[0]\n # # dy = goal_point[1] - curr_pos[1]\n # # yaw = curr_pos[2]\n # # xgv = (dx * np.cos(yaw)) + (dy * np.sin(yaw))\n # # ygv = (-dx * np.sin(yaw)) + (dy * np.cos(yaw))\n # # angle = -np.arctan2(ygv,xgv)\n # # print(angle)\n # self.step(angle)\n # curr_pos_index, _ = self.__find_closest_point(curr_pos_index)\n \n # self.prev_pos_index = copy.copy(curr_pos_index)\n\n \n # print(self.prev_pos_index)\n\n return", "def __init__(self, env):\n self.env = env\n # set up observation space\n high = np.inf\n low = -high\n\n obs_spec = 
env.observation_spec()\n\n space_spec = {}\n\n for k,v in obs_spec.items():\n space_spec[k]=spaces.Box(low=low,high=high, shape=v)\n\n\n self.observation_space = spaces.Dict(space_spec)\n\n # setup action space\n low, high = self.env.action_spec\n self.action_space = spaces.Box(low=low, high=high)\n\n self.reward_range = self.env.reward_range", "def reset_env(env, num_active_adv=0):\n if hasattr(env, 'domain_randomization'):\n env.domain_randomization = False\n if num_active_adv > 0:\n env.adversary_range = env.advs_per_strength * env.num_adv_strengths", "def _action_rewards(self, context) -> ActionRewards:\n pass", "def eval(self) -> None:\n\n config = self.config.clone()\n\n if len(self.config.VIDEO_OPTION) > 0:\n config.defrost()\n config.NUM_ENVIRONMENTS = 1\n config.freeze()\n\n logger.info(f\"env config: {config}\")\n with construct_envs(config, get_env_class(config.ENV_NAME)) as envs:\n observations = envs.reset()\n batch = batch_obs(observations, device=self.device)\n\n current_episode_reward = torch.zeros(\n envs.num_envs, 1, device=self.device\n )\n ppo_cfg = self.config.RL.PPO\n test_recurrent_hidden_states = torch.zeros(\n config.NUM_ENVIRONMENTS,\n self.actor_critic.net.num_recurrent_layers,\n ppo_cfg.hidden_size,\n device=self.device,\n )\n prev_actions = torch.zeros(\n config.NUM_ENVIRONMENTS,\n 1,\n device=self.device,\n dtype=torch.long,\n )\n not_done_masks = torch.zeros(\n config.NUM_ENVIRONMENTS,\n 1,\n device=self.device,\n dtype=torch.bool,\n )\n\n rgb_frames = [\n [] for _ in range(self.config.NUM_ENVIRONMENTS)\n ] # type: List[List[np.ndarray]]\n\n if len(config.VIDEO_OPTION) > 0:\n os.makedirs(config.VIDEO_DIR, exist_ok=True)\n\n self.actor_critic.eval()\n\n for _i in range(config.TASK_CONFIG.ENVIRONMENT.MAX_EPISODE_STEPS):\n current_episodes = envs.current_episodes()\n\n with torch.no_grad():\n (\n _,\n actions,\n _,\n test_recurrent_hidden_states,\n ) = self.actor_critic.act(\n batch,\n test_recurrent_hidden_states,\n prev_actions,\n not_done_masks,\n deterministic=False,\n )\n\n prev_actions.copy_(actions)\n\n outputs = envs.step([a[0].item() for a in actions])\n\n observations, rewards, dones, infos = [\n list(x) for x in zip(*outputs)\n ]\n batch = batch_obs(observations, device=self.device)\n\n not_done_masks = torch.tensor(\n [[not done] for done in dones],\n dtype=torch.bool,\n device=\"cpu\",\n )\n\n rewards = torch.tensor(\n rewards, dtype=torch.float, device=self.device\n ).unsqueeze(1)\n\n current_episode_reward += rewards\n\n # episode ended\n if not not_done_masks[0].item():\n generate_video(\n video_option=self.config.VIDEO_OPTION,\n video_dir=self.config.VIDEO_DIR,\n images=rgb_frames[0],\n episode_id=current_episodes[0].episode_id,\n checkpoint_idx=0,\n metrics=self._extract_scalars_from_info(infos[0]),\n tb_writer=None,\n )\n\n print(\"Evaluation Finished.\")\n print(\"Success: {}\".format(infos[0][\"episode_success\"]))\n print(\n \"Reward: {}\".format(current_episode_reward[0].item())\n )\n print(\n \"Distance To Goal: {}\".format(\n infos[0][\"object_to_goal_distance\"]\n )\n )\n\n return\n\n # episode continues\n elif len(self.config.VIDEO_OPTION) > 0:\n frame = observations_to_image(observations[0], infos[0])\n rgb_frames[0].append(frame)\n\n not_done_masks = not_done_masks.to(device=self.device)", "def reward(self, **kwargs):\n # rewards = self.get_mos_from_aps()\n rewards = self.get_mos_from_localhost()\n self.log.debug(\"Collect MOS: {}\".format(rewards))\n\n # obtain the global reward\n avgs = []\n for i in rewards:\n m = 
np.nanmean(rewards[i])\n # trunc to bounds\n if m < 1:\n m = 1\n elif m > 5:\n m = 5\n avgs.append(m)\n self.log.debug(\"Avgs: {}\".format(avgs))\n if np.any(np.isnan(avgs)):\n # if found a nan, that means an error (e.g. a disconnected station)\n r = [np.nan for _ in rewards]\n else:\n avgs = np.array(avgs)\n C = self.DEFAULT_C if 'C' not in kwargs else kwargs['C']\n r = reward_hossfeld(avgs, C=C)\n self.log.info(\"Hossfeld reward: {} C: {}\".format(r, C))\n\n self.log.info(\"Rewards: {}\".format(r))\n return r", "def __init__(self, init_pose=None, init_velocities=None, \n init_angle_velocities=None, runtime=5., target_pos=None, reward_function=None, angle_penalty=10.):\n # Simulation\n self.sim = PhysicsSim(init_pose, init_velocities, init_angle_velocities, runtime) \n self.action_repeat = 3\n\n self.state_size = self.action_repeat * 6\n self.action_low = 0\n self.action_high = 900\n self.action_size = 4\n self.angle_penalty = angle_penalty\n self.max_v_length = 0.\n\n # Goal\n self.target_pos = target_pos if target_pos is not None else np.array([0., 0., 10.]) \n \n self.distance_to_target = self.vector_length(self.target_pos - self.sim.pose[:3])\n \n if( reward_function == 6 ):\n self.reward_function_str = 'Using Reward Function 6'\n self.reward_function = self.reward_function_6\n elif( reward_function == 5 ):\n self.reward_function_str = 'Using Reward Function 5'\n self.reward_function = self.reward_function_5\n elif( reward_function == 4 ):\n self.reward_function_str = 'Using Reward Function 4'\n self.reward_function = self.reward_function_4\n elif( reward_function == 3 ):\n self.reward_function_str = 'Using Reward Function 3'\n self.reward_function = self.reward_function_3\n elif( reward_function == 2 ):\n self.reward_function_str = 'Using Reward Function 2'\n self.reward_function = self.reward_function_2\n elif( reward_function == 1 ):\n self.reward_function_str = 'Using Reward Function 1'\n self.reward_function = self.reward_function_1\n else:\n self.reward_function_str = 'Using Deafault Reward Function'\n self.reward_function = self.default_reward", "def q_learning_simulation(env, N=1000):\n env.reset()\n agent = QLearningAgent(env, use_sklearn=True)\n total_rewards = np.empty(N)\n\n for n in tqdm(range(N)):\n # There are 3 different kinds of epsilon to try\n #eps = 1.0/np.sqrt(n+1)\n #eps = 0.3\n eps = 0.1*(0.97**n)\n total_reward = agent.play(epsilon=eps, env=env, gamma=0.99)\n total_rewards[n] = total_reward\n if (n + 1) % 100 == 0:\n telegram_send.send(messages=[\"episode: {}, total reward: {}\".format(n, total_reward)])\n\n avgReward = total_rewards[-100:].mean()\n totalsteps = -total_rewards.sum()\n telegram_send.send(messages=[\n \"Agent training complete! 
Please check your plots.\",\n \"Average reward for last 100 episodes: {}.\".format(avgReward),\n \"Total steps: {}\".format(totalsteps)\n ])\n\n # Sanity check on training\n plt.plot(total_rewards)\n plt.title(\"Rewards\")\n plt.show()\n plot_running_avg(total_rewards)\n\n # Test if the agent is properly trained\n agent.play(epsilon=0, env=env, gamma=0.99, render=True)", "def run_one_step(self):\n # Get the current state, action and initialise the reward\n state = copy.copy(self.env.get_state())\n action = self.agent.get_action(state)\n reward = 0.0\n # Check if the environment has reached a terminal state\n if self.env.check_terminal() is False:\n # Save the initial state and action to an 'experience'\n latest_experience = Experience(copy.copy(state), copy.copy(action))\n # Update the environment using the chosne action\n self.env.update(action)\n # Get the reward to attribute to the agent and save to the experience to save\n reward = self.env.get_reward()\n latest_experience.reward = copy.copy(reward)\n # Get the updated state\n state = self.env.get_state()\n if self.env.check_terminal() is False:\n # If the new state isn't terminal, save the next action and the 'done' flag to the experience\n action = self.agent.get_action(state)\n latest_experience.done = False\n else:\n # If the new state is terminal, save a dummy action and the 'done' flag to the experience\n action = 0.0\n latest_experience.done = True\n latest_experience.next_state = copy.copy(state)\n latest_experience.next_action = copy.copy(action)\n # Update the history with the latest experience\n self.agent.update_history(copy.copy(latest_experience))\n # Update the agents policy using a batch of experiences chosen from the history\n self.agent.update_policy_batch(max(1, self.batch_size))\n self.count += 1\n # Update the target network if appropriate\n if self.update_target_rate is not None:\n if self.count % self.update_target_rate == 0:\n self.agent.policy.learner.update_target_network()\n else:\n # If the environment is in a terminal state, record this and perform a policy update\n latest_experience = Experience(copy.copy(state), copy.copy(action))\n latest_experience.reward = 0.0\n latest_experience.next_state = copy.copy(state)\n latest_experience.next_action = 0.0\n latest_experience.done = True\n self.agent.update_history(copy.copy(latest_experience))\n self.agent.update_policy_batch(max(1, self.batch_size))\n self.count = 0\n return reward", "def evaluate(self, env, num_episodes, max_episode_length=None):\n self.mode = 'test'\n\n average_episode_length = 0\n rewards = []\n\n for i in range(num_episodes):\n state = env.reset()\n t = 0\n episode_reward = 0.0\n while True:\n t += 1\n action, _ = self.select_action(state)\n next_state, reward, is_terminal, debug_info = env.step(action)\n episode_reward += reward\n average_episode_length += 1\n\n if is_terminal or (max_episode_length is not None and t > max_episode_length):\n break\n\n state = next_state\n\n rewards.append(episode_reward)\n self.mode = 'train'\n return np.mean(rewards), np.std(rewards), average_episode_length / num_episodes", "def _random_warmup(self, num_steps):\n new_frame = self.env.reset()\n reward = 0.0\n action = 0\n done = False\n self.memory.add_experience(action, reward, new_frame, 1, done)\n \n for i in range(num_steps):\n \n action = np.random.randint(self.num_actions)\n new_frame, reward, done, _ = self.env.step(action)\n self.memory.add_experience(action, reward, new_frame, 1, done)\n \n if done:\n new_frame = self.env.reset()\n 
self.memory.add_experience(0, 0.0, new_frame, 1, False)\n\n self.memory.add_experience(0, 0.0, new_frame, 1, True)", "def test_prop_reward(self):\n tmax = 10.0\n dt = 1.0\n\n reward_scale = 5.0\n\n ini_rate = 80.0\n\n tutor = SimpleNeurons(1, out_fct=lambda _: ini_rate+20.0)\n reward = MockReward(lambda t: 1.0 if t < tmax/2 else -1)\n tutor_rule = ReinforcementTutorRule(tutor, reward, tau=0,\n constrain_rates=False, ini_rate=ini_rate, learning_rate=1.0,\n use_tutor_baseline=False)\n\n sim1 = simulation.Simulation(tutor, reward, tutor_rule, dt=dt)\n sim1.run(tmax)\n\n drates1 = tutor_rule.rates - ini_rate\n\n tutor_rule.reset_rates()\n reward.reward_fct = lambda t: reward_scale if t < tmax/2 else -reward_scale\n\n sim2 = simulation.Simulation(tutor, reward, tutor_rule, dt=dt)\n sim2.run(tmax)\n\n drates2 = tutor_rule.rates - ini_rate\n\n self.assertLess(np.max(np.abs(reward_scale*drates1 - drates2)), 1e-6)", "def step_env(self):\n raise NotImplementedError\n # Not needed for this homework", "def fit(self, env, env_eval, num_iterations, max_episode_length=None):\n train_counter = 0;\n eval_res_hist = np.zeros((1,3));\n\n time_this, ob_this, is_terminal = env.reset()\n\n ob_this = self._preprocessor.process_observation(time_this, ob_this)\n\n setpoint_this = ob_this[6:8]\n \n this_ep_length = 0;\n flag_print_1 = True;\n flag_print_2 = True;\n action_counter = 0;\n \n for step in range(num_iterations):\n #Check which stage is the agent at. If at the collecting stage,\n #then the actions will be random action.\n if step <= self._num_burn_in:\n if flag_print_1:\n logging.info (\"Collecting samples to fill the replay memory...\");\n flag_print_1 = False;\n\n action_mem = self.select_action(None, stage = 'collecting');\n action = self._policy.process_action(setpoint_this, action_mem)\n\n else:\n if flag_print_2:\n logging.info (\"Start training process...\");\n flag_print_2 = False;\n\n obs_this_net = self._preprocessor.process_observation_for_network(\n ob_this, self._min_array, self._max_array)\n \n state_this_net = np.append(obs_this_net[0:13], obs_this_net[14:]).reshape(1,16)\n\n action_mem = self.select_action(state_this_net, stage = 'training')\n # covert command to setpoint action \n action = self._policy.process_action(setpoint_this, action_mem) \n\n action_counter = action_counter + 1 if action_counter < 4 else 1;\n\n time_next, ob_next, is_terminal = env.step(action)\n ob_next = self._preprocessor.process_observation(time_next, ob_next)\n \n setpoint_next = ob_next[6:8]\n \n #check if exceed the max_episode_length\n if max_episode_length != None and \\\n this_ep_length >= max_episode_length:\n is_terminal = True;\n\n #save sample into memory \n self._memory.append(Sample(ob_this, action_mem, ob_next\n , is_terminal))\n\n \n #Check which stage is the agent at. 
If at the training stage,\n #then do the training\n if step > self._num_burn_in:\n #Check the train frequency\n if action_counter % self._train_freq == 0 \\\n and action_counter > 0:\n action_counter = 0;\n #Eval the model\n if train_counter % self._eval_freq == 0:\n eval_res = self.evaluate(env_eval, self._eval_epi_num\n , show_detail = True);\n eval_res_hist = np.append(eval_res_hist\n , np.array([step\n , eval_res[0], eval_res[1]]).reshape(1, 3)\n , axis = 0);\n np.savetxt(self._log_dir + '/eval_res_hist.csv'\n , eval_res_hist, delimiter = ',');\n logging.info ('Global Step: %d, '%(step), 'evaluation average \\\n reward is %0.04f, average episode length is %d.'\\\n %eval_res);\n \n \n #Sample from the replay memory\n samples = self._preprocessor.process_batch(\n self._memory.sample(self._batch_size), \n self._min_array, self._max_array);\n #Construct target values, one for each of the sample \n #in the minibatch\n samples_x = None;\n targets = None;\n for sample in samples:\n sample_s = np.append(sample.obs[0:13], sample.obs[14:]).reshape(1,16)\n sample_s_nex = np.append(sample.obs_nex[0:13], \n sample.obs_nex[14:]).reshape(1,16)\n sample_r = self._preprocessor.process_reward(sample.obs_nex[12:15])\n\n target = self.calc_q_values(sample_s);\n a_max = self.select_action(sample_s_nex, stage = 'greedy');\n \n \n\n if sample.is_terminal:\n target[0, sample.a] = sample_r;\n else:\n target[0, sample.a] = (sample_r\n + self._gamma \n * self.calc_q_values_1(\n sample_s_nex)[0, a_max]);\n if targets is None:\n targets = target;\n else:\n targets = np.append(targets, target, axis = 0);\n if samples_x is None:\n samples_x = sample_s;\n else:\n samples_x = np.append(samples_x, sample_s, axis = 0);\n #Run the training\n \n \n feed_dict = {self._state_placeholder:samples_x\n ,self._q_placeholder:targets}\n sess_res = self._sess.run([self._train_op, self._loss]\n , feed_dict = feed_dict);\n \n #Update the target parameters\n if train_counter % self._target_update_freq == 0:\n self.update_policy();\n logging.info('Global Step %d: update target network.' 
\n %(step));\n #Save the parameters\n if train_counter % self._save_freq == 0 or step + 1 == num_iterations:\n checkpoint_file = os.path.join(self._log_dir\n , 'model_data/model.ckpt');\n self._saver.save(self._sess\n , checkpoint_file, global_step=step);\n \n if train_counter % 100 == 0:\n logging.info (\"Global Step %d: loss %0.04f\"%(step, sess_res[1]));\n # Update the events file.\n summary_str = self._sess.run(self._summary, feed_dict=feed_dict)\n self._summary_writer.add_summary(summary_str, train_counter);\n self._summary_writer.add_graph(self._sess.graph);\n self._summary_writer.flush()\n \n train_counter += 1;\n \n #check whether to start a new episode\n if is_terminal:\n time_this, ob_this, is_terminal = env.reset()\n ob_this = self._preprocessor.process_observation(time_this, ob_this)\n setpoint_this = ob_this[6:8]\n\n this_ep_length = 0;\n action_counter = 0;\n else:\n ob_this = ob_next\n setpoint_this = setpoint_next\n time_this = time_next\n this_ep_length += 1;", "def reward_shaping(self, state_desc):\r\n # Reward for not falling down\r\n reward = 10.0\r\n\r\n yaw = state_desc['joint_pos']['ground_pelvis'][2]\r\n current_v_x, current_v_z = rotate_frame(\r\n state_desc['body_vel']['pelvis'][0],\r\n state_desc['body_vel']['pelvis'][2], yaw)\r\n # leftward\r\n current_v_z = -current_v_z\r\n\r\n # current relative target theta\r\n target_v_x, target_v_z = state_desc['v_tgt_field'][0][5][5], state_desc['v_tgt_field'][1][5][5]\r\n\r\n vel_penalty = np.linalg.norm([target_v_x - current_v_x, target_v_z - current_v_z])\r\n\r\n muscle_penalty = 0\r\n for muscle in sorted(state_desc['muscles'].keys()):\r\n muscle_penalty += np.square(state_desc['muscles'][muscle]['activation'])\r\n\r\n ret_r = reward - (vel_penalty * self.vel_penalty_coeff + muscle_penalty * self.muscle_penalty_coeff) * self.penalty_coeff\r\n return ret_r", "def explorer(global_rb, queue, trained_steps, n_transition,\n is_training_done, lock, env_fn, policy_fn,\n buffer_size=1024, max_transition=None,\n episode_max_steps=1000):\n env = env_fn()\n policy = policy_fn(env, \"Explorer\", global_rb.get_buffer_size())\n local_rb = ReplayBuffer(obs_shape=env.observation_space.shape,\n act_dim=env.action_space.low.size,\n size=buffer_size)\n\n s = env.reset()\n episode_steps = 0\n total_reward = 0.\n total_rewards = []\n start = time.time()\n sample_at_start = 0\n\n while not is_training_done.is_set():\n # Periodically copy weights of explorer\n if not queue.empty():\n actor_weights, critic_weights, critic_target_weights = queue.get()\n update_target_variables(policy.actor.weights, actor_weights, tau=1.)\n update_target_variables(policy.critic.weights, critic_weights, tau=1.)\n update_target_variables(policy.critic_target.weights, critic_target_weights, tau=1.)\n\n n_transition.value += 1\n episode_steps += 1\n a = policy.get_action(s)\n s_, r, done, _ = env.step(a)\n done_flag = done\n if episode_steps == env._max_episode_steps:\n done_flag = False\n total_reward += r\n local_rb.add(s, a, r, s_, done_flag)\n\n s = s_\n if done or episode_steps == episode_max_steps:\n s = env.reset()\n total_rewards.append(total_reward)\n total_reward = 0\n episode_steps = 0\n\n # Add collected experiences to global replay buffer\n if local_rb.get_stored_size() == buffer_size - 1:\n temp_n_transition = n_transition.value\n samples = local_rb.sample(local_rb.get_stored_size())\n states, next_states, actions, rewards, done = samples[\"obs\"], samples[\"next_obs\"], samples[\"act\"], samples[\"rew\"], samples[\"done\"]\n done = np.array(done, 
dtype=np.float64)\n td_errors = policy.compute_td_error(\n states, actions, next_states, rewards, done)\n print(\"Grad: {0: 6d}\\tSamples: {1: 7d}\\tTDErr: {2:.5f}\\tAveEpiRew: {3:.3f}\\tFPS: {4:.2f}\".format(\n trained_steps.value, n_transition.value, np.average(np.abs(td_errors).flatten()),\n sum(total_rewards) / len(total_rewards), (temp_n_transition - sample_at_start) / (time.time() - start)))\n total_rewards = []\n lock.acquire()\n global_rb.add(\n states, actions, rewards, next_states, done,\n priorities=np.abs(td_errors)+1e-6)\n lock.release()\n local_rb.clear()\n start = time.time()\n sample_at_start = n_transition.value\n\n if max_transition is not None and n_transition.value >= max_transition:\n is_training_done.set()", "def __init__(self, env = GridWorldEnv(), discountingFactor = 0.9,\n convergenceThreshold = 1e-4, iterationThreshold = 1000,\n mode='prod'):\n self.env = env\n self.gamma = discountingFactor\n self.th = convergenceThreshold\n self.maxIter = iterationThreshold\n self.stateCount = self.env.get_statespace_len()\n self.actionCount = self.env.get_actionspace_len()\n self.uniformActionProbability = 1.0/self.actionCount\n self.stateDict = self.env.stateDict\n self.actionDict = self.env.actionDict\n self.mode = mode\n self.stateCount = self.env.get_statespace_len()\n self.V = np.zeros(self.stateCount)\n self.Q = [np.zeros(self.actionCount) for s in range(self.stateCount)]\n self.Policy = np.zeros(self.stateCount)\n self.totalReward = 0\n self.totalSteps = 0", "def reset(self):\n\n self.curr_episode += 1\n self.curr_step = 0\n\n self.action_episode_memory.append([])\n self.rewards.append([])\n\n self.is_finalized = False\n init_state, init_reward = self._take_action(5 * np.random.randn(self.act_dimension))\n self.initial_conditions.append(init_state)\n return init_state", "def test_env_reset_and_step(self):\n create_env = CreateEnv()\n env = create_env.env\n\n # Assert that the total number of agents matches the sum of the 'n_agents'\n # configuration and the number of planners (1 in this case)\n num_planners = 1\n self.assertEqual(\n len(env.all_agents), create_env.env_config[\"n_agents\"] + num_planners\n )\n\n # Assert that the number of agents created in the world\n # matches the configuration specification\n self.assertEqual(len(env.world.agents), create_env.env_config[\"n_agents\"])\n\n # Assert that the planner's index in the world is 'p'\n self.assertEqual(env.world.planner.idx, \"p\")\n\n obs = env.reset()\n\n # Test whether the observation dictionary keys are created as expected\n self.assertEqual(\n sorted(list(obs.keys())),\n [str(i) for i in range(create_env.env_config[\"n_agents\"])] + [\"p\"],\n )\n\n obs, reward, done, info = env.step({})\n\n # Check that the observation, reward and info keys match\n self.assertEqual(obs.keys(), reward.keys())\n self.assertEqual(obs.keys(), info.keys())\n\n # Assert that __all__ is in done\n assert \"__all__\" in done", "def evaluate(self, environment, max_reward=1.0):\n episode_reward = 0.0\n state = environment.reset()\n\n for step_idx in range(self.max_episode_steps):\n reward, action_idx, new_state, is_done = environment.step(state, self)\n \n state = new_state\n episode_reward += reward\n\n if is_done or episode_reward >= max_reward:\n break\n\n self.fitness = episode_reward\n return episode_reward", "def rewards(self, choices: Sequence[Tuple[Key,Choice]] ) -> Sequence[Reward]:\n ...", "def play(self, max_steps, render=False, reward_network=None):\n\n done = False\n total_reward = np.zeros(1)\n state = 
self.env_reset()\n episode_length = 0\n max_steps_elapsed = False\n\n while not (done or max_steps_elapsed):\n # Env returns numpy state so convert to torch\n torch_state = torch.from_numpy(state).type(torch.float32)\n torch_state = torch_state.to(DEVICE).unsqueeze(0)\n\n torch_action, _, _ = self.policy.sample(torch_state)\n action = torch_action.detach().cpu().numpy()\n action = action.reshape(self.env.action_space.shape)\n\n next_state, reward, done, _ = self.env_step(action)\n\n if reward_network:\n # reward networks can be either r(s) or r(s,a)\n try:\n reward = reward_network(torch_state)\n except TypeError:\n reward = reward_network(torch_state, torch_action)\n\n reward = float(reward.cpu().detach().item())\n\n episode_length += 1\n\n max_steps_elapsed = episode_length > max_steps\n\n if max_steps_elapsed:\n self.replay_buffer.push(\n (state, action, reward, next_state, done)\n )\n else:\n self.replay_buffer.push(\n (state, action, reward, next_state, not done)\n )\n\n if render:\n self.env.render()\n\n state = next_state\n total_reward += reward\n\n self.tbx_writer.add_scalar(\n \"rewards/episode_reward\", total_reward.item(), self.play_i\n )\n\n self.tbx_writer.add_scalar(\n \"rewards/episode_length\", episode_length, self.play_i\n )\n\n self.play_i += 1", "def get_reward(self, observations, actions):\n\n #initialize and reshape as needed, for batch mode\n self.reward_dict = {}\n if len(observations.shape)==1:\n observations = np.expand_dims(observations, axis = 0)\n actions = np.expand_dims(actions, axis = 0)\n batch_mode = False\n else:\n batch_mode = True\n\n # obs:\n # self.obs_dict['robot_pos'], #24\n # self.obs_dict['object_position'], #3\n # self.obs_dict['object_orientation'], #3\n # self.obs_dict['object_velp'], #3\n # self.obs_dict['object_velr'], #3\n # self.obs_dict['desired_orientation'], #3\n\n #get vars\n obj_pos = observations[:, (24):(24)+3]\n obj_orientation = observations[:,(24+3):(24+3)+3]\n desired_orientation = observations[:,-3:]\n obj_height = observations[:,24+2]\n zeros = np.zeros(obj_height.shape)\n\n #orientation\n angle_diffs = np.linalg.norm(obj_orientation - desired_orientation, axis=1)\n\n #fall\n is_fall = zeros.copy()\n is_fall[obj_height < -0.1] = 1\n\n #done based on is_fall\n dones = (is_fall==1) if not self.startup else zeros\n\n #rewards\n self.reward_dict['ori_dist'] = -7*angle_diffs\n self.reward_dict['drop_penalty'] = -1000*is_fall\n self.reward_dict['r_total'] = self.reward_dict['ori_dist'] + self.reward_dict['drop_penalty']\n\n #return\n if not batch_mode:\n return self.reward_dict['r_total'][0], dones[0]\n return self.reward_dict['r_total'], dones", "def set_rewards(self, item, short=False):\n if short or self.tail_batch is None:\n self.memory.set('rewards', self.s, self.e, item)\n else:\n bl = len(self.tail_batch)\n self.memory.set('rewards', self.s, self.e, item[:-bl])\n self.tail_batch['rewards'] = item[-bl:]", "def test_novelty_reward(self):\n\n @self.variant\n def episodic_memory_intrinsic_rewards(embeddings, reward_scale):\n return exploration.episodic_memory_intrinsic_rewards(\n embeddings, self.num_neighbors, reward_scale, max_memory_size=10)\n # Memory starts out as all zeros, if we try to add more zeros we should get\n # a lower reward than if we try to add 2 novel embeddings.\n identical_embeddings = np.array([[0., 0.], [0., 0.]])\n novel_embeddings = np.array([[1.3, 2.7], [-10.4, 16.01]])\n low_reward, state = episodic_memory_intrinsic_rewards(\n identical_embeddings, self.reward_scale)\n 
np.testing.assert_equal(np.array(state.distance_sum), 0)\n high_reward, _ = episodic_memory_intrinsic_rewards(\n novel_embeddings, self.reward_scale)\n np.testing.assert_array_less(low_reward, high_reward)", "def interact(env, agent, num_episodes=30, window=1):\r\n # initialize average rewards\r\n average_reward_per_100_episodes = []\r\n best_average_reward_per_100_episodes = []\r\n avg_rewards = deque(maxlen=num_episodes)\r\n # initialize best average reward\r\n best_avg_reward = -math.inf\r\n # initialize monitor for most recent rewards\r\n samp_rewards = deque(maxlen=window)\r\n\r\n\r\n answer = input(\"Load QTable? (y/n) ?\")\r\n if answer == \"y\":\r\n # Do this.\r\n an = input(\"Name ?\")\r\n agent.q_table.readQ(an)\r\n agent.q_table.readStateList(an)\r\n agent.observation_space = np.size(agent.q_table.q, 0)\r\n agent.q_table.stateCount = len(agent.q_table.stateList)\r\n #print(\"files not found\")\r\n elif answer == \"n\":\r\n # Do that.\r\n pass\r\n else:\r\n print(\"Please enter y or n\")\r\n\r\n start_time = time.time()\r\n # for each episode\r\n for i_episode in range(1, num_episodes+1):\r\n # begin the episode\r\n state_array,state, state_with_act = env.reset()\r\n #if state <= agent.q_table.observation_space -1:\r\n #print('next state is in q table')\r\n y = agent.q_table.addStateList(state_with_act[state][4], state_with_act[state][1], state_with_act[state][2])\r\n if y != -1:\r\n print (\"state is in the stateList\")\r\n else : \r\n #agent.addState(state)\r\n y= agent.q_table.stateCount\r\n # initialize the sampled reward\r\n samp_reward = 0\r\n while True:\r\n #print(\"new action starts\")\r\n # agent selects an action\r\n action = agent.select_action(y)\r\n # agent performs the selected action\r\n next_state, reward,done, indices, stateENV, action_count = env.step(action)\r\n time.sleep(0.3)\r\n #print(\"indices =\")\r\n #print(indices)\r\n # agent performs internal updates based on sampled experience\r\n \r\n #if next_state <= agent.q_table.observation_space -1:\r\n #print('next state is in q table')\r\n x = agent.q_table.addStateList(stateENV[next_state][4], stateENV[next_state][1], stateENV[next_state][2])\r\n if x != -1:\r\n print (\"state is in the stateList\")\r\n else : \r\n #agent.addState(state)\r\n x= agent.q_table.stateCount\r\n #print (\"next state =\"+str(next_state))\r\n #print (\"observation_space\" + str(agent.q_table.observation_space-1))\r\n #agent.step(state, action, reward, next_state, indices)\r\n agent.step(y, action, reward, x, indices)\r\n #print('stateList = ')\r\n #print(agent.q_table.stateList)\r\n # update the sampled reward\r\n samp_reward += reward\r\n # update the state (s <- s') to next time step\r\n y = x\r\n agent.update_epsilon()\r\n if done:\r\n # save final sampled reward\r\n samp_rewards.append(samp_reward)\r\n break\r\n \r\n lenStateCOunt = len(agent.q_table.stateList)\r\n time_end = time.time()\r\n saveEpisodeResult(start_time,time_end,i_episode, lenStateCOunt)\r\n\r\n\r\n \r\n\r\n if (i_episode >= 100):\r\n # get average reward from last 100 episodes\r\n avg_reward = np.mean(samp_rewards)\r\n # append to deque\r\n avg_rewards.append(avg_reward)\r\n # update best average reward\r\n print('episode average reward {}'.format(avg_reward))\r\n average_reward_per_100_episodes.append(avg_reward)\r\n best_average_reward_per_100_episodes.append(best_avg_reward)\r\n if avg_reward > best_avg_reward:\r\n best_avg_reward = avg_reward\r\n print (\"State with activities =\")\r\n for key,value in stateENV.items():\r\n print(str(key) + '. 
')\r\n print(str(stateENV[key][0]) + ', ' + str(stateENV[key][1])+ ', ' + str(stateENV[key][2]) + ', ' + str(stateENV[key][4]))\r\n \r\n # monitor progress\r\n print(\"\\rEpisode {}/{} || Best average reward {} || eps {} \".format(i_episode, num_episodes, best_avg_reward, agent.epsilon), end=\"\")\r\n sys.stdout.flush()\r\n \r\n # check if task is solved (according to OpenAI Gym)\r\n if best_avg_reward >= 9.7:\r\n print('\\nEnvironment solved in {} episodes.'.format(i_episode), end=\"\")\r\n agent.q_table.saveQ(best_avg_reward,appName)\r\n agent.q_table.saveStateList(appName)\r\n print(\"width = \"+ str(action_count))\r\n saveEpToCSV()\r\n save_rewards_csv(average_reward_per_100_episodes, best_average_reward_per_100_episodes)\r\n break\r\n if i_episode == num_episodes: \r\n agent.q_table.saveQ(best_avg_reward,appName)\r\n agent.q_table.saveStateList(appName)\r\n print(\"width = \"+ str(action_count))\r\n saveEpToCSV()\r\n save_rewards_csv(average_reward_per_100_episodes, best_average_reward_per_100_episodes)\r\n print('\\n')\r\n\r\n end_time = time.time()\r\n time_lapsed = end_time - start_time\r\n time_convert(time_lapsed)\r\n return avg_rewards, best_avg_reward\r\n print(\"width = \"+ str(action_count))", "def q_learning(num_iterations: int, learning_rate: float,\n discount_rate: float, exploration_rate: float,\n min_exploration_rate: float, exploration_rate_decay: float,\n string_env: str, save_path: str = None) -> np.ndarray:\n env = gym.make(string_env)\n state = env.reset()\n num_states = env.observation_space.n\n num_actions = env.action_space.n\n q_table = np.zeros((num_states, num_actions))\n\n total_rewards = []\n for i in range(num_iterations):\n action = choose_action(q_table, state, exploration_rate)\n\n new_state, reward, done, info = env.step(action)\n update_q_value(q_table, state, action, learning_rate, reward, new_state,\n discount_rate)\n\n if done == True:\n new_state = env.reset()\n current_reward = run_with_best_action(q_table, string_env)\n total_rewards.append(current_reward)\n exploration_rate = reduce_exploration_rate(exploration_rate,\n min_exploration_rate,\n exploration_rate_decay)\n\n state = new_state\n\n if save_path:\n np.save(save_path, q_table)\n print(np.mean(np.array(total_rewards[-100:])))\n return q_table", "def reset(self):\n self.agents.reset()\n self._cur_obs, self._cur_lm = self.parallel_env.reset()\n self.agent_cum_rewards = np.zeros((len(self.agents), self.n_states, 1))\n self.agent_contiguous_states = np.full((len(self.agents), self.n_states), True)", "def run_badreenvironment(nav_args, ctrl_args, bias=0.0, seed=None, flat=False,\n label=\"tmp\"):\n\n if seed is not None:\n HRLutils.set_seed(seed)\n seed = HRLutils.SEED\n\n net = nef.Network(\"run_badreenvironment\")\n\n env = badreenvironment.BadreEnvironment(flat=flat)\n net.add(env)\n\n # ##NAV AGENT\n stateN = 500\n max_state_input = 3\n enc = env.gen_encoders(stateN, 0, 0.0)\n\n # generate evaluation points\n orientations = MU.I(env.num_orientations)\n shapes = MU.I(env.num_shapes)\n colours = MU.I(env.num_colours)\n evals = (list(MU.diag([3 for _ in range(env.stateD)])) +\n [o + s + c\n for o in orientations for s in shapes for c in colours])\n\n # create lower level\n nav_agent = smdpagent.SMDPAgent(stateN, env.stateD, env.actions,\n name=\"NavAgent\",\n stateradius=max_state_input,\n state_encoders=enc, state_evals=evals,\n discount=0.5, **nav_args)\n net.add(nav_agent)\n\n print \"agent neurons:\", nav_agent.countNeurons()\n\n # actions terminate on fixed schedule (aligned with 
environment)\n nav_term_node = terminationnode.TerminationNode(\n {terminationnode.Timer((0.6, 0.6)): None}, env, name=\"NavTermNode\",\n state_delay=0.1, reset_delay=0.05, reset_interval=0.1)\n net.add(nav_term_node)\n\n net.connect(nav_term_node.getOrigin(\"reset\"),\n nav_agent.getTermination(\"reset\"))\n net.connect(nav_term_node.getOrigin(\"learn\"),\n nav_agent.getTermination(\"learn\"))\n net.connect(nav_term_node.getOrigin(\"reset\"),\n nav_agent.getTermination(\"save_state\"))\n net.connect(nav_term_node.getOrigin(\"reset\"),\n nav_agent.getTermination(\"save_action\"))\n\n net.connect(nav_agent.getOrigin(\"action_output\"),\n env.getTermination(\"action\"))\n\n # ##CTRL AGENT\n stateN = 500\n enc = RandomHypersphereVG().genVectors(stateN, env.stateD)\n actions = [(\"shape\", [0, 1]), (\"orientation\", [1, 0]), (\"null\", [0, 0])]\n ctrl_agent = smdpagent.SMDPAgent(stateN, env.stateD, actions,\n name=\"CtrlAgent\", state_encoders=enc,\n stateradius=max_state_input,\n state_evals=evals, discount=0.4,\n **ctrl_args)\n net.add(ctrl_agent)\n\n print \"agent neurons:\", ctrl_agent.countNeurons()\n\n net.connect(env.getOrigin(\"state\"),\n ctrl_agent.getTermination(\"state_input\"))\n\n ctrl_term_node = terminationnode.TerminationNode(\n {terminationnode.Timer((0.6, 0.6)): None}, env, name=\"CtrlTermNode\",\n state_delay=0.1, reset_delay=0.05, reset_interval=0.1)\n net.add(ctrl_term_node)\n\n net.connect(ctrl_term_node.getOrigin(\"reset\"),\n ctrl_agent.getTermination(\"reset\"))\n net.connect(ctrl_term_node.getOrigin(\"learn\"),\n ctrl_agent.getTermination(\"learn\"))\n net.connect(ctrl_term_node.getOrigin(\"reset\"),\n ctrl_agent.getTermination(\"save_state\"))\n net.connect(ctrl_term_node.getOrigin(\"reset\"),\n ctrl_agent.getTermination(\"save_action\"))\n\n # ctrl gets a slight bonus if it selects a rule (as opposed to null), to\n # encourage it to not just pick null all the time\n reward_relay = net.make(\"reward_relay\", 1, 3, mode=\"direct\")\n reward_relay.fixMode()\n net.connect(env.getOrigin(\"reward\"), reward_relay,\n transform=[[1], [0], [0]])\n net.connect(ctrl_agent.getOrigin(\"action_output\"), reward_relay,\n transform=[[0, 0], [1, 0], [0, 1]])\n\n net.connect(reward_relay, ctrl_agent.getTermination(\"reward\"),\n func=lambda x: ((x[0] + bias * abs(x[0]))\n if x[1] + x[2] > 0.5 else x[0]),\n origin_name=\"ctrl_reward\")\n\n # ideal reward function (for testing)\n# def ctrl_reward_func(x):\n# if abs(x[0]) < 0.5:\n# return 0.0\n#\n# if flat:\n# return 1.5 if x[1] + x[2] < 0.5 else -1.5\n# else:\n# if x[1] + x[2] < 0.5:\n# return -1.5\n# if [round(a) for a in env.state[-2:]] == [round(b)\n# for b in x[1:]]:\n# return 1.5\n# else:\n# return -1.5\n# net.connect(reward_relay, ctrl_agent.getTermination(\"reward\"),\n# func=ctrl_reward_func)\n\n # nav rewarded for picking ctrl target\n def nav_reward_func(x):\n if abs(x[0]) < 0.5 or env.action is None:\n return 0.0\n\n if x[1] + x[2] < 0.5:\n return x[0]\n\n if x[1] > x[2]:\n return (1.5 if env.action[1] == env.state[:env.num_orientations]\n else -1.5)\n else:\n return (1.5 if env.action[1] == env.state[env.num_orientations:\n - env.num_colours]\n else -1.5)\n net.connect(reward_relay, nav_agent.getTermination(\"reward\"),\n func=nav_reward_func)\n\n # state for navagent controlled by ctrlagent\n ctrl_state_inhib = net.make_array(\"ctrl_state_inhib\", 50, env.stateD,\n radius=2, mode=HRLutils.SIMULATION_MODE)\n ctrl_state_inhib.fixMode([SimulationMode.DEFAULT, SimulationMode.RATE])\n\n inhib_matrix = [[0, -5]] * 50 * 
env.num_orientations + \\\n [[-5, 0]] * 50 * env.num_shapes + \\\n [[-5, -5]] * 50 * env.num_colours\n\n # ctrl output inhibits all the non-selected aspects of the state\n net.connect(env.getOrigin(\"state\"), ctrl_state_inhib)\n net.connect(ctrl_agent.getOrigin(\"action_output\"), ctrl_state_inhib,\n transform=inhib_matrix)\n\n # also give a boost to the selected aspects (so that neurons are roughly\n # equally activated).\n def boost_func(x):\n if x[0] > 0.5:\n return [3 * v for v in x[1:]]\n else:\n return x[1:]\n boost = net.make(\"boost\", 1, 1 + env.stateD, mode=\"direct\")\n boost.fixMode()\n net.connect(ctrl_state_inhib, boost,\n transform=([[0 for _ in range(env.stateD)]] +\n list(MU.I(env.stateD))))\n net.connect(ctrl_agent.getOrigin(\"action_output\"), boost,\n transform=[[1, 1]] + [[0, 0] for _ in range(env.stateD)])\n\n net.connect(boost, nav_agent.getTermination(\"state_input\"),\n func=boost_func)\n\n # save weights\n weight_save = 1.0 # period to save weights (realtime, not simulation time)\n threads = [\n HRLutils.WeightSaveThread(nav_agent.getNode(\"QNetwork\").saveParams,\n os.path.join(\"weights\", \"%s_%s\" %\n (nav_agent.name, seed)),\n weight_save),\n HRLutils.WeightSaveThread(ctrl_agent.getNode(\"QNetwork\").saveParams,\n os.path.join(\"weights\", \"%s_%s\" %\n (ctrl_agent.name, seed)),\n weight_save)]\n for t in threads:\n t.start()\n\n # data collection node\n data = datanode.DataNode(period=1,\n filename=HRLutils.datafile(\"dataoutput_%s.txt\" %\n label),\n header=\"%s %s %s %s %s\" % (nav_args, ctrl_args,\n bias, seed, flat))\n print \"saving data to\", data.filename\n print \"header\", data.header\n net.add(data)\n nav_q = nav_agent.getNode(\"QNetwork\")\n ctrl_q = ctrl_agent.getNode(\"QNetwork\")\n ctrl_bg = ctrl_agent.getNode(\"BGNetwork\").getNode(\"weight_actions\")\n data.record_avg(env.getOrigin(\"reward\"))\n data.record_avg(ctrl_q.getNode(\"actionvals\").getOrigin(\"X\"))\n data.record_sparsity(ctrl_q.getNode(\"state_pop\").getOrigin(\"AXON\"))\n data.record_sparsity(nav_q.getNode(\"state_pop\").getOrigin(\"AXON\"))\n data.record_avg(ctrl_q.getNode(\"valdiff\").getOrigin(\"X\"))\n data.record_avg(ctrl_agent.getNode(\"ErrorNetwork\").getOrigin(\"error\"))\n data.record_avg(ctrl_bg.getNode(\"0\").getOrigin(\"AXON\"))\n data.record_avg(ctrl_bg.getNode(\"1\").getOrigin(\"AXON\"))\n data.record(env.getOrigin(\"score\"))\n\n# net.add_to_nengo()\n# net.network.simulator.run(0, 300, 0.001)\n net.view()\n\n for t in threads:\n t.stop()", "def initiate_agent(self, env):\n from keras import Sequential\n from keras.optimizers import Adam\n from keras.layers import Dense, Dropout\n from rl.memory import SequentialMemory\n from rl.agents import DQNAgent\n\n self.env = env\n\n nb_actions = self.env.action_space.n\n\n model = Sequential()\n model.add(Dense(512, activation='relu', input_shape=env.observation_space))\n model.add(Dropout(0.2))\n model.add(Dense(512, activation='relu'))\n model.add(Dropout(0.2))\n model.add(Dense(512, activation='relu'))\n model.add(Dropout(0.2))\n model.add(Dense(nb_actions, activation='linear'))\n\n # Finally, we configure and compile our agent. 
You can use every built-in Keras optimizer and\n # even the metrics!\n memory = SequentialMemory(limit=memory_limit, window_length=window_length)\n policy = TrumpPolicy()\n from rl.core import Processor\n\n class CustomProcessor(Processor):\n \"\"\"he agent and the environment\"\"\"\n\n def process_state_batch(self, batch):\n \"\"\"\n Given a state batch, I want to remove the second dimension, because it's\n useless and prevents me from feeding the tensor into my CNN\n \"\"\"\n return np.squeeze(batch, axis=1)\n\n def process_info(self, info):\n processed_info = info['player_data']\n if 'stack' in processed_info:\n processed_info = {'x': 1}\n return processed_info\n\n nb_actions = env.action_space.n\n\n self.dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=nb_steps_warmup,\n target_model_update=1e-2, policy=policy,\n processor=CustomProcessor(),\n batch_size=batch_size, train_interval=train_interval, enable_double_dqn=enable_double_dqn)\n self.dqn.compile(Adam(lr=1e-3), metrics=['mae'])", "def _rewards(self, action: Action) -> Dict[Text, float]:\n raise NotImplementedError", "def wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=False):\n if episode_life:\n env = EpisodicLifeEnv(env)\n\n if 'FIRE' in env.unwrapped.get_action_meanings():\n env = FireResetEnv(env)\n env = WarpFrame(env)\n\n if clip_rewards:\n env = ClipRewardEnv(env)\n\n if frame_stack:\n env = FrameStack(env, 4)\n\n return env", "def _preprocess_experience(self):\n observed_inputs = []\n observed_reward = []\n predicted_outputs = []\n distance_from_reward = []\n next_state = []\n # process inputs and outputs to train the net\n for episode in self.examples:\n episode_match, example_reward = episode\n last_step = True\n for n, step in enumerate(reversed(episode_match)):\n this_state = state_from_hash(step.state_t)\n next_state.append(state_from_hash(step.action_t))\n observed_inputs.append(np.hstack((this_state,\n this_state != next_state[-1]))\n .flatten())\n distance_from_reward.append(n)\n # now we have to evaluate max_{s'}[Q(a',s')]\n # let's see all possible actions two steps ahead\n two_ahead = []\n for possible_action in self.state_space[step.action_t].actions:\n possible_action = state_from_hash(possible_action)\n two_ahead.append(np.hstack((next_state[-1],\n next_state[-1] != possible_action))\n .flatten())\n if not two_ahead:\n # if it's a terminal state, no two-ahead, so set the max to 0\n max_next_state = 0\n else:\n # evaluate Q on the two-ahead actions\n two_ahead = np.array(two_ahead)\n two_ahead[two_ahead == 2] = -1\n max_next_state = self.sess.run(\n self.output,\n feed_dict={self.input: two_ahead}).flatten()\n\n # calc the maximum\n max_next_state = np.max(max_next_state)\n predicted_outputs.append(max_next_state)\n if last_step:\n # because we start from last step, `last_step` will be true\n observed_reward.append(example_reward)\n # then set it to false so non-last steps get reward 0\n last_step = False\n else:\n observed_reward.append(0)\n # Q-network output from the inputs\n predicted_outputs = self.discount * np.vstack(predicted_outputs).flatten()\n observed_inputs = np.array(observed_inputs)\n # possible max value in a state is 2, set all 2's to -1's\n observed_inputs[observed_inputs == 2] = -1\n observed_reward = np.vstack(observed_reward).flatten()\n return observed_inputs, observed_reward, predicted_outputs, distance_from_reward", "def set_env(\n env: gym.Env, max_episode_steps: int, env_wrappers: List[gym.Wrapper] = None\n) -> Tuple[gym.Env, 
int]:\n if max_episode_steps > 0:\n env._max_episode_steps = max_episode_steps\n else:\n max_episode_steps = env._max_episode_steps\n\n if not isinstance(env.action_space, Discrete):\n env = ActionNormalizer(env)\n\n if env_wrappers:\n for env_wrapper in env_wrappers:\n env = env_wrapper(env)\n\n return env, max_episode_steps", "def reward(self, observation, action, reward):\r\n\r\n if reward > 0 :\r\n print(\"win\") \r\n self.done=1 \r\n self.done_MC=1\r\n\r\n else:\r\n self.done=0\r\n\r\n self.current_state=observation\r\n self.current_action=action\r\n self.current_reward=reward\r\n self.reward_list.append(reward.item())\r\n \r\n self.log_probs.append(self.log_prob)\r\n self.values.append(self.value)\r\n self.rewards.append(self.torch.tensor([self.current_reward], dtype=self.torch.float, device=self.device))\r\n self.masks.append(self.torch.tensor([1-self.done], dtype=self.torch.float, device=self.device))", "def reset_env(self):\n self.unemployment = self.init_unemployment\n self.inflation = self.init_inflation\n self.interest = self.init_interest\n\n labor_demand = (1 - self.unemployment) * self.n_agents\n production = self.firm.production_function(labor_demand)\n labor_costs = self.init_wage * labor_demand\n price = (1 + self.firm.markup) / (1 - self.firm.alpha) * labor_costs / production\n profit = price * production - labor_costs\n self.firm.reset(\n price=price,\n production=production,\n labor_costs=labor_costs,\n labor_demand=labor_demand,\n average_profit=profit,\n profit=profit,\n )\n\n wage_increases = {}\n demand = {}\n consume = production / self.n_agents\n for agent_id, agent in self.agents.items():\n agent.reset(labor=1 - self.init_unemployment, consumption=consume)\n wage_increases[agent_id] = self.inflation\n demand[agent_id] = consume\n\n return wage_increases, demand", "def discount_rewards(rewards, gamma):\n reward_shape = rewards.shape\n if len(reward_shape) == 1:\n discounted_r = np.zeros(shape=(*reward_shape, 1), dtype=np.float)\n else:\n discounted_r = np.zeros(shape=reward_shape, dtype=np.float)\n running_add = 0\n\n for t in reversed(range(0, rewards.size)):\n running_add = running_add * gamma + rewards[t]\n discounted_r[t] = running_add\n\n return discounted_r", "def __init__(\n self,\n env_spec,\n policy,\n qf,\n replay_buffer,\n use_target=False,\n discount=0.99,\n n_epoch_cycles=20,\n max_path_length=None,\n n_train_steps=50,\n buffer_batch_size=64,\n min_buffer_size=int(1e4),\n rollout_batch_size=1,\n reward_scale=1.,\n input_include_goal=False,\n smooth_return=True,\n exploration_strategy=None,\n ):\n self.env_spec = env_spec\n self.policy = policy\n self.qf = qf\n self.replay_buffer = replay_buffer\n self.n_epoch_cycles = n_epoch_cycles\n self.n_train_steps = n_train_steps\n self.buffer_batch_size = buffer_batch_size\n self.use_target = use_target\n self.discount = discount\n self.min_buffer_size = min_buffer_size\n self.rollout_batch_size = rollout_batch_size\n self.reward_scale = reward_scale\n self.evaluate = False\n self.input_include_goal = input_include_goal\n self.smooth_return = smooth_return\n self.max_path_length = max_path_length\n self.es = exploration_strategy\n self.init_opt()", "def _problem_set_up(self):\n self.curr_best_reward = -np.inf\n self.curr_reward = -np.inf\n # Set up history\n self.history.curr_best_reward = []\n self.history.curr_reward = []", "def initialize_internal_rewards(\n num_events: int = 1,\n init_value_or_range: Union[float, Tuple[float, float]] = (0.1, 1.0),\n) -> internal_reward.InternalRewards:\n return 
internal_reward.InternalRewards(num_events=num_events,\n init_value_or_range=init_value_or_range)", "def reset(self):\n self.reward = 0", "def qlearning(env, iterations=1000, gamma=0.9, alpha=0.1):\n nS = env.nS # number of states\n nA = env.nA # number of actions\n Q_value = np.zeros((nS, nA))\n policy = np.ones((env.nS,env.nA))/env.nA\n epsilon = 1\n s_t1 = env.reset() # reset the environment and place the agent in the start square\n ############################\n # YOUR IMPLEMENTATION HERE #\n # HINT: Don't forget to decay epsilon according to GLIE\n\n curr_state = s_t1\n \n start = time.time() # to time how long convergence takes\n print(\"---Q Learning---\\nTraining Started.\")\n \n for k in range (1, iterations):\n # if (k%10000) == 0:\n # print(\"Now playing iteration: \", k)\n epsilon = 1/k\n curr_action, reward, new_state, done = take_one_step(env, policy, curr_state)\n new_action = sample_action(policy, new_state)\n Q_value[curr_state, curr_action] = Q_value[curr_state, curr_action] + alpha * (reward + gamma * (Q_value[new_state, np.argmax(Q_value[new_state])]) - Q_value[curr_state, curr_action])\n \n # epsilon-greedy policy update\n Q_list = np.argwhere(Q_value[curr_state] == np.amax(Q_value[curr_state])).flatten() # get a list of all indices where Q is maximum, (argmax(Q))\n max_Q = np.random.choice(Q_list.flatten()) # randomly pick from those indices. Picking each index is equally likely.\n for a in range (nA):\n if a == max_Q:\n policy[curr_state][a] = epsilon/nA + (1 - epsilon) # for the chosen maximal index of Q, set the policy to epsilon/m + 1 - epsilon\n else:\n policy[curr_state][a] = epsilon/nA \n \n # print(\"Q_value = {0}\".format(Q_value))\n # print(\"policy = {0}\".format(policy))\n \n if done:\n curr_state = env.reset() # reset the environment and place the agent in the start square\n curr_action = sample_action(policy, curr_state)\n else:\n curr_state = new_state\n curr_action = new_action\n \n stop = time.time()\n print(\"Training Completed.\")\n print(\"It took: {0} iterations and {1} minutes\".format(k,(stop-start)/60))\n \n ############################\n det_policy = np.argmax(Q_value, axis=1)\n return Q_value, det_policy", "def __init__(self, robot, human_policy, initial_world_state,\n num_theta = 2, num_ingredients = 3, reward_set = [((0,2,1),0), ((1,1,2),1)],\n gamma = 0.95):\n self.robot = robot\n self.human_policy = human_policy\n self.num_ingredients = num_ingredients\n self.reward_set = reward_set\n self.gamma = gamma\n self.world_state = initial_world_state\n self.theta_set = list(range(num_theta))\n self.allStates = self.getAllStates()\n #self.allObservations = self.getAllObservations()", "def _train_simulate(self, env, train_episode=None):\n # The initial observation\n o_r_d_i = [env.reset()] + [None]*3 # o_r_d_i means \"Observation_Reward_Done_Info\"\n # Reset all the manager parameters\n self.reset(o_r_d_i[0][\"manager\"])\n done = False\n current_option = None\n # Render the current state\n if self.parameters[\"display_environment\"]:\n self.show_render.render(o_r_d_i[0])\n\n while not done:\n # If no option is activated then choose one\n if current_option is None:\n current_option = self.select_option(o_r_d_i, train_episode)\n assert current_option.score == 0, \"the option's reset function must reset the score to 0.\"\n\n # choose an action\n action = current_option.act(train_episode)\n\n # make an action and display the state space\n o_r_d_i = env.step(action)\n if self.parameters[\"display_environment\"]:\n 
self.show_render.render(o_r_d_i[0])\n\n # check if the option ended correctly\n correct_termination = self.check_end_option(current_option, o_r_d_i[0][\"manager\"])\n\n # update the option\n intra_reward = self.compute_intra_reward(o_r_d_i, correct_termination)\n current_option.update_option(o_r_d_i, action, correct_termination, train_episode, intra_reward)\n\n # If the option is done, update the manager\n if correct_termination is not None:\n if check_type(current_option, AbstractOption):\n # record the correct transition when the option is a regular option (i.e. not an explore option)\n self.successful_transition.append(correct_termination)\n self.write_success_rate_transitions()\n\n # the manager does not need to know if the correct_termination is 0 or 1.\n self.update_manager(o_r_d_i, current_option, train_episode)\n\n current_option = None\n\n done = self.check_end_manager(o_r_d_i)\n\n self.write_manager_score(train_episode)", "def env_runner(env, policy, num_local_steps, summary_writer):\n last_state = env.reset()\n last_features = policy.get_initial_features()\n length = 0\n rewards = 0\n\n while True:\n terminal_end = False\n rollout = PartialRollout()\n\n for _ in range(num_local_steps):\n fetched = policy.act(last_state, *last_features)\n action, value_, features = fetched[0], fetched[1], fetched[2:]\n # argmax to convert from one-hot\n state, reward, terminal, info = env.step(action.argmax())\n\n # collect the experience\n rollout.add(last_state, action, reward, value_, terminal, last_features)\n length += 1\n rewards += reward\n\n last_state = state\n last_features = features\n\n if info:\n summary = tf.Summary()\n for k, v in info.items():\n summary.value.add(tag=k, simple_value=float(v))\n summary_writer.add_summary(summary, policy.global_step.eval())\n summary_writer.flush()\n\n timestep_limit = env.spec.tags.get('wrapper_config.TimeLimit.max_episode_steps')\n if terminal or length >= timestep_limit:\n terminal_end = True\n if length >= timestep_limit or not env.metadata.get('semantics.autoreset'):\n last_state = env.reset()\n last_features = policy.get_initial_features()\n print(\"Episode finished. Sum of rewards: %d. 
Length: %d\" % (rewards, length))\n length = 0\n rewards = 0\n break\n\n if not terminal_end:\n rollout.r = policy.value(last_state, *last_features)\n\n # once we have enough experience, yield it, and have the ThreadRunner place it on a queue\n yield rollout", "def nchain_extras(env, gamma=0.99):\n\n # How to handle <TimeLimit<______>> and other Wrappers?\n # assert isinstance(env, gym.envs.toy_text.nchain.NChainEnv)\n\n # Action constants\n A_FORWARD = 0\n A_BACKWARD = 1\n\n states = np.arange(env.observation_space.n)\n actions = np.arange(env.action_space.n)\n\n p0s = np.zeros(env.observation_space.n)\n p0s[0] = 1.0\n\n # Populate dynamics\n t_mat = np.zeros(\n (env.observation_space.n, env.action_space.n, env.observation_space.n)\n )\n\n # Backward action moves to 0th state if it doesn't fail, forward if it does\n t_mat[:, A_BACKWARD, 0] = 1.0 - env.slip\n for s1 in states:\n t_mat[s1, A_BACKWARD, min(s1 + 1, env.observation_space.n - 1)] = env.slip\n\n # Forward action moves to next state if it doesn't fail, 0th if it does\n for s1 in states:\n t_mat[s1, A_FORWARD, min(s1 + 1, env.observation_space.n - 1)] = 1.0 - env.slip\n t_mat[:, A_FORWARD, 0] = env.slip\n\n terminal_state_mask = np.zeros(env.observation_space.n)\n\n xtr = DiscreteExplicitExtras(\n states, actions, p0s, t_mat, terminal_state_mask, gamma=gamma,\n )\n\n phi = Indicator(Indicator.Type.OBSERVATION_ACTION, xtr)\n\n state_action_rewards = np.zeros((env.observation_space.n, env.action_space.n))\n state_action_rewards[:, A_BACKWARD] = env.small\n state_action_rewards[env.observation_space.n - 1, A_FORWARD] = env.large\n reward = Linear(state_action_rewards.flatten())\n\n return (xtr, phi, reward)", "def wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=False, scale=False):\n if episode_life:\n env = EpisodicLifeEnv(env)\n if 'FIRE' in env.unwrapped.get_action_meanings():\n env = FireResetEnv(env)\n env = WarpFrame(env)\n if scale:\n env = ScaledFloatFrame(env)\n if clip_rewards:\n env = ClipRewardEnv(env)\n if frame_stack:\n env = FrameStack(env, 4)\n return env", "def set_env(self, env):\n\n self.env = env\n self.sim_env = copy.deepcopy(self.env)\n self.sim_env.reset_at_episode_end = False # Avoids expensive re-sampling of jets every time we parse a path\n self.init_episode()", "def __init__(\n self, brain, reward_buff_cap, trainer_parameters, training, load, seed, run_id\n ):\n super().__init__(brain, trainer_parameters, training, run_id, reward_buff_cap)\n self.param_keys = [\n \"batch_size\",\n \"beta\",\n \"buffer_size\",\n \"epsilon\",\n \"hidden_units\",\n \"lambd\",\n \"learning_rate\",\n \"max_steps\",\n \"normalize\",\n \"num_epoch\",\n \"num_layers\",\n \"time_horizon\",\n \"sequence_length\",\n \"summary_freq\",\n \"use_recurrent\",\n \"summary_path\",\n \"memory_size\",\n \"model_path\",\n \"reward_signals\",\n ]\n self.check_param_keys()\n\n # Make sure we have at least one reward_signal\n if not self.trainer_parameters[\"reward_signals\"]:\n raise UnityTrainerException(\n \"No reward signals were defined. At least one must be used with {}.\".format(\n self.__class__.__name__\n )\n )\n\n self.step = 0\n self.policy = PPOPolicy(seed, brain, trainer_parameters, self.is_training, load)\n\n stats = defaultdict(list)\n # collected_rewards is a dictionary from name of reward signal to a dictionary of agent_id to cumulative reward\n # used for reporting only. 
We always want to report the environment reward to Tensorboard, regardless\n # of what reward signals are actually present.\n self.collected_rewards = {\"environment\": {}}\n for _reward_signal in self.policy.reward_signals.keys():\n self.collected_rewards[_reward_signal] = {}\n\n self.stats = stats\n\n self.training_buffer = Buffer()\n self.episode_steps = {}" ]
[ "0.62321734", "0.61911786", "0.6097618", "0.6054359", "0.5976745", "0.59590447", "0.59165645", "0.5758093", "0.56996363", "0.56274784", "0.56224126", "0.55879575", "0.5585855", "0.5566248", "0.55561215", "0.55531347", "0.5542366", "0.55359405", "0.5531425", "0.55208486", "0.55071753", "0.5504959", "0.54834217", "0.54811615", "0.54627705", "0.54579127", "0.5454004", "0.5452689", "0.5452491", "0.5432411", "0.5425193", "0.54113555", "0.5410122", "0.5406402", "0.53741354", "0.53451514", "0.5341267", "0.5329526", "0.53265435", "0.5315128", "0.529682", "0.52855533", "0.5274444", "0.52708685", "0.52635556", "0.52621", "0.52564013", "0.5245889", "0.5245653", "0.5244481", "0.52224404", "0.5206141", "0.5189301", "0.5174223", "0.51728916", "0.51722956", "0.51717174", "0.51714504", "0.51672775", "0.5166023", "0.5160388", "0.5157903", "0.5143038", "0.51239", "0.51223946", "0.51194835", "0.5119216", "0.51180947", "0.5108623", "0.5105192", "0.50965416", "0.50867987", "0.5083645", "0.5082367", "0.5080192", "0.5070967", "0.50662255", "0.50648946", "0.506194", "0.5059321", "0.5045709", "0.50307405", "0.50248176", "0.5018604", "0.50176615", "0.50084776", "0.5005997", "0.4998552", "0.49965", "0.49960956", "0.499428", "0.49906912", "0.4987606", "0.49804005", "0.4980256", "0.49773654", "0.49771556", "0.49728435", "0.49610484", "0.4958473" ]
0.5081516
74
Defines the discounts that are returned by `step()`. Override this method to define an environment that uses non-standard discount values, for example an environment with array-valued discounts.
def discount_spec(self) -> types.NestedArraySpec:
    return array_spec.BoundedArraySpec(
        shape=(), dtype=np.float32, minimum=0.0, maximum=1.0, name='discount'
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def discount_spec(self):\n task_discount_spec = self._task.get_discount_spec()\n if task_discount_spec is not None:\n return task_discount_spec\n else:\n return super(Environment, self).discount_spec()", "def add_discount(self, discount):\n self.gamma = discount", "def apply_discount(self, product):\n pass", "def discount(self,discountFactor,type='geometric'):\n for e in self.estimators:\n e.discount(discountFactor,type)\n return", "def make_test_discount(self):\n return Discount()", "def discount(self, cart):", "def discount(self,discountFactor,type='geometric'):\n assert(discountFactor > 0)\n if type == 'hyperbolic':\n #avg' = avg*(1-alpha(N)) + alpha(N)*item\n #if alpha(N) ~= 1/(N+1) then this converges to the true average\n #sum' = (N+1)/N sum*(1-alpha(N)) + alpha(N)(N+1)*item\n #sum' = sum*(1-alpha(N))/(N alpha(N)) + item\n #if alpha is a constant, we get\n #sum' = sum*(1/alpha-1)/N + item\n self.discount((1.0/discountFactor - 1.0)/self.sumWeight)\n return\n if self.regularizationLambda == 0:\n self.scale *= discountFactor\n self.sumWeight *= discountFactor\n else:\n AtAreg = np.eye(self.n)*self.regularizationLambda/self.scale\n self.AtA = self.AtA*discountFactor + AtAreg\n self.Atb *= discountFactor\n self.btb *= discountFactor\n self.sumWeight *= discountFactor\n self.calc_AtAinv()\n self.x = np.dot(self.AtAinv,self.Atb)\n return", "def apply_discounts(self):\n # for each valid discount...\n for discount in list(DiscountTypes):\n # only apply the discount if it is set in the cart\n if(discount in self.cart.discounts):\n getattr(self, discount.value)()", "def discount(self, period):\n\t\treturn 1.0/compound(period)", "def __init__(self, alpha, beta, gamma, discount_factors, y_scale,\n unrestricted_weights=None, discounting=None):\n self.attr = dict()\n self.attr['y_scale'] = y_scale\n self.attr['alpha'] = alpha\n self.attr['gamma'] = gamma\n self.attr['beta'] = beta\n\n if discounting is not None:\n # Implement exponential discounting or hyperbolic discounting\n np.testing.assert_equal(discounting in ['exponential', 'hyperbolic'], True)\n\n if discounting in ['hyperbolic']:\n df_beta = discount_factors[0]\n df_delta = discount_factors[1]\n\n new_dfx = {\n t: (df_beta * df_delta ** t if t > 0.0 else 1) for t in discount_factors.keys()\n }\n elif discounting in ['exponential']:\n df_delta = discount_factors[0]\n new_dfx = {t: (df_delta ** t if t > 0.0 else 1) for t in discount_factors.keys()}\n self.attr['discount_factors'] = new_dfx\n else:\n # Implement nonparametric discounting.\n self.attr['discount_factors'] = discount_factors\n\n # Optional argument: nonparametric weight on y_t in the CES function.\n if unrestricted_weights is None:\n # We apply the g() function here so that y_weights can be used identically below\n df = self.attr['discount_factors']\n y_weights = {t: y_scale * d_t ** (gamma - 1.0) for t, d_t in df.items()}\n self.attr['y_weights'] = y_weights\n else:\n # Nonparametric weight: no g() function applied in this case.\n self.attr['y_weights'] = unrestricted_weights\n\n self._check_attributes_nonstationary = partial(check_attributes_nonstationary, self)\n self._check_attributes_nonstationary()", "def implied_discount_factor(p1: Instrument, c1: Instrument, p2: Instrument, c2: Instrument) -> float:\n return (c1.price - p1.price - c2.price + p2.price)/ (c2.strike - c1.strike)", "def set_discount_rate(self, order_value, discount_rate):\r\n if float(discount_rate) == 0:\r\n self.discount_rate = float(RetailCustomer.discount_rate)\r\n else:\r\n self.discount_rate = 
float(discount_rate)", "def discount(ir, period):\n\treturn ir.discount(period)", "def get_discount(self, price):\r\n pass", "def discount_type(self, discount_type):\n\n self._discount_type = discount_type", "def __init__(self, alpha, beta, gamma, discount_factors, y_scale,\n unrestricted_weights=None, discounting=None, warmglow_type=\"constant\"):\n self.attr = dict()\n self.attr['y_scale'] = y_scale # weight on utility from charity euro\n self.attr['alpha'] = alpha # warm glow parameter\n self.attr['gamma'] = gamma # correlation aversion\n self.attr['beta'] = beta # risk aversion for self and charity euro\n self.attr[\"warmglow_type\"] = warmglow_type\n\n np.testing.assert_equal(warmglow_type in [\"constant\", \"linear\"], True)\n\n if discounting is not None:\n # Implement exponential discounting or hyperbolic discounting\n np.testing.assert_equal(discounting in ['exponential', 'hyperbolic'], True)\n\n if discounting in ['hyperbolic']:\n df_beta = discount_factors[0]\n df_delta = discount_factors[1]\n\n new_dfx = {\n t: (df_beta * df_delta ** t if t > 0.0 else 1) for t in discount_factors.keys()\n }\n elif discounting in ['exponential']:\n df_delta = discount_factors[0]\n new_dfx = {t: (df_delta ** t if t > 0.0 else 1) for t in discount_factors.keys()}\n self.attr['discount_factors'] = new_dfx\n else:\n # Implement nonparametric discounting.\n self.attr['discount_factors'] = discount_factors\n\n # Optional argument: nonparametric weight on y_t in the CES function.\n if unrestricted_weights is None:\n df = self.attr['discount_factors']\n y_weights = {t: y_scale for t, d_t in df.items()}\n self.attr['y_weights'] = y_weights\n else:\n # Nonparametric weight: no g() function applied in this case.\n self.attr['y_weights'] = unrestricted_weights\n\n self._check_attributes_warmglow = partial(check_attributes_warmglow, self)\n self._check_attributes_warmglow()", "def base_discount_amount(self, base_discount_amount):\n\n self._base_discount_amount = base_discount_amount", "def KartDiscreteSkip(KartMultiDiscretizer):\n\n def __init__(self, env, max_skip):\n super(KartDiscreteSkip, self).__init__(env)\n\n self.max_skip = max_skip\n\n def reset(self, **kwargs):\n observation = super(KartDiscreteSkip, self).reset(**kwargs)\n observation, _, _, _ = self.env.step(self._actions[0].copy())\n return observation", "def _get_discount(self):\n\n # For every 2 PENS, one free discount\n number_of_pens = len([x for x in self._products if x.code == 'PEN'])\n discount = 5.0 * int(number_of_pens / 2)\n\n # If there are more than 3 T-Shirts in the basket, 5 EUR of discount in every of them (25%)\n number_of_tshirts = len([x for x in self._products if x.code == 'TSHIRT'])\n if number_of_tshirts >= 3:\n discount += 5.0 * number_of_tshirts\n\n return discount", "def discount_amount(self, discount_amount):\n\n self._discount_amount = discount_amount", "def discount_amount(self, discount_amount):\n\n self._discount_amount = discount_amount", "def discount_amount(self, discount_amount):\n\n self._discount_amount = discount_amount", "def discounted(self, discounted):\n\n self._discounted = discounted", "def __init__(self, env):\n super().__init__(env) \n # beta of entropy used in A2C\n self.beta = 0.9\n # loss function of A2C value_model is mse\n self.loss = 'mse'", "def __init__(self, *args):\n methods = [\n OP_DOM,\n OP_DUA,\n PR_DOM,\n PR_DUA\n ]\n super(RWGDominantSystem, self).__init__(*args)\n self.main = discretize(args[0].main, *methods)\n self.cavities = [discretize(grid, *methods) for grid in 
args[0].cavities]\n self.use_strong_form = False", "def __init__(self, mdp, discount=0.9, iterations=100):\n super().__init__()\n self.mdp = mdp\n self.discount = discount\n self.iterations = iterations\n self.epsilon = 0.01\n self.values, self.policy = self.value_iteration()", "def __init__(self, mdp, discount = 0.9, iterations = 1000):\n ValueIterationAgent.__init__(self, mdp, discount, iterations)", "def __init__(self, mdp, discount = 0.9, iterations = 1000):\n ValueIterationAgent.__init__(self, mdp, discount, iterations)", "def __init__(self, mdp, discount = 0.9, iterations = 1000):\n ValueIterationAgent.__init__(self, mdp, discount, iterations)", "def __init__(self, mdp, discount = 0.9, iterations = 1000):\n ValueIterationAgent.__init__(self, mdp, discount, iterations)", "def __init__(self, mdp, discount = 0.9, iterations = 1000):\n ValueIterationAgent.__init__(self, mdp, discount, iterations)", "def discount_and_normalize_rewards(self, episode_rewards):\n # Get empty array with the same size as the rewards array\n discounted_episode_rewards = np.zeros_like(episode_rewards)\n\n # Variable that stores value of the discounted reward being calculated by the loop\n current_reward = 0.0\n # Loop that does the magic\n for i in reversed(range(len(episode_rewards))):\n # Calculate the discounted reward\n current_reward = current_reward * gamma + episode_rewards[i]\n # Store it in the array\n discounted_episode_rewards[i] = current_reward\n\n # Normalize.\n mean = np.mean(discounted_episode_rewards)\n std = np.std(discounted_episode_rewards)\n discounted_episode_rewards = (discounted_episode_rewards - mean) / (std)\n\n return discounted_episode_rewards", "def test_noop(self):\n base_env = _DiscreteEnvironmentOneReward(\n action_dtype=np.int64,\n reward_spec=specs.Array(dtype=np.float32, shape=()))\n wrapped_env = wrappers.DelayedRewardWrapper(base_env, accumulation_period=1)\n base_episode_reward = _episode_reward(base_env)\n wrapped_episode_reward = _episode_reward(wrapped_env)\n self.assertEqual(base_episode_reward, wrapped_episode_reward)", "def train_disc(self, real_batch, noise_batch): \n with tf.GradientTape() as tape:\n disc_loss = self.disc_loss(real_batch, noise_batch)\n disc_grad = tape.gradient(disc_loss, self.disc_model.trainable_variables)\n self.disc_optimizer.apply_gradients(zip(disc_grad, self.disc_model.trainable_variables))\n\n return disc_loss", "def loyalty_discount(self):\n if self.cart.user.is_loyal:\n self.cart._total *= 0.98", "def discount_and_normalize_rewards(episode_rewards):\n # Get empty array with the same size as the rewards array\n discounted_episode_rewards = np.zeros_like(episode_rewards)\n\n # Variable that stores value of the discounted reward being calculated by the loop\n current_reward = 0.0\n # Loop that does the magic\n for i in reversed(range(len(episode_rewards))):\n # Calculate the discounted reward\n current_reward = current_reward * gamma + episode_rewards[i]\n # Store it in the array\n discounted_episode_rewards[i] = current_reward\n\n # Normalize.\n mean = np.mean(discounted_episode_rewards)\n std = np.std(discounted_episode_rewards)\n discounted_episode_rewards = (discounted_episode_rewards - mean) / (std)\n\n return discounted_episode_rewards", "def discount(self, r, done, s):\n discounted_r, cumul_r = np.zeros_like(r), 0\n for t in reversed(range(0, len(r))):\n cumul_r = r[t] + cumul_r * self.gamma\n discounted_r[t] = cumul_r\n return discounted_r", "def discounted_reward(self, discount):\n\n tl = len(self)\n return (1 - discount) * 
np.sum(discount ** np.arange(tl) * self.rewards)", "def discount_rewards(self, r):\n discounted_r = np.zeros_like(r)\n running_add = 0\n for t in reversed(range(0, r.size)):\n if r[t] != 0:\n running_add = 0 # Pong-specific\n running_add = running_add * self.gamma + r[t]\n discounted_r[t] = running_add\n \n #print(\"Mean reward before normalized: {}\".format(np.mean(discounted_r)))\n mu = np.mean(discounted_r)\n var = np.var(discounted_r)\n discounted_r -= mu \n discounted_r /= np.sqrt(var+1e-6)\n return discounted_r", "def env_init(self):\r\n self.dispersionModel = InvasiveUtility.Levin\r\n notDirectedG = networkx.Graph(self.simulationParameterObj.graph)\r\n adjMatrix = adjacency_matrix(notDirectedG)\r\n\r\n edges = self.simulationParameterObj.graph.edges()\r\n simulationParameterObj = self.simulationParameterObj\r\n if self.dispersionModel == InvasiveUtility.Levin:\r\n parameters = InvasiveUtility.calculatePath(notDirectedG,adjMatrix, edges, simulationParameterObj.downStreamRate,\r\n simulationParameterObj.upStreamRate)\r\n C = (1 - simulationParameterObj.upStreamRate * simulationParameterObj.downStreamRate) / (\r\n (1 - 2 * simulationParameterObj.upStreamRate) * (1 - simulationParameterObj.downStreamRate))\r\n self.dispertionTable = np.dot(1 / C, parameters)\r\n self.germinationObj = GerminationDispersionParameterClass(1, 1)\r\n #calculating the worst case fully invaded rivers cost\r\n worst_case = repmat(1, 1, self.simulationParameterObj.nbrReaches * self.simulationParameterObj.habitatSize)[0]\r\n cost_state_unit = InvasiveUtility.get_unit_invaded_reaches(worst_case,\r\n self.simulationParameterObj.habitatSize) * self.actionParameterObj.costPerReach\r\n stateCost = cost_state_unit + InvasiveUtility.get_invaded_reaches(\r\n worst_case) * self.actionParameterObj.costPerTree\r\n stateCost = stateCost + InvasiveUtility.get_empty_slots(worst_case) * self.actionParameterObj.emptyCost\r\n costAction = InvasiveUtility.get_budget_cost_actions(repmat(3, 1, self.simulationParameterObj.nbrReaches)[0],\r\n worst_case, self.actionParameterObj)\r\n networkx.adjacency_matrix(self.simulationParameterObj.graph)\r\n return \"VERSION RL-Glue-3.0 PROBLEMTYPE non-episodic DISCOUNTFACTOR \" + str(\r\n self.discountFactor) + \" OBSERVATIONS INTS (\" + str(\r\n self.simulationParameterObj.nbrReaches * self.simulationParameterObj.habitatSize) + \" 1 3) ACTIONS INTS (\" + str(\r\n self.simulationParameterObj.nbrReaches) + \" 1 4) REWARDS (\" + str(self.Bad_Action_Penalty)+\" \"+str(\r\n -1 * (costAction + stateCost)) + \") EXTRA \"+str(self.simulationParameterObj.graph.edges()) + \" BUDGET \"+str(self.actionParameterObj.budget) +\" by Majid Taleghan.\"", "def compute_amount_discounted(promotion, amount):\n if promotion.promo_type == '1': # % off\n amount_discounted = promotion.promo_amount * amount / Decimal(100)\n amount_discounted = Decimal(str(round(amount_discounted, 2)))\n elif promotion.promo_type == '2': # $ off\n if promotion.promo_amount < amount:\n amount_discounted = promotion.promo_amount\n else:\n amount_discounted = amount\n elif promotion.promo_type == '3': # fixed $ cost\n if promotion.promo_amount < amount:\n amount_discounted = amount - promotion.promo_amount\n else:\n # If you have a fixed cost promo of $20, but your items \n # only cost $10, you don't save.\n amount_discounted = 0\n LOG.debug('compute discount: amount_discounted = %s' % amount_discounted)\n return amount_discounted", "def disc_step(real_data,fake_data):\n with tf.GradientTape() as tape:\n loss = 
discriminator_loss(real_data,fake_data)\n loss = tf.add_n([loss] + discriminator.losses)\n gradients = tape.gradient(loss, discriminator.trainable_variables)\n d_optimizer.apply_gradients(zip(gradients, discriminator.trainable_variables))\n return loss", "def train_step_disc(self, z, y):\n with tf.GradientTape() as tape:\n loss = Dloss(self.disc_model, z, y)\n gradients = tape.gradient(loss, self.disc_model.trainable_variables)\n self.disc_optimizer.apply_gradients(\n (g, v) for (g,v) in zip(gradients, self.disc_model.trainable_variables))", "def __init__(self, discount_amount=None, discount_percentage=None, original_price=None, price_treatment=None): # noqa: E501 # noqa: E501\n self._discount_amount = None\n self._discount_percentage = None\n self._original_price = None\n self._price_treatment = None\n self.discriminator = None\n if discount_amount is not None:\n self.discount_amount = discount_amount\n if discount_percentage is not None:\n self.discount_percentage = discount_percentage\n if original_price is not None:\n self.original_price = original_price\n if price_treatment is not None:\n self.price_treatment = price_treatment", "def base_discount_amount(self):\n return self._base_discount_amount", "def __init__(self, env = GridWorldEnv(), discountingFactor = 0.9,\n convergenceThreshold = 1e-4, iterationThreshold = 1000,\n mode='prod'):\n self.env = env\n self.gamma = discountingFactor\n self.th = convergenceThreshold\n self.maxIter = iterationThreshold\n self.stateCount = self.env.get_statespace_len()\n self.actionCount = self.env.get_actionspace_len()\n self.uniformActionProbability = 1.0/self.actionCount\n self.stateDict = self.env.stateDict\n self.actionDict = self.env.actionDict\n self.mode = mode\n self.stateCount = self.env.get_statespace_len()\n self.V = np.zeros(self.stateCount)\n self.Q = [np.zeros(self.actionCount) for s in range(self.stateCount)]\n self.Policy = np.zeros(self.stateCount)\n self.totalReward = 0\n self.totalSteps = 0", "def discount(self):\r\n return DiscountResource(self)", "def test_fixed_simple():\n env = FixedRating(num_users=1,\n num_items=2,\n rating_frequency=1.0,\n num_init_ratings=0)\n assert env.name == 'fixed'\n users, items, ratings = env.reset()\n\n # Test that the users and items have empty features.\n assert users[0].shape == (0,)\n assert items[0].shape == (0,)\n assert env.online_users[0].shape == (0,)\n\n # Recommend item 0, we shouldn't observe new users or items.\n users, items, ratings, _ = env.step(np.array([[0]]))\n assert users == {}\n assert items == {}\n\n # Test that item 0 will have a rating of 1.\n assert ratings[(0, 0)][0] == 1\n\n # Recommend item 1, the environment should rate it 5.\n users, items, ratings, _ = env.step(np.array([[1]]))\n assert users == {}\n assert items == {}\n assert ratings[(0, 1)][0] == 5\n\n # Test the internal state of the environment.\n assert len(env.users) == 1\n assert env.users[0].shape == (0,)\n assert len(env.items) == 2\n assert env.items[0].shape == (0,)\n assert len(env.ratings) == 2\n assert env.ratings[0, 0][0] == 1\n assert env.ratings[0, 1][0] == 5", "def discretize(self, N, **kwds):\n\n # test one item to determine case handling\n item0 = list(self.conditional.values())[0]\n\n if type(item0) is float:\n # degenerate case. 
Treat the parameterization as constant.\n return self.dstns[0].discretize(N, **kwds)\n\n if type(item0) is list:\n return TimeVaryingDiscreteDistribution(\n [self[i].discretize(N, **kwds) for i, _ in enumerate(item0)]\n )", "def add_discount(self, discount):\n self.discounts.append(discount)", "def initialize(self, num_states, num_action, discount):\n self.num_states = num_states\n self.num_action = num_action\n if self.exploration:\n self.explorer.reset()\n\n self.means = np.zeros((num_states, num_action))\n self.visits = np.zeros((num_states, num_action)) # Visit counts", "def add_disc_sum_rew(trajectories, gamma):\n\n for trajectory in trajectories:\n if gamma < 0.999: # don't scale for gamma ~= 1\n rewards = trajectory['rewards'] * (1 - gamma)\n else:\n rewards = trajectory['rewards']\n disc_sum_rew = discount(rewards, gamma)\n trajectory['disc_sum_rew'] = disc_sum_rew", "def _discount_rewards(self, non_discounted_rewards):\n discounted_rewards = [0.0] * len(non_discounted_rewards)\n total_rewards = 0\n for t in reversed(range(len(non_discounted_rewards))):\n total_rewards = total_rewards * self.discount_factor + non_discounted_rewards[t]\n discounted_rewards[t] = total_rewards\n return discounted_rewards", "def depreciation(self):\n\n _depreciation = np.full(self.num_years, self.fixed_dev_cost / self.num_years)\n return _depreciation", "def add_disc_sum_rew(trajectories, gamma):\n for trajectory in trajectories:\n if gamma < 0.999: # don't scale for gamma ~= 1\n rewards = trajectory['rewards'] * (1 - gamma)\n else:\n rewards = trajectory['rewards']\n disc_sum_rew = discount(rewards, gamma)\n trajectory['disc_sum_rew'] = disc_sum_rew", "def reset_env(self):\n self.unemployment = self.init_unemployment\n self.inflation = self.init_inflation\n self.interest = self.init_interest\n\n labor_demand = (1 - self.unemployment) * self.n_agents\n production = self.firm.production_function(labor_demand)\n labor_costs = self.init_wage * labor_demand\n price = (1 + self.firm.markup) / (1 - self.firm.alpha) * labor_costs / production\n profit = price * production - labor_costs\n self.firm.reset(\n price=price,\n production=production,\n labor_costs=labor_costs,\n labor_demand=labor_demand,\n average_profit=profit,\n profit=profit,\n )\n\n wage_increases = {}\n demand = {}\n consume = production / self.n_agents\n for agent_id, agent in self.agents.items():\n agent.reset(labor=1 - self.init_unemployment, consumption=consume)\n wage_increases[agent_id] = self.inflation\n demand[agent_id] = consume\n\n return wage_increases, demand", "def helper_discount_rewards(rewards, discount_rate):\n discounted_rewards = np.zeros(len(rewards))\n cumulative_rewards = 0\n for step in reversed(range(len(rewards))):\n cumulative_rewards = rewards[step] + cumulative_rewards * discount_rate\n discounted_rewards[step] = cumulative_rewards\n return discounted_rewards", "def mc_importance_sampling(env, behavior_policy, target_policy, num_episodes, discount_factor=1.0,\n sampling_function=sample_episode):\n\n # Keeps track of current V and count of returns for each state\n # to calculate an update.\n V = defaultdict(float)\n returns_count = defaultdict(float)\n \n # YOUR CODE HERE\n \n epsilon = 1e-6\n \n \n # Due to the structure of the gym environment, it is not trivial to map the entire state space\n # so we only map the state space of the BlackJack env\n count_zeros = False\n if (isinstance(env.observation_space, gym.spaces.tuple_space.Tuple)):\n if (len(env.observation_space.spaces) == 3):\n count_zeros = True\n \n 
state_tuples = [(first, second, bool(third)) for first in range(2,env.observation_space.spaces[0].n)\n for second in range(1,env.observation_space.spaces[1].n)\n for third in range(env.observation_space.spaces[2].n)]\n returns = {state_tuple: [] for state_tuple in state_tuples}\n \n if count_zeros:\n returns_count = Counter({state_tuple: 0 for state_tuple in state_tuples})\n \n for episode in tqdm(range(num_episodes)): # num_episodes\n \n env.reset()\n states, actions, rewards, dones = sampling_function(env, behavior_policy)\n p_return = 0\n \n pi = target_policy.get_probs(states, actions)\n b = (behavior_policy.get_probs(states, actions) + epsilon)\n pi_div_b = target_policy.get_probs(states, actions) / (behavior_policy.get_probs(states, actions) + epsilon)\n\n for index in reversed(range(len(states))): # Reverse so we loop in opposite direction through timesteps\n c_state = states[index]\n c_action = actions[index]\n c_reward = rewards[index]\n\n p_return = discount_factor * p_return + c_reward\n W = np.cumprod(pi_div_b[index:])\n \n p_return = W[0] * p_return\n if len(returns[c_state]) == 0:\n returns[c_state] = [p_return]\n else:\n returns[c_state].append(p_return)\n\n if count_zeros:\n returns_count[c_state] += 1\n \n V = {state: np.nan_to_num(np.mean(value)) for (state, value) in returns.items()}\n \n if count_zeros:\n zero_counts = [True for item in list(returns_count) if returns_count[item] == 0]\n no_of_zero = sum(zero_counts)\n if no_of_zero>0:\n print(f\"Did not reach {no_of_zero} states in MC estimation. Value estimation for these states is missing.\")\n else:\n print(\"Reached all states in MC estimation.\")\n \n return V", "def discount_percentage(self, discount_percentage):\n\n self._discount_percentage = discount_percentage", "def discount_rewards(rewards, gamma):\n reward_shape = rewards.shape\n if len(reward_shape) == 1:\n discounted_r = np.zeros(shape=(*reward_shape, 1), dtype=np.float)\n else:\n discounted_r = np.zeros(shape=reward_shape, dtype=np.float)\n running_add = 0\n\n for t in reversed(range(0, rewards.size)):\n running_add = running_add * gamma + rewards[t]\n discounted_r[t] = running_add\n\n return discounted_r", "def get_frequency_discount(self):\n if self.frequency == 3:\n return getattr(settings, \"DISCOUNT_3_MONTHS\", 0)\n elif self.frequency == 6:\n return getattr(settings, \"DISCOUNT_6_MONTHS\", 0)\n elif self.frequency == 12:\n return getattr(settings, \"DISCOUNT_12_MONTHS\", 0)\n else:\n return 0", "def discount(rewards, discount_factor=.99):\n # Compute discounted rewards (trust me this works and hopefully it's super fast)\n timesteps = len(rewards) # make into matrix\n rewards = tf.convert_to_tensor([rewards],dtype=tf.float32)\n # create lower triangular matrix of discount_factor weights\n T = tf.convert_to_tensor([[max(1+i-j,0) for j in range(timesteps)] for i in range(timesteps)],dtype=tf.float32)\n T = tf.math.pow(discount_factor, T)\n T = tf.linalg.band_part(T, -1, 0)\n # apply discount factor\n return tf.matmul(rewards, T)", "def __init__(self):\n super().__init__()\n self.TERRAIN_VARIANCE = 0.0", "def policy_eval_v(policy, env, discount_factor=1.0, theta=0.00001):\n # Start with an all 0 value function\n V = np.zeros(env.nS)\n \n # loop door alle states heen \n # sla de oude state value op \n # Bereken de nieuwe state value door de SOM (kans omhoog * loop over waar je terrecht kunt komen * reward) kans omlaag..\n # kijk of je nog door moet gaan of stoppen\n delta = 1000 \n while delta > theta:\n # for x in range(2):\n delta = 0\n \n# loop throw 
possible states\n for state in range(env.nS):\n old_state_value = V[state]\n new_state_value = 0\n\n # loop shrow possible actions in state\n for action in range(env.nA):\n\n # print(\"kans omhoog\", policy[state][action])\n # print(\"kans omhoog uitkomen\", env.P[state][action][0][0])\n # print(\"direct reward\",env.P[state][action][0][2] )\n # print(\"value of that new state\", discount_factor * V[env.P[state][action][0][1]] )\n\n current_state_value = policy[state][action] * env.P[state][action][0][0] * ( env.P[state][action][0][2] + ( discount_factor * V[env.P[state][action][0][1]] ) ) \n# print(\"current state value\", current_state_value)\n new_state_value += current_state_value\n \n delta = max(delta, abs(old_state_value - new_state_value))\n V[state] = new_state_value\n# print(V[state])\n# print(\"delta\", delta)\n return np.array(V)", "def disc_val(self, val_data, batch_size):\n fakes = self.generate_poses(len(val_data))\n labels = np.array([1] * len(val_data) + [0] * len(fakes))\n data = np.concatenate([val_data, fakes])\n return self.discriminator.evaluate(data, labels,\n batch_size=batch_size)", "def set_all(self):\n\n self.ecm = EnergyConsumptionModel(\n vehicle_type=\"car\",\n vehicle_size=list(self.array.coords[\"size\"].values),\n powertrains=list(self.array.coords[\"powertrain\"].values),\n cycle=self.cycle,\n gradient=self.gradient,\n country=self.country,\n )\n\n diff = 1.0\n\n while diff > 0.0001:\n old_driving_mass = self[\"driving mass\"].sum().values\n self.set_vehicle_mass()\n self.set_power_parameters()\n self.set_component_masses()\n self.set_auxiliaries()\n self.set_power_battery_properties()\n self.set_battery_properties()\n self.set_energy_stored_properties()\n self.set_recuperation()\n\n if \"FCEV\" in self.array.powertrain.values:\n self.set_fuel_cell_power()\n self.set_fuel_cell_mass()\n\n # if user-provided values are passed,\n # they override the default values\n if \"capacity\" in self.energy_storage:\n self.override_battery_capacity()\n\n diff = (self[\"driving mass\"].sum().values - old_driving_mass) / self[\n \"driving mass\"\n ].sum()\n\n self.set_ttw_efficiency()\n self.calculate_ttw_energy()\n self.set_ttw_efficiency()\n\n self.set_range()\n\n if self.target_range:\n self.override_range()\n\n self.set_share_recuperated_energy()\n self.set_battery_fuel_cell_replacements()\n self.adjust_cost()\n\n self.set_electric_utility_factor()\n self.set_electricity_consumption()\n self.set_costs()\n self.set_hot_emissions()\n self.set_particulates_emission()\n self.set_noise_emissions()\n self.create_PHEV()\n if self.drop_hybrids:\n self.drop_hybrid()\n\n self.remove_energy_consumption_from_unavailable_vehicles()", "def get_discount(self, *args: Any) -> DiscountStrategy:\n\n return self.discount_strategy.get_discount(*args)", "def genericDiscountRate(self, items):\n rate = float(items)\n if items >= self.minUnits:\n rate = (float(int(items / self.divisor)) * self.multiplier * (1.0 - self.discountPerc)) + (float(int(items % self.divisor)) * self.multiplier * (1.0 - self.discountPerc))\n return rate", "def __init__(self, eps: float = 1e-9):\n super(DiceLoss, self).__init__()\n self.eps = eps", "def discount(self):\n discount_dict = {gem_color : 0 for gem_color in GemColor}\n for card in self.cards_possessed:\n discount_dict[card.discount_profit] += 1\n return GemsCollection(discount_dict)", "def default(cls, environment, critic=None, exploration_noise=None, *args, **kwargs):\n if critic is None:\n critic = NNEnsembleQFunction.default(environment)\n if 
exploration_noise is None:\n noise = Constant(0.1)\n return super().default(\n environment, critic=critic, exploration_noise=noise, *args, **kwargs\n )", "def discount(elev, slr, discount_rate=0.025):\n # feet to centimeters with cm/year rise estimate. TODO: This is a rough,\n # terrible estimate and should be replaced to reflect actual science\n # (i.e., not the bathtub model)\n T = ((6 - elev) / 0.0328084) / slr\n run = np.repeat(1, 1000)\n buf = np.repeat(0, T)\n lost = np.append(buf, np.repeat(1, 1000 - T))\n return 1 - (np.npv(discount_rate, lost) / np.npv(discount_rate, run)), T", "def distribution(self, env):\n pass", "def discount_reward(reward, gamma):\n discount_r = np.zeros_like(reward)\n r_total = 0\n for _ in reversed(range(0, reward.size)):\n if reward[_] != 0:\n r_total = 0\n r_total = r_total * gamma + reward[_]\n discount_r[_] = r_total\n return discount_r", "def tiered_discount(request):\n\n cart = Cart.objects.from_request(request)\n discount = TieredDiscount.objects.valid(cart.total)\n if discount:\n amount = discount.amount(cart.total)\n else:\n amount = None\n\n return {\n 'tiered_discount': discount,\n 'tiered_discount_amount': amount,\n }", "def discount(self, rewards, dones, gamma):\n discounted = []\n ret = 0\n for reward, done in zip(rewards[::-1], dones[::-1]):\n ret = reward + gamma * ret * (1. - done)\n discounted.append(ret)\n return discounted[::-1]", "def __init__(self):\n self.counts = [0] * 10\n self.values = [2000] * 10\n self.epsilon = 0.1", "def calc_diesel_equiv_captured (self):\n if self.generation_wind_proposed == 0:\n excess_percent = 0\n else:\n excess_percent = self.excess_energy / self.generation_wind_proposed\n excess_captured_percent = excess_percent * \\\n (self.cd['percent excess energy capturable'] / 100.0)\n if self.comp_specs['secondary load']:\n net_excess_energy = excess_captured_percent * \\\n self.generation_wind_proposed\n else:\n net_excess_energy = 0\n\n #~ conversion = 0.99/0.138/0.8/293\n conversion = self.cd['efficiency electric boiler']/ \\\n (1/constants.mmbtu_to_gal_HF)/ \\\n self.cd['efficiency heating oil boiler']/\\\n (constants.mmbtu_to_kWh)\n self.diesel_equiv_captured = net_excess_energy * conversion\n\n #~ print 'self.diesel_equiv_captured ',self.diesel_equiv_captured", "def __post_init__(self): \n c_model = self.concentration_model\n # Check if the diameter is vectorised.\n if (isinstance(c_model.infected, InfectedPopulation) and not np.isscalar(c_model.infected.expiration.diameter)\n # Check if the diameter-independent elements of the infectious_virus_removal_rate method are vectorised.\n and not (\n all(np.isscalar(c_model.virus.decay_constant(c_model.room.humidity, c_model.room.inside_temp.value(time)) + \n c_model.ventilation.air_exchange(c_model.room, time)) for time in c_model.state_change_times()))):\n raise ValueError(\"If the diameter is an array, none of the ventilation parameters \"\n \"or virus decay constant can be arrays at the same time.\")", "def __init__(self):\n super().__init__()\n self.type = 'UniformDiscrete'\n self.dimensionality = 1\n self.distType = 'Discrete'\n self.memory = True", "def discount_reward(r_dic, gamma):\n r = 0\n for i in range(len(r_dic) - 1, -1, -1):\n if r_dic[i] != 0:\n r = r_dic[i]\n else:\n r = r * gamma\n r_dic[i] = r\n r_dic = (r_dic - r_dic.mean()) / (r_dic.std() + 1e-8)\n return r_dic", "def step_env(self):\n raise NotImplementedError\n # Not needed for this homework", "def initializeDomainCondition(self):\n print('Initialize the condition.')\n\n self.fluidPDF = 
np.zeros([self.typesFluids, self.ny, self.nx, 9])\n self.fluidsDensity = np.zeros([self.typesFluids, self.ny, self.nx])\n self.physicalVX = np.zeros([self.ny, self.nx])\n self.physicalVY = np.zeros([self.ny, self.nx])\n self.forceX = np.zeros([self.typesFluids, self.ny, self.nx])\n self.forceY = np.zeros([self.typesFluids, self.ny, self.nx])\n if (self.PictureExistance == \"'no'\"):\n for i in sp.arange(self.ny):\n for j in sp.arange(self.nx):\n# for k in sp.arange(self.typesFluids):\n tmpCenterX = int(self.nx / 2); tmpCenterY = int(self.ny / 2)\n if (self.isDomain[i, j] == True):\n# if (sp.sqrt((i - tmpCenterY) * (i - tmpCenterY) + (j - \\\n# tmpCenterX) * (j - tmpCenterX)) <= 15.):\n# if (i < 15 and np.abs(j - tmpCenterX) < 15):\n# if ((i >0 and i < 28) and (j >=102 and j < 154)):\n if (i < self.ny - 10):\n# if (i < 128 and i > 70):\n self.fluidsDensity[0, i, j] = self.initialDensities[0]\n self.fluidPDF[0, i, j, :] = self.weightsCoeff * self.initialDensities[0]\n self.fluidsDensity[1, i, j] = self.backgroundDensities[1]\n self.fluidPDF[1, i, j, :] = self.weightsCoeff * self.backgroundDensities[1]\n else:\n self.fluidsDensity[1, i, j] = self.initialDensities[1]\n self.fluidPDF[1, i, j, :] = self.weightsCoeff * self.initialDensities[1]\n self.fluidsDensity[0, i, j] = self.backgroundDensities[0]\n self.fluidPDF[0, i, j, :] = self.weightsCoeff * self.backgroundDensities[0] \n \n if (self.isCycles == \"'no'\" and self.PictureExistance == \"'yes'\"):\n for i in sp.arange(self.ny):\n for j in sp.arange(self.nx):\n if (i < self.ny - 20):\n # if ( np.abs(i - 60) < 20):\n for k in sp.arange(self.typesFluids):\n if (k == 0 and self.isDomain[i, j] == 1):\n self.fluidPDF[k, i, j, :] = self.initialDensities[k] * self.weightsCoeff\n self.fluidsDensity[k, i, j] = self.initialDensities[k]\n if (k == 1 and self.isDomain[i, j] == 1):\n self.fluidPDF[k, i, j, :] = self.backgroundDensities[k] * self.weightsCoeff\n self.fluidsDensity[k, i, j] = self.backgroundDensities[k]\n else:\n for k in sp.arange(self.typesFluids):\n if (k == 0 and self.isDomain[i, j] == 1):\n self.fluidPDF[k, i, j, :] = self.backgroundDensities[k] * self.weightsCoeff\n self.fluidsDensity[k, i, j] = self.backgroundDensities[k]\n if (k == 1 and self.isDomain[i, j] == 1):\n self.fluidPDF[k, i, j, :] = self.initialDensities[k] * self.weightsCoeff\n self.fluidsDensity[k, i, j] = self.initialDensities[k]\n elif (self.isCycles == \"'yes'\" and self.PictureExistance == \"'yes'\"):\n username = getpass.getuser()\n pathIniFile = '/home/' + username + '/LBMInitial/'\n if (os.path.exists(pathIniFile) == True): \n #for the old fluid distribution\n #the domain of the network\n iniFile = tb.open_file(pathIniFile + 'SimulationResults.h5', 'r')\n for i in sp.arange(self.typesFluids-1):\n self.fluidsDensity[i, :-30, :] = eval('iniFile.root.FluidMacro.FluidDensityType%gin%d[:-30, :]' % (i, self.lastStep))\n self.fluidsDensity[i, -30:, :] = self.backgroundDensities[i]\n for j in sp.arange(self.ny):\n for k in sp.arange(self.nx):\n self.fluidPDF[i, j, k, :] = self.weightsCoeff * \\\n self.fluidsDensity[i, j, k]\n iniFile.close()\n# for the new fluid in the domain\n for i in sp.arange(self.ny):\n for j in sp.arange(self.nx):\n if (i < self.ny - 30 and self.isDomain[i, j] == 1):\n self.fluidsDensity[-1, i, j] = self.backgroundDensities[-1]\n self.fluidPDF[-1, i, j, :] = self.backgroundDensities[-1] * \\\n self.weightsCoeff\n# continue\n elif (i >= self.ny - 30 and self.isDomain[i, j] == 1):\n self.fluidsDensity[-1, i, j] = self.initialDensities[-1]\n 
self.fluidPDF[-1, i, j, :] = self.initialDensities[-1] * \\\n self.weightsCoeff\n else:\n print(\"There is no file for initializing the domain.\")\n sys.exit()", "def discount_rewards(r):\n\tdiscounted_r = np.zeros_like(r)\n\trunning_add = 0\n\tfor t in reversed(xrange(0, r.size)):\n\t\trunning_add = running_add * gamma + r[t]\n\t\tdiscounted_r[t] = running_add\n\treturn discounted_r", "def __init__(self):\n self.counts = [0] * 10\n self.values = [0] * 10\n self.epsilon = 0.1", "def discount_rewards(r):\n\tdiscounted_r = np.zeros_like(r)\n\trunning_add = 0\n\tfor t in reversed(range(0, r.size)):\n\t\trunning_add = running_add * gamma + r[t]\n\t\tdiscounted_r[t] = running_add\n\treturn np.array(discounted_r)", "def discount(r, gamma):\n discounted_r = np.zeros_like(r)\n running_add = 0\n for t in reversed(range(0, r.size)):\n running_add = running_add * gamma + r[t]\n discounted_r[t] = running_add\n return discounted_r", "def discounted(self):\n return self._discounted", "def reinforce(env, estimator_policy, estimator_value, num_episodes, discount_factor=1.0):\n\n # Keeps track of useful statistics\n stats = plotting.EpisodeStats(\n episode_lengths=np.zeros(num_episodes),\n episode_rewards=np.zeros(num_episodes)) \n \n Transition = collections.namedtuple(\"Transition\", [\"state\", \"action\", \"reward\", \"next_state\", \"done\"])\n \n for i_episode in range(num_episodes):\n # Reset the environment and pick the fisrst action\n state = env.reset()\n \n episode = []\n \n # One step in the environment\n for t in itertools.count():\n \n # Take a step\n #action_means = np.ndarray.flatten(estimator_policy.predict(state))\n #action = np.random.multivariate_normal(mean=action_means, cov=full_var)\n\n action = estimator_policy.predict(state)\n\n '''\n max_idx = np.argmax(np.abs(action))\n a_max = action[max_idx]\n\n if a_max > high_threshold or a_max < low_threshold: \n action_clipped = action / (10*np.abs(a_max))\n '''\n\n\n '''\n action_clipped = [np.max([np.min([action[0], high_threshold]), low_threshold]), \n np.max([np.min([action[1], high_threshold]),\n low_threshold])]\n '''\n\n next_state, reward, done, _ = env.step(action)\n\n '''\n if t > 50:\n done = True\n '''\n \n # Keep track of the transition\n episode.append(Transition(\n state=state, action=action, reward=reward, next_state=next_state, done=done))\n \n # Update statistics \n stats.episode_rewards[i_episode] += reward\n stats.episode_lengths[i_episode] = t\n \n '''\n # Print out which step we're on, useful for debugging.\n print(\"\\rStep {} @ Episode {}/{} ({})\".format(\n t, i_episode + 1, num_episodes, stats.episode_rewards[i_episode - 1]), end=\"\")\n # sys.stdout.flush()\n '''\n\n if done:\n break\n \n state = next_state\n \n monitor_epoch = 50\n if i_episode % monitor_epoch == 0 and i_episode > 0: \n print(\"avg reward : %f\" %(np.mean(stats.episode_rewards[i_episode-monitor_epoch:i_episode])))\n\n\n baseline_value = np.mean([cur_trans.reward for cur_trans in episode]) \n\n # Go through the episode and make policy updates\n for t, transition in enumerate(episode):\n # The return after this timestep\n total_return = sum(discount_factor**i * t.reward for i, t in enumerate(episode[t:]))\n \n advantage = total_return - baseline_value\n #advantage += np.max([0, baseline_value - v_prev])\n\n # Update our policy estimator\n estimator_policy.update(transition.state, advantage, transition.action)\n\n #print(p_action)\n\n if i_episode % 200 == 0 and i_episode > 0:\n plt.figure(1)\n plt.plot(transition.state[0], transition.state[1], 
'bo')\n if t == len(episode) - 1:\n plt.show()\n \n \n return stats", "def policy_eval(env, policy, V, discount_factor):\n policy_value = np.zeros(env.nS)\n for state, action in enumerate(policy):\n for probablity, next_state, reward, info in env.P[state][action]:\n policy_value[state] += probablity * (reward + (discount_factor * V[next_state]))\n\n return policy_value", "def _ion_densities(self):\n nD = self.ne_in*(6-self.zeff_in)/(5.)\n nC = self.ne_in*(self.zeff_in-1)/(30.)\n nC[np.where(nC<0)]=0.\n print(\"nC/nD: \"+str(np.mean(nC/nD)*100.)+\" %\")\n self.ni_in[0,:] = nD\n self.ni_in[1,:] = nC", "def discount_rewards_and_normalize(self, rewards):\n discounted_rewards = np.empty(len(rewards))\n cumulative_rewards = 0\n\n for step in reversed(range(len(rewards))):\n cumulative_rewards = rewards[step] + cumulative_rewards * self.gamma\n discounted_rewards[step] = cumulative_rewards\n\n reward_mean = discounted_rewards.mean()\n reward_std = discounted_rewards.std()\n\n return [(reward - reward_mean) / reward_std\n for reward in discounted_rewards]", "def discount_amount(self):\r\n customer = self.records.find_customers(str(self.__customer).strip())\r\n order_value = self.order_value\r\n discount = customer.get_discount(order_value)\r\n return discount", "def exam_cookie_discount():\n return DiscountPeriod.objects.get(id=settings.COOKIE_CORNER_EXAM_COOKIE_DISCOUNT_PERIOD_ID)", "def discount_rewards(r):\n discounted_r = np.zeros_like(r)\n running_add = 0\n for t in reversed(range(r.size)):\n running_add = running_add * gamma + r[t]\n discounted_r[t] = running_add\n return discounted_r", "def __init__(self, env):\n super().__init__(env)\n assert env.unwrapped.get_action_meanings()[1] == 'FIRE'\n assert len(env.unwrapped.get_action_meanings()) >= 3", "def discount_rewards(r):\r\n discounted_r = np.zeros_like(r)\r\n running_add = 0\r\n for t in reversed(range(0, r.size)):\r\n running_add = running_add * gamma + r[t]\r\n discounted_r[t] = running_add\r\n return discounted_r", "def __init__(self, env=None, tilesEnv=False):\n super(MarioEnv, self).__init__(env)\n self.resetCount = -1\n # reward is distance travelled. So normalize it with total distance\n # https://github.com/ppaquette/gym-super-mario/blob/master/ppaquette_gym_super_mario/lua/super-mario-bros.lua\n # However, we will not use this reward at all. It is only for completion.\n self.maxDistance = 3000.0\n self.tilesEnv = tilesEnv", "def uniform_discretization(grid: GridBase) -> float:\n dx_mean = np.mean(grid.discretization)\n if np.allclose(grid.discretization, dx_mean):\n return float(dx_mean)\n else:\n raise RuntimeError(\"Grid discretization is not uniform\")", "def discount_rewards(r):\n discounted_r = np.zeros_like(r)\n running_add = 0\n for t in reversed(range(0, r.size)):\n running_add = running_add * gamma + r[t]\n discounted_r[t] = running_add\n return discounted_r" ]
[ "0.59988624", "0.58988583", "0.5898098", "0.5847042", "0.5623778", "0.5441156", "0.5411965", "0.52358055", "0.5201164", "0.5162187", "0.5124095", "0.5123308", "0.5074123", "0.49902314", "0.4981849", "0.49234065", "0.49178696", "0.49120015", "0.4909685", "0.4880568", "0.4880568", "0.4880568", "0.4842317", "0.4835311", "0.4820173", "0.48053044", "0.47873893", "0.47873893", "0.47873893", "0.47873893", "0.47873893", "0.477873", "0.4764972", "0.47591075", "0.4694215", "0.4692726", "0.46885383", "0.467299", "0.46724895", "0.4662956", "0.46605796", "0.465452", "0.46169645", "0.46032658", "0.4582372", "0.4543534", "0.45432636", "0.45337507", "0.45331466", "0.45328075", "0.453217", "0.45157945", "0.45088276", "0.44963214", "0.44883707", "0.44859156", "0.44794148", "0.44786596", "0.44674537", "0.44639105", "0.44619116", "0.4444976", "0.4434704", "0.44314826", "0.44310352", "0.4430286", "0.442874", "0.4427647", "0.44155684", "0.4414568", "0.44092634", "0.44090137", "0.44037884", "0.43993706", "0.43988466", "0.43975404", "0.43953514", "0.4391648", "0.43822277", "0.43796065", "0.4375415", "0.436623", "0.4364084", "0.4361438", "0.43582633", "0.43567085", "0.43552417", "0.43518102", "0.43513715", "0.43489036", "0.43456942", "0.43429917", "0.4342235", "0.43418524", "0.4341347", "0.43398976", "0.4336467", "0.43336377", "0.4330677", "0.4328849" ]
0.5466874
5
Describes the `TimeStep` fields returned by `step()`. Override this method to define an environment that uses non-standard values for any of the items returned by `step()`. For example, an environment with array-valued rewards.
def time_step_spec(self) -> ts.TimeStep:
    return ts.time_step_spec(self.observation_spec(), self.reward_spec())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def step_env(self):\n raise NotImplementedError\n # Not needed for this homework", "def __init__(self, step_time, step=None):\n self.step_vector = step\n self.step_time = step_time\n self.ref_timer = None", "def test_no_timesteps_property(self):\n expected_values = {\n 'no_timesteps': 1000,\n 'no_sweeps': 10,\n 'no_channels': 4,\n }\n test_rec = rt.Recording(\n np.zeros(\n [\n expected_values['no_channels'],\n expected_values['no_timesteps'],\n expected_values['no_sweeps'],\n ]\n ),\n dt=0.1,\n )\n self.assertEqual(\n test_rec.no_timesteps,\n expected_values['no_timesteps'],\n 'Expected {} for `no_timesteps` property; got {} instead.'.format(\n expected_values['no_timesteps'], test_rec.no_timesteps\n ),\n )", "def get_time_step_values(self):\n return TensorMeshAppender.get_time_step_values(self)", "def get_time_step_values(self):\n return DiscretizeMeshReader.get_time_step_values(self)", "def get_time_step_values(self):\n return TensorMeshReader.get_time_step_values(self)", "def time_interval_prop(self, time_step, nsteps):\n world.time = TimeAxis(0.0, int(nsteps), float(time_step))\n print(\"Setting time\")", "def time_step_output(self, current_time, time_step):\n pass", "def get_time_step_values(self):\n return GravObsReader.get_time_step_values(self)", "def step(self, action: np.ndarray) -> 'EnvStep':\n ...", "def reset(self):\n self._timestep = np.array([0])", "def get_time_step_values(self):\n return GravGradReader.get_time_step_values(self)", "def _setVals(self, step=0):\n self.step = step", "def step(\n self,\n action: Union[Dict[str, Any], np.ndarray],\n env_id: Optional[np.ndarray] = None,\n ) -> Union[TimeStep, Tuple]:", "def obtain_field_timestep(self, field_name, tstep):\n return np.ndarray.flatten(self.data[\"Base\"][\"Zone1\"][\"FlowSolution_%04d\" % tstep][field_name][\" data\"][:,:,:])", "def GetTimestepValues(self):\n if self.__timesteps is None: self.__timesteps = self.__SetInputTimesteps()\n # self.__timesteps should already be of type list\n return self.__timesteps if self.__timesteps is not None else None", "def dt(self, _):\n raise NotImplementedError(\n \"We do not support setting dt/ time step except during setup\")", "def get_time_step_values(self):\n if self.need_to_read():\n self._read_up_front()\n return self._timesteps if self._timesteps is not None else None", "def step_time(self, timestep=None):\n if self._plumb is not None:\n self._plumb.step(timestep)\n self.update_conditions()\n\n # TODO(jacob): Consider if this function should return anything\n # about the state of the system: plumbing engine state, current\n # time, etc.", "def get_time_step_values(self):\n return TopoReader.get_time_step_values(self)", "def onTimeStep(self, timeStep):\n pass", "def test_step_properties(self, _step: PropertyMock):\n now = datetime.utcnow()\n _step.return_value = MagicMock(\n start_time=now,\n end_time=now,\n elapsed_time=0,\n is_visible=True\n )\n es = exposed.ExposedStep()\n self.assertEqual(now, es.start_time)\n self.assertEqual(now, es.end_time)\n self.assertEqual(0, es.elapsed_time)", "def get_time_step_values(self):\n return MagObsReader.get_time_step_values(self)", "def get_time_step_values(self):\n return OcTreeReader.get_time_step_values(self)", "def zero_timings(self):\r\n self.step = 0\r\n self.current_T = 0.0", "def generate_environment(timestep=3600,\n year_timer=2017,\n year_co2=2017,\n try_path=None,\n location=(51.529086, 6.944689),\n altitude=55,\n new_try=False):\n\n # Create environment\n timer = time.TimerExtended(timestep=timestep, 
year=year_timer)\n\n weather = weath.Weather(timer, useTRY=True, pathTRY=try_path,\n location=location, altitude=altitude,\n new_try=new_try)\n\n market = germanmarket.GermanMarket()\n co2em = co2.Emissions(year=year_co2)\n\n environment = env.EnvironmentExtended(timer=timer,\n weather=weather,\n prices=market,\n location=location,\n co2em=co2em)\n\n return environment", "def __init__(self, session, trial_nr, phase_durations, phase_names,\n parameters, timing, load_next_during_phase,\n verbose, condition='hrf'):\n super().__init__(session, trial_nr, phase_durations, phase_names,\n parameters, timing, verbose, load_next_during_phase)\n self.condition = condition\n self.last_fix_time, self.last_stim_time = 0.0", "def step(\n self, action: Union[numpy.ndarray, int], state: numpy.ndarray = None, dt: int = None\n ) -> tuple:\n data = super(AtariEnvironment, self).step(action=action, state=state, dt=dt)\n if state is None:\n observ, reward, terminal, info = data\n observ = self.gym_env.unwrapped.ale.getRAM() if self.obs_ram else observ\n return observ, reward, terminal, info\n else:\n state, observ, reward, terminal, info = data\n observ = ale_to_ram(self.gym_env.unwrapped.ale) if self.obs_ram else observ\n return state, observ, reward, terminal, info", "def __init__(self, iteration=None, detailed_step=None):\n self.iteration = iteration\n self.detailed_step = detailed_step\n self.objective = None\n self.wirelength = None\n self.density = None\n self.density_weight = None\n self.hpwl = None\n self.rmst_wl = None\n self.overflow = None\n self.goverflow = None\n self.route_utilization = None\n self.pin_utilization = None\n self.max_density = None\n self.gmax_density = None\n self.gamma = None\n self.tns = None\n self.wns = None\n self.eval_time = None", "def check_step(step):\n assert isinstance(step, list), \"Step must be a list\"\n assert (len(step) == 3 or len(step) == 4), \\\n \"Step must be a list of length 3 or 4 (to include temporary values)\"\n assert isinstance(step[0], type), (\n \"The first element of the step \"\n \"must be a class (e.g. 
measurement or a calibration routine)\")\n assert isinstance(step[2],\n dict), (\"The second element of the step \"\n \"must be a dictionary containing settings\")", "def set_first_machine_time_step(self, first_machine_time_step):", "def dump_step(self,status):\n\n L = self.level\n stats.add_to_stats(step=status.step, time=status.time, type='timing_step', value=time.time()-self.t0)\n stats.add_to_stats(step=status.step, time=status.time, type='niter', value=status.iter)\n stats.add_to_stats(step=status.step, time=status.time, type='residual', value=L.status.residual)\n\n pass", "def trace(self): # noqa\n\n # pylint: disable=protected-access,invalid-name\n def get_wrapper(env_step):\n \"\"\"Generate the wrapper for env.step().\"\"\"\n @wraps(env_step)\n def tracing_step(): # noqa\n \"\"\"Call *__monitor* for the next event if one exist before\n calling ``env.step()``.\"\"\"\n if len(self.__env._queue) > 0:\n t, prio, eid, event = self.__env._queue[0]\n self.__monitor(t, prio, eid, event)\n return env_step()\n\n return tracing_step\n\n self.__env.step = get_wrapper(self.__env.step)", "def timestep_definition(self, timestep_definition):\n\n self._timestep_definition = timestep_definition", "def __init__(self, step_time, step_interval=None, n_step=None, ss=None):\n self.ss = ss\n self.n_step = n_step\n self.interval = step_interval\n self.step_time = step_time", "def __init__(self, task, time_limit=float('inf'), random_state=None,\n n_sub_steps=None,\n raise_exception_on_physics_error=True,\n strip_singleton_obs_buffer_dim=False,\n max_reset_attempts=1):\n super(Environment, self).__init__(\n task=task,\n time_limit=time_limit,\n random_state=random_state,\n n_sub_steps=n_sub_steps,\n raise_exception_on_physics_error=raise_exception_on_physics_error,\n strip_singleton_obs_buffer_dim=strip_singleton_obs_buffer_dim)\n self._max_reset_attempts = max_reset_attempts\n self._reset_next_step = True", "def __init__(self,\n debug=False,\n urdf_version=None,\n control_time_step=0.005,\n action_repeat=5,\n control_latency=0,\n pd_latency=0,\n on_rack=False,\n motor_kp=1.0,\n motor_kd=0.02,\n render=False,\n num_steps_to_log=2000,\n env_randomizer=None,\n log_path=None,\n signal_type='ik',\n target_position=None,\n backwards=None,\n gait_type='trot',\n terrain_type='plane',\n terrain_id='plane',\n mark='base',\n ):\n self.phase = 0\n\n self._gait_type = gait_type \n # for observation space bounding \n self.max_speed = 1.0\n self.min_speed = 0.5 # change back to 0.2 for OLD TD3 model evaluation\n \n self.min_side_speed = 0.0\n self.max_side_speed = 0.0\n\n self.speed = np.random.uniform(self.min_speed, self.max_speed)\n self.side_speed = np.random.uniform(self.min_side_speed, self.max_side_speed)\n self.speed_des = [self.speed, self.side_speed]\n\n # Initialization variables for periodic reward sum composition\n self.theta_FL = phase_constants.PHASE_VALS[self._gait_type]['front_left']\n self.theta_FR = phase_constants.PHASE_VALS[self._gait_type]['front_right']\n self.theta_RL = phase_constants.PHASE_VALS[self._gait_type]['rear_left']\n self.theta_RR = phase_constants.PHASE_VALS[self._gait_type]['rear_right']\n\n self.min_swing_ratio = 0.6\n self.max_swing_ratio = 0.8\n self.ratio = np.random.uniform(self.min_swing_ratio, self.max_swing_ratio)\n\n super(rexPeriodicRewardEnv,\n self).__init__(urdf_version=urdf_version,\n accurate_motor_model_enabled=True,\n motor_overheat_protection=False,\n motor_kp=motor_kp,\n motor_kd=motor_kd,\n remove_default_joint_damping=False,\n control_latency=control_latency,\n 
pd_latency=pd_latency,\n on_rack=on_rack,\n render=render,\n num_steps_to_log=num_steps_to_log,\n env_randomizer=env_randomizer,\n log_path=log_path,\n control_time_step=control_time_step,\n action_repeat=action_repeat,\n target_position=target_position,\n signal_type=signal_type,\n backwards=backwards,\n debug=debug,\n terrain_id=terrain_id,\n terrain_type=terrain_type,\n mark=mark,\n ratio=self.ratio,\n forward_reward_cap=5\n )\n\n self.height_des = 0.206 # this is init standing height for rex\n\n self.cycle_complete = 0\n self.cycle_len = 1000 # this is L\n \n # vonmises variables\n self.kappa = phase_constants.VON_MISES_KAPPA\n\n rex_joints = p.getNumJoints(bodyUniqueId=self.rex.quadruped)\n link_name_to_ID = {}\n for i in range(rex_joints):\n name = p.getJointInfo(self.rex.quadruped, i)[12].decode('UTF-8')\n link_name_to_ID[name] = i\n\n self.link_name_to_ID = link_name_to_ID\n self.toe_pos_last = { 'front_left_toe_pos' : p.getLinkState(self.rex.quadruped, self.link_name_to_ID['front_left_toe_link'])[0],\n 'front_right_toe_pos' : p.getLinkState(self.rex.quadruped, self.link_name_to_ID['front_right_toe_link'])[0],\n 'rear_left_toe_pos' : p.getLinkState(self.rex.quadruped, self.link_name_to_ID['rear_left_toe_link'])[0],\n 'rear_right_toe_pos' : p.getLinkState(self.rex.quadruped, self.link_name_to_ID['rear_right_toe_link'])[0]\n\n } \n\n print('Using Periodic Reward Composition Rex Environment')", "def reset(self, time):\n for key in self.data['step']:\n self.data['step'][key] = None\n\n self.time = time", "def interact(self, n_steps=100, verbose=False, add_last_observation=True):\n\n\n def env_step(i,action):\n \"\"\"environment reaction,\n :returns: observation, reward, is_alive, info\"\"\"\n\n if not self.just_ended[i]:\n new_observation, cur_reward,is_done,info = self.envs[i].step(action)\n if is_done:\n # game ends now, will finalize on next tick\n self.just_ended[i] = True\n new_observation = self.preprocess_observation(new_observation)\n\n #note: is_alive=True in any case because environment is still alive (last tick alive) in our notation\n return new_observation, cur_reward,True,info\n\n\n else:\n assert self.just_ended[i] == True\n\n # reset environment, get new observation to be used on next tick\n new_observation = self.preprocess_observation(self.envs[i].reset())\n\n #reset memory for new episode\n for m_i in range(len(new_memory_states)):\n new_memory_states[m_i][i] = 0\n\n if verbose:\n print(\"env %i reloaded\" % i)\n\n self.just_ended[i] = False\n\n return new_observation,0,False,{'end':True}\n\n\n history_log = []\n\n for i in range(n_steps - int(add_last_observation)):\n res = self.agent_step(self.prev_observations, *self.prev_memory_states)\n actions, new_memory_states = res[0],res[1:]\n\n new_observations, cur_rewards, is_alive, infos = \\\n zip(*map(env_step,range(len(self.envs)),actions))\n\n\n # append data tuple for this tick. 
Is alive is always True\n history_log.append((self.prev_observations, actions, cur_rewards, new_memory_states, is_alive, infos))\n\n self.prev_observations = new_observations\n self.prev_memory_states = new_memory_states\n\n if add_last_observation:\n fake_actions = np.array([env.action_space.sample() for env in self.envs])\n fake_rewards = np.zeros(shape=len(self.envs))\n is_fake_alive = np.ones(shape=len(self.envs))\n history_log.append((self.prev_observations,fake_actions,fake_rewards,self.prev_memory_states,\n is_fake_alive,[None]*len(self.envs)))\n\n # cast to numpy arrays\n observation_log, action_log, reward_log, memories_log, is_alive_log, info_log = zip(*history_log)\n\n # tensor dimensions\n # [batch_i, time_i, observation_size...]\n observation_log = np.array(observation_log).swapaxes(0, 1)\n\n # [batch, time, units] for each memory tensor\n memories_log = map(lambda mem: np.array(mem).swapaxes(0, 1), zip(*memories_log))\n\n # [batch_i,time_i]\n action_log = np.array(action_log).swapaxes(0, 1)\n\n # [batch_i, time_i]\n reward_log = np.array(reward_log).swapaxes(0, 1)\n\n # [batch_i, time_i]\n is_alive_log = np.array(is_alive_log).swapaxes(0, 1).astype('uint8')\n\n\n return observation_log, action_log, reward_log, memories_log, is_alive_log, info_log", "def __init__(self, timestep_definition=None, pseudo_time_stepping=None, auto_load_ramping=None, write_control_definition=None, excitation_frequencies=None, eigenfrequency_scope=None, processors=None, max_run_time=None, local_vars_configuration=None): # noqa: E501 # noqa: E501\n if local_vars_configuration is None:\n local_vars_configuration = Configuration()\n self.local_vars_configuration = local_vars_configuration\n\n self._timestep_definition = None\n self._pseudo_time_stepping = None\n self._auto_load_ramping = None\n self._write_control_definition = None\n self._excitation_frequencies = None\n self._eigenfrequency_scope = None\n self._processors = None\n self._max_run_time = None\n self.discriminator = None\n\n if timestep_definition is not None:\n self.timestep_definition = timestep_definition\n if pseudo_time_stepping is not None:\n self.pseudo_time_stepping = pseudo_time_stepping\n if auto_load_ramping is not None:\n self.auto_load_ramping = auto_load_ramping\n if write_control_definition is not None:\n self.write_control_definition = write_control_definition\n if excitation_frequencies is not None:\n self.excitation_frequencies = excitation_frequencies\n if eigenfrequency_scope is not None:\n self.eigenfrequency_scope = eigenfrequency_scope\n if processors is not None:\n self.processors = processors\n if max_run_time is not None:\n self.max_run_time = max_run_time", "def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):\n self.num_inference_steps = num_inference_steps\n timesteps = (\n np.linspace(0, self.num_train_timesteps - 1, num_inference_steps + 1)\n .round()[::-1][:-1]\n .copy()\n .astype(np.int64)\n )\n self.timesteps = torch.from_numpy(timesteps).to(device)\n self.model_outputs = [\n None,\n ] * self.config.solver_order\n self.lower_order_nums = 0", "def timesteps_experiment():\n\n print(\"TIMESTEPS EXPERIMENT\")\n\n # set the name of the experiment\n now = datetime.datetime.now()\n experiment_id = str(now.day) + \"_\" + str(now.month) + \"_\" + str(now.hour) + \".\" + str(now.minute)\n experiment_name = 'timestep_' + str(experiment_id)\n\n # define if you want to use preprocessed data from file\n use_prep_data = False\n if use_prep_data:\n 
set_params(preproc_data_id='16_5_10.16.47')\n else:\n set_params(use_preproc_data=False)\n\n # define the changing parameter and its value\n changing_param_name = 'time_steps'\n changing_param_value = [1, 2, 4, 8, 16, 32, 64, 128, 256]\n # {0:4, 1:100}, {0:3, 1:100}, {0:2, 1:100}, {0:1, 1:100}] #[{0:1, 1:1}, {0:15, 1:85}]#\n\n # set constant parameters\n set_params(epochs=20)\n set_params(dropout=0.3)\n set_params(use_word_emb=1)\n\n # save constant parameters to a new \"experiment_..\" file\n save_constant_parameters(experiment_name, changing_param_name)\n\n # run experiment for every parameter value\n for value in changing_param_value:\n process = psutil.Process(os.getpid())\n print(\"-----MEMORY before starting experiment ------\", int(process.memory_info().rss/(8*10**(3))), \"KB\")\n\n # update the parameter value\n set_params(use_word_emb = value)\n\n # update the model_id for this new model\n now = datetime.datetime.now()\n new_model_id = str(now.day) + \"_\" + str(now.month) + \"_\" + str(now.hour) + \".\" + str(now.minute) + \".\" + str(now.second)\n set_params(model_id = new_model_id)\n\n # evaluate the new model and save the results in the experiment file\n oneExperiment = Process(target=run_experiment, args=(experiment_name, new_model_id, changing_param_name, value,))\n oneExperiment.start()\n oneExperiment.join()\n\n if value == changing_param_value[0]:\n set_params(preproc_data_id=new_model_id)", "def __set_defaults_to_runtime_variables(self) -> None:\n self.current_time_in_eighths = N_EIGHTHS_PER_MEASURE\n self.current_measure_durations = []\n self.past_movements = []\n self.current_motion_start_element = self.counterpoint[0]\n self.is_last_element_consonant = True", "def step(self, step, observation, **extra_feed):\n extra_feed['act_step'] = step\n a, v, state, neglogp = self._evaluate([self.act_action, self.vf, self.state, self.act_neglogp], observation, **extra_feed)\n if state.size == 0:\n state = None\n return a, v, state, neglogp", "def test_tstep(self):\n model = BDF(debug=None)\n\n sid = 42\n n1 = n2 = 5\n dt1 = dt2 = 0.1\n no1 = no2 = 3\n card = ['TSTEP', sid,\n n1, dt1, no1, None, None, None, None, None,\n n2, dt2, no2]\n model.add_card(card, card[0], comment='tstep comment')\n model.validate()\n tstep = model.tsteps[42]\n tstep.raw_fields()\n tstep.write_card()\n tstep.write_card(size=16)\n\n sid = 43\n N = 5\n DT = 0.1\n NO = 3\n tstep2 = model.add_tstep(sid, N, DT, NO)\n tstep2.raw_fields()\n tstep2.write_card()\n tstep2.write_card(size=16)\n save_load_deck(model)", "def _step(self, action: types.NestedArray) -> ts.TimeStep:", "def get_empty_device_properties_dict(self, step_type=None):\n return {\n 'step_type':\n step_type if step_type is not None else str(\n type(self).__name__),\n 'property_values': [],\n 'timestamp':\n self.preroutine_timestamp,\n }", "def put_time(self, step, value):\n assert step > 0, \"Step must be larger than 0.\"\n # XXX: Currently the time axis is not unlimited due to a limitation\n # in h5netcdf - thus no new time steps can be created after the\n # initialization.\n assert step <= self._f.dimensions[\"time_step\"]\n\n self._f.variables[\"time_whole\"][step - 1] = value", "def _reset(self) -> ts.TimeStep:", "def dt(self):\n if isinstance(self._time_axis, are_ax.RegularAxis):\n return self._time_axis.step\n raise RuntimeError(\"Time step is not available for orbits constructed with non-regular time axis\")", "def onTimeStepStart(self, timeStep):\n pass", "def setValuesInStep(\n self, stepName: str, interactionProperty: str = \"\", 
contactControls: str = \"\"\n ):\n pass", "def setValuesInStep(\n self, stepName: str, interactionProperty: str = \"\", contactControls: str = \"\"\n ):\n pass", "def __init__(__self__, *,\n multistep_number: int,\n outcome_summary: str,\n run_duration: 'outputs.DurationResponse',\n step_id: str):\n pulumi.set(__self__, \"multistep_number\", multistep_number)\n pulumi.set(__self__, \"outcome_summary\", outcome_summary)\n pulumi.set(__self__, \"run_duration\", run_duration)\n pulumi.set(__self__, \"step_id\", step_id)", "def _on_step(self) -> bool:\n # print(\"locals \", self.locals)\n # # what timestep you think\n # print(\"timestep \",CustomCallback.step)\n # # what timestep a2c or ppo2 learn() is on \n # print(\"a2c/ppo2 num timestep\",self.num_timesteps)\n \n # TODO: add flag to save screenshots or not\n subfolder = os.path.join(self.directory, 'screen/')\n filepath = os.path.join(subfolder)\n img_name = '_screenshot_' + str(self.num_timesteps)\n \n if(self.algo == \"A2C\" or self.algo == \"PPO2\"):\n # self.locals['obs'] gives black and white imgs\n obs = self.env.get_images()\n for i in range(self.num_envs):\n mpl.image.imsave(subfolder+\"env_\" + str(i) + img_name + \"_.png\", obs[i])\n elif (self.algo == \"DQN\"):\n self.env.ale.saveScreenPNG(subfolder+\"env_\" + str(0) + img_name + \"_.png\")\n\n step_stats = {self.num_timesteps: {\n 'num_timesteps': self.num_timesteps,\n 'state': self.num_timesteps/self.num_envs,\n }\n }\n # add step to dict\n CustomCallback.main_data_dict.update(step_stats)\n key = self.num_timesteps\n\n # collection of minimum data: action, reward, lives\n if(self.algo == \"DQN\"):\n CustomCallback.main_data_dict[key]['action_env_0'] = self.locals['action']\n CustomCallback.main_data_dict[key]['action_name_env_0'] = self.actions[self.locals['env_action']]\n if(self.game == \"Pong\"):\n CustomCallback.main_data_dict[key]['curr_score_env_0'] = self.locals['episode_rewards'][-1]\n else:\n CustomCallback.main_data_dict[key]['cumulative_life_reward'] = self.locals['episode_rewards'][-1]\n if(self.isLives == True):\n CustomCallback.main_data_dict[CustomCallback.step]['lives'] = self.locals['info']['ale.lives']\n else:\n for i in range(self.num_envs):\n CustomCallback.main_data_dict[key]['action_env_'+str(i)] = self.locals['actions'][i]\n CustomCallback.main_data_dict[key]['action_name_env_'+str(i)] = self.actions[self.locals['actions'][i]]\n CustomCallback.main_data_dict[key]['step_reward_env_'+str(i)] = self.locals['rewards'][i]\n if(self.isLives == True):\n if(CustomCallback.step == 1):\n CustomCallback.main_data_dict[key]['lives_env_'+str(i)] = 3\n if(CustomCallback.step >= 2):\n CustomCallback.main_data_dict[key]['lives_env_'+str(i)] = self.locals['infos'][i]['ale.lives']\n\n if(self.game == \"Pong\" and self.algo != \"DQN\"):\n # extra processing for Pong scores\n self.find_life_game_info_a2c_ppo2_pong()\n\n # at the last step, write data into csv files\n if(CustomCallback.step == (self.num_steps/self.num_envs)):\n self.make_dataframes(self.df_list)\n # save minimal data\n self.df_to_csv(\"df_og.csv\", self.df_list)\n self.df_to_parquet()\n CustomCallback.step = CustomCallback.step + 1\n return True", "def __init__(self, step_time, saw_time, step_interval=None, n_step=None, ss=None):\n self.ss = ss\n self.n_step = n_step\n self.interval = step_interval\n self.step_time = step_time\n self.saw_time = saw_time", "def time_step(self):\n return self._time_step", "def timeStep(self):\n return self.params['h']", "def __init__( self, level, outter = None ):\n assert 
isinstance( level, int )\n assert isinstance( outter, Env ) or ( outter is None )\n\n self._level = level\n self._steps = []\n self._outter = outter", "def __create_input(self, timestep):\n self.litter = {}\n if timestep == 0:\n self.initial = {}\n if self.initial_mode!='zero':\n self._define_components(self.initial_def, self.initial)\n if self.md.litter_mode == 'constant yearly':\n self._define_components(self.md.constant_litter, self.litter)\n else:\n timeind = self._map_timestep2timeind(timestep)\n if self.md.litter_mode=='monthly':\n infdata = self.md.monthly_litter\n elif self.md.litter_mode=='yearly':\n infdata = self.md.yearly_litter\n self._define_components(infdata, self.litter, tsind=timeind)\n self._fill_input()", "def step(self, observation: dict) -> dict:\n raise NotImplementedError(\"step\")", "def test_no_step_defaults(self):\n es = exposed.ExposedStep()\n self.assertIsNone(es._step)", "def __init__( self ):\n self._env = None\n self._steps = None\n\n self._initialize( )", "def record(self, var_keys, value=None):\n\n for var_key in make_list(var_keys):\n\n # Create empty lists\n if 't' not in self.log:\n self.log['t'] = []\n if var_key not in self.log:\n self.log[var_key] = [None] * len(self.log['t'])\n\n if self.model.t not in self.log['t']:\n\n # Create empty slot for new documented time step\n for v in self.log.values():\n v.append(None)\n\n # Store time step\n self.log['t'][-1] = self.model.t\n\n if value is None:\n v = getattr(self, var_key)\n else:\n v = value\n\n self.log[var_key][-1] = v", "def time_step(self):\n\n rho_rel = np.abs(self.rho_dt / self.rho)\n rho_rel_max = np.max(rho_rel)\n e_rel = np.abs(self.e_dt / self.e)\n e_rel_max = np.max(e_rel)\n x_rel = np.abs(self.u / self.dx)\n x_rel_max = np.max(x_rel)\n y_rel = np.abs(self.w / self.dy)\n y_rel_max = np.max(y_rel)\n rel = [rho_rel_max, e_rel_max, x_rel_max, y_rel_max]\n delta = np.max(np.abs(rel))\n\n if 0.1 <= delta <= 1e3:\n self.dt = self.p / delta\n else:\n self.dt = self.p", "def step(self, step=None):\n pass", "def timing_default(self):\n\n return {\"runtimes\": [], \"dates\": []}", "def __init__(self, handle_auto_reset: bool = False):\n self._handle_auto_reset = handle_auto_reset\n self._current_time_step = None\n common.assert_members_are_not_overridden(\n base_cls=PyEnvironment, instance=self, denylist=('reset', 'step')\n )", "def get_time_step(self):\n return self._time_step", "def lab_run_med(character_id, time_step):\n pass", "def timestep(self, simsystem, osc, obs):\n pass", "def _generate_steps(\n episode: Sequence[Any],\n step_metadata_skip_list: AbstractSet[str]) -> Dict[str, Any]:\n step_metadata = _empty_nested_list(\n get_step_metadata(episode[0], step_metadata_skip_list))\n\n steps = {\n 'observation':\n _empty_nested_list(episode[0].timestep.observation),\n 'action':\n _empty_nested_list(episode[0].action),\n 'reward': [],\n 'discount': [],\n 'is_terminal': [],\n 'is_first': [],\n 'is_last': [],\n }\n steps.update(step_metadata)\n\n prev_step = None\n for step in episode:\n if prev_step is not None:\n steps['is_first'].append(prev_step.timestep.first())\n steps['is_terminal'].append(False)\n steps['is_last'].append(prev_step.timestep.last())\n steps['observation'] = _append_nested(\n steps['observation'], prev_step.timestep.observation)\n steps['reward'].append(step.timestep.reward)\n steps['discount'].append(step.timestep.discount)\n steps['action'] = _append_nested(steps['action'], step.action)\n step_metadata = get_step_metadata(prev_step, step_metadata_skip_list)\n for k, v in 
step_metadata.items():\n steps[k] = _append_nested(steps[k], v)\n prev_step = step\n if prev_step is not None:\n # We append the observation of the final step (action and reward were\n # included in the previous step.\n # The terminal flag is inferred like in termination(), truncation()\n # from dm_env/_environment.py\n is_terminal = (\n prev_step.timestep.last() and prev_step.timestep.discount == 0.0)\n steps['is_first'].append(prev_step.timestep.first())\n steps['is_terminal'].append(is_terminal)\n steps['is_last'].append(True)\n steps['observation'] = _append_nested(\n steps['observation'], prev_step.timestep.observation)\n # Discount, action and reward are meaningless in the terminal step\n steps['reward'].append(np.zeros_like(prev_step.timestep.reward))\n steps['discount'].append(\n np.zeros_like(prev_step.timestep.discount))\n steps['action'] = _append_nested(\n steps['action'],\n tf.nest.map_structure(np.zeros_like, prev_step.action))\n step_metadata = get_step_metadata(prev_step, step_metadata_skip_list)\n for k, v in step_metadata.items():\n steps[k] = _append_nested(steps[k], v)\n return steps", "def step(self, **kwargs):\n pass", "def _set_runtimes(self):\n self._run_times =np.zeros(self.n_runs, dtype = np.float)", "def __init__(self, env=None, tilesEnv=False):\n super(MarioEnv, self).__init__(env)\n self.resetCount = -1\n # reward is distance travelled. So normalize it with total distance\n # https://github.com/ppaquette/gym-super-mario/blob/master/ppaquette_gym_super_mario/lua/super-mario-bros.lua\n # However, we will not use this reward at all. It is only for completion.\n self.maxDistance = 3000.0\n self.tilesEnv = tilesEnv", "def __init__(self):\n self.step_list = [steps.Raw()]", "def getSteps():", "def set_step_time(self, us):\n if us < 20: # 20 us is the shortest possible for esp8266\n self.step_time = 20\n else:\n self.step_time = us", "def StepTimeout(self):\n return recipe_api.StepTimeout", "def __init__(self, step_time, mu=None, sigma=None, n_step=None, ss=None):\n self.ss = ss\n self.n_step = n_step\n self.mu = mu\n self.sigma = sigma\n self.step_time = step_time", "def setTimeDiscretisation(self,timeStepIntervals = None, timeStepSizes = None):\n if timeStepIntervals != None:\n self.timeStepIntervals = timeStepIntervals\n pass\n elif timeStepSizes != None:\n self.timeStepSizes = timeStepSizes\n pass\n else:\n raise Warning(\"You should give at least an argument to the setTimeDiscretisation function\")\n if self.timeStepIntervals != None:\n print(\"dbg hm \",self.timeStepIntervals)\n print(\"dbg hm \",self.problem.calculationTimes[-1])\n self.timeStepSizes = self.problem.calculationTimes[-1]/self.timeStepIntervals\n pass\n else:\n self.timeStepIntervals = self.problem.calculationTimes[-1]/self.timeStepSizes\n pass", "def reset(self) -> ts.TimeStep:\n self._current_time_step = self._reset()\n return self._current_time_step", "def extend(self, step):\n self.timesteps.extend(step.timesteps)\n self.masks.extend(step.masks)\n self.x.extend(step.x)\n self.y.extend(step.y)\n self.i.extend(step.i)\n self.j.extend(step.j)\n self.end_time = step.end_time\n self.times = np.arange(self.start_time, self.end_time + self.step, self.step)\n self.u = np.concatenate((self.u, step.u))\n self.v = np.concatenate((self.v, step.v))\n for attr in self.attributes.keys():\n if attr in step.attributes.keys():\n self.attributes[attr].extend(step.attributes[attr])", "def onTimeStepEnd(self, timeStep):\n pass", "def get_duration_steps(self):\n return {\n # acc. to ATV-A 121 chap. 
5.2 (till 2012)\n ATV: (60 * 3, 60 * 48),\n # acc. to DWA-A 531 chap. 5.2.1\n DWA_adv: (60 * 3, 60 * 24),\n # acc. to DWA-A 531 chap. 5.2.1\n DWA: (60, 60 * 12)\n }[self.worksheet]", "def __verify_steps(self):\n if self.major[2] not in self.data[self.root]:\n self.data[self.root][self.major[2]] = {\"step\": []}\n elif not isinstance(self.data[self.root][self.major[2]][\"step\"], list):\n self.data[self.root][self.major[2]][\"step\"] = [self.data[self.root][self.major[2]][\"step\"]]", "def update_env(S, episode, step_counter):\n env_list = ['-'] * (N_STATES - 1) + ['T']\n if S == 'terminated':\n interaction = 'Episode %s: total_steps = %s' % (episode + 1, step_counter)\n print('\\r{}'.format(interaction), end='')\n time.sleep(2)\n print('\\r ', end='')\n else:\n env_list[S] = 'o'\n interaction = ''.join(env_list)\n print('\\r{}'.format(interaction), end='')\n time.sleep(FRESH_TIME)", "def ntimestep(self):\n if self._ntimestep is None:\n self._ntimestep = self.get_data_ntimestep()\n\n return self._ntimestep", "def StepEnv(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def __init__(self, env, agent_config: Munch):\n\n super(Agent_DQN, self).__init__(env=env, agent_config=agent_config)\n\n ###########################\n # YOUR IMPLEMENTATION HERE #\n self.state = Munch({**self.state,\n \"step\": 0,\n \"num_actions\": env.action_space.n,\n \"metrics_capture_window\": agent_config.metrics_capture_window,\n \"replay_size\": agent_config.replay_size,\n \"position\": 0,\n \"total_num_steps\": agent_config.total_num_steps,\n \"episodes\": agent_config.episodes,\n \"gamma\": agent_config.gamma,\n \"learning_rate\": agent_config.learning_rate,\n \"initial_epsilon\": agent_config.initial_epsilon,\n \"final_epsilon\": agent_config.final_epsilon,\n \"epsilon\": agent_config.initial_epsilon,\n \"steps_to_explore\": agent_config.steps_to_explore,\n \"network_update_interval\": agent_config.network_update_interval,\n \"network_train_interval\": agent_config.network_train_interval,\n \"batch_size\": agent_config.batch_size,\n \"mode\": \"Random\",\n \"state_counter_while_testing\": 0,\n \"beta\": agent_config.beta,\n \"lambda_val\": agent_config.lambda_val,\n \"eta\": agent_config.eta\n })\n # self.run_name = agent_config.run_name\n # self.state.model_save_path = agent_config.model_save_path\n # self.state.model_save_interval = agent_config.model_save_interval\n # self.log_path = agent_config.log_path\n # self.tensorboard_summary_path = agent_config.tensorboard_summary_path\n self.is_cuda_available = torch.cuda.is_available()\n self.device = torch.device(\"cuda\" if self.is_cuda_available else \"cpu\")\n # self.state.model_test_path = agent_config.model_test_path\n # self.state.step = 0\n\n # Environment and network parameters\n # self.env = env\n # self.state.num_actions = env.action_space.n\n self.action_list = np.arange(self.state.num_actions)\n self.input_shape = [4, 84, 84]\n # self.state.metrics_capture_window = agent_config.metrics_capture_window\n # self.state.replay_size = agent_config.replay_size\n self.replay_memory = []\n # self.state.position = 0\n # self.state.total_num_steps = agent_config.total_num_steps\n # self.state.episodes = agent_config.episodes\n # self.state.gamma = agent_config.gamma\n # self.state.learning_rate = agent_config.learning_rate\n # self.state.initial_epsilon = agent_config.initial_epsilon\n # self.state.final_epsilon = 
agent_config.final_epsilon\n # self.state.epsilon = self.state.initial_epsilon\n # self.state.steps_to_explore = agent_config.steps_to_explore\n self.state.epsilon_step = (self.state.initial_epsilon - self.state.final_epsilon) / self.state.steps_to_explore\n\n # self.state.network_update_interval = agent_config.network_update_interval\n # self.state.network_train_interval = agent_config.network_train_interval\n\n self.last_n_rewards = deque([], self.state.metrics_capture_window)\n self.start_to_learn = agent_config.start_to_learn\n self.ddqn = agent_config.ddqn\n self.use_icm = agent_config.use_icm\n self.intrinsic_episode_reward = []\n self.last_n_intrinsic_rewards = deque([], self.state.metrics_capture_window)\n\n # self.state.batch_size = agent_config.batch_size\n # self.state.mode = \"Random\"\n # self.state.state_counter_while_testing = 0\n self.q_network = DQN(env=env, args=self.state.config).to(self.device)\n self.target_network = DQN(env=env, args=self.state.config).to(self.device)\n self.loss_function = F.smooth_l1_loss\n self.optimiser = optim.Adam(self.q_network.parameters(), lr=agent_config.learning_rate)\n self.probability_list = np.zeros(env.action_space.n, np.float32)\n self.q_network.train()\n self.target_network.eval()\n\n self.icm_model = ICM(env=env, num_actions=self.state.num_actions, args=self.state.config).to(self.device)\n\n self.inverse_loss_fn = nn.CrossEntropyLoss()\n self.forward_loss_fn = nn.MSELoss()\n\n # self.state.beta = agent_config.beta\n # self.state.lambda_val = agent_config.lambda_val\n # self.state.eta = agent_config.eta\n\n # create necessary paths\n # self.create_dirs()\n self.meta = None\n\n if agent_config.test_dqn:\n print('loading trained model')\n self.q_network.load_state_dict(torch.load(self.state.model_test_path, map_location=self.device))\n\n # self.log_file = open(self.state.model_save_path + '/' + self.run_name + '.log', 'w') if not agent_config.test_dqn else None\n\n # Set target_network weight\n self.target_network.load_state_dict(self.q_network.state_dict())\n\n # self.writer = SummaryWriter(agent_config.tensorboard_summary_path)", "def before_run(self, run_context):\n logging.info('Before creating the session...')\n\n self._global_step_value = run_context.session.run(self._global_step)\n if self._global_step_value % self._iterations_per_loop == 0:\n\n # Calling `play` the environment roll out a trajectory of length\n # `self._max_horizon`. 
Currently, we support two modes for play:\n # (1) stochastic play (similar to PPO)\n # (2) Monte-Carlo Tree Search (MCTS) play\n self._env_wrapper.play(self._max_horizon)\n\n # Computes explained variance between predicted values (from network)\n # and computed return values from environment.\n ev = math_utils.explained_variance(\n np.asarray(self._env_wrapper.trajectory_values),\n np.asarray(self._env_wrapper.trajectory_returns))\n tf_utils.add_summary(\n float(ev), 'Variation/explained_variance', self._global_step_value,\n self.summary_writer)\n\n if type(self._env_wrapper).__name__ == 'Env':\n # Update queues for episode data\n # (length of episodes and episode rewards)\n self._episode_reward_buf.extend(\n self._env_wrapper.trajectory_per_episode_rewards)\n self._episode_length_buf.extend(\n self._env_wrapper.trajectory_per_episode_lengths)\n else:\n self._episode_reward_buf.extend(\n self._env_wrapper.master_game.trajectory_per_episode_rewards)\n self._episode_length_buf.extend(\n self._env_wrapper.master_game.trajectory_per_episode_lengths)\n\n # Summaries for the current trajectory\n tf_utils.summary_stats(self._episode_reward_buf, 'Reward',\n 'Episode Rewards', self._global_step_value,\n self.summary_writer, False)\n tf_utils.summary_stats(self._episode_length_buf, 'Reward',\n 'Episode Length', self._global_step_value,\n self.summary_writer, False)\n\n mcts_tensor = np.full(\n np.asarray(self._env_wrapper.trajectory_values).shape,\n self._env_wrapper.mcts_sampling)\n\n run_context.session.run(\n self._iterator.initializer,\n feed_dict={\n self.features_ph['mcts_features']:\n self._env_wrapper.trajectory_states,\n self.features_ph['policy_features']:\n self._env_wrapper.policy_trajectory_states,\n self.labels_ph['action_tensor']:\n self._env_wrapper.trajectory_actions,\n self.labels_ph['value_tensor']:\n self._env_wrapper.trajectory_values,\n self.labels_ph['return_tensor']:\n self._env_wrapper.trajectory_returns,\n self.labels_ph['old_neg_logprob_tensor']:\n self._env_wrapper.trajectory_neg_logprobs,\n self.labels_ph['mean_tensor']:\n self._env_wrapper.trajectory_means,\n self.labels_ph['logstd_tensor']:\n self._env_wrapper.trajectory_logstds,\n self.labels_ph['mcts_enable_tensor']:\n mcts_tensor,\n self.labels_ph['policy_action_tensor']:\n self._env_wrapper.policy_trajectory_actions,\n self.labels_ph['policy_value_tensor']:\n self._env_wrapper.policy_trajectory_values,\n self.labels_ph['policy_return_tensor']:\n self._env_wrapper.policy_trajectory_returns,\n self.labels_ph['policy_old_neg_logprob_tensor']:\n self._env_wrapper.policy_trajectory_neg_logprobs,\n })", "def control_timestep(self):\n if self._overridden_n_sub_steps is not None:\n return self.physics.timestep() * self._overridden_n_sub_steps\n else:\n return self.task.control_timestep", "def step(self, step):\n if self.local_vars_configuration.client_side_validation and step is None: # noqa: E501\n raise ValueError(\"Invalid value for `step`, must not be `None`\") # noqa: E501\n\n self._step = step", "def create_step(self, step):\n raise NotImplementedError", "def __init__(self, env: gym.Env, eval_episodes: int, render_freq: int, \n fps: int, verbose=0):\n super().__init__(verbose=verbose)\n self.env = env\n self.eval_episodes = eval_episodes\n self.render_freq = render_freq\n self.fps = fps", "def _reset(self):\n self.spot_supervisor.reset()\n return ts.TimeStep(ts.StepType.FIRST, np.float32(0.0), DISCOUNT,\n np.zeros(23, dtype=np.float32))", "def setup_steps(self):\n step1 = ground_step.Ground(5745, 495, 40, 44)\n step2 
= ground_step.Ground(5788, 452, 40, 44)\n step3 = ground_step.Ground(5831, 409, 40, 44)\n step4 = ground_step.Ground(5874, 366, 40, 176)\n\n step5 = ground_step.Ground(6001, 366, 40, 176)\n step6 = ground_step.Ground(6044, 408, 40, 40)\n step7 = ground_step.Ground(6087, 452, 40, 40)\n step8 = ground_step.Ground(6130, 495, 40, 40)\n\n step9 = ground_step.Ground(6345, 495, 40, 40)\n step10 = ground_step.Ground(6388, 452, 40, 40)\n step11 = ground_step.Ground(6431, 409, 40, 40)\n step12 = ground_step.Ground(6474, 366, 40, 40)\n step13 = ground_step.Ground(6517, 366, 40, 176)\n\n step14 = ground_step.Ground(6644, 366, 40, 176)\n step15 = ground_step.Ground(6687, 408, 40, 40)\n step16 = ground_step.Ground(6728, 452, 40, 40)\n step17 = ground_step.Ground(6771, 495, 40, 40)\n\n step18 = ground_step.Ground(7760, 495, 40, 40)\n step19 = ground_step.Ground(7803, 452, 40, 40)\n step20 = ground_step.Ground(7845, 409, 40, 40)\n step21 = ground_step.Ground(7888, 366, 40, 40)\n step22 = ground_step.Ground(7931, 323, 40, 40)\n step23 = ground_step.Ground(7974, 280, 40, 40)\n step24 = ground_step.Ground(8017, 237, 40, 40)\n step25 = ground_step.Ground(8060, 194, 40, 40)\n step26 = ground_step.Ground(8103, 194, 40, 360)\n\n step27 = ground_step.Ground(8488, 495, 40, 40)\n\n self.step_group = pygame.sprite.Group(step1, step2,\n step3, step4,\n step5, step6,\n step7, step8,\n step9, step10,\n step11, step12,\n step13, step14,\n step15, step16,\n step17, step18,\n step19, step20,\n step21, step22,\n step23, step24,\n step25, step26,\n step27)", "def step_param(self):\n if self.variable_name is None:\n return self.step_name\n elif self.step_name is None:\n return self.variable_name\n else:\n return '{step}__{var}'.format(\n step=self.step_name, var=self.variable_name)", "def _create_trace(self):\n\t\tself.trace=algebraic_dict(self.N_time_steps,self.N_actions)", "def onTimeStepStart(self, timeStep):\n self.queuedInTimeStep = 0\n self.dequeuedInTimeStep = 0\n \n pass" ]
[ "0.59744036", "0.5681818", "0.5647982", "0.5532288", "0.5392571", "0.53780484", "0.5341918", "0.53083825", "0.5307224", "0.5280149", "0.52758807", "0.5254811", "0.52457494", "0.5231521", "0.522465", "0.5220665", "0.5217766", "0.5196336", "0.51878935", "0.51844066", "0.51774937", "0.51528937", "0.51511943", "0.51490057", "0.5141927", "0.5137274", "0.51196474", "0.5114506", "0.50754833", "0.505094", "0.50339943", "0.5025587", "0.50143814", "0.5013822", "0.5012368", "0.50042135", "0.5003208", "0.499886", "0.49960324", "0.4977623", "0.49682266", "0.49671245", "0.49666667", "0.496536", "0.4953529", "0.494154", "0.49349642", "0.49299505", "0.49131548", "0.49074087", "0.4901282", "0.4897905", "0.4897905", "0.48828977", "0.4863086", "0.48563007", "0.48524004", "0.48406854", "0.4830571", "0.48267964", "0.482602", "0.482283", "0.48223963", "0.48218882", "0.48216027", "0.48192358", "0.48123905", "0.48023936", "0.48020676", "0.47938263", "0.47874743", "0.47848594", "0.47819898", "0.4776202", "0.47734368", "0.47699904", "0.47601825", "0.4757058", "0.47550824", "0.47507244", "0.4746308", "0.47456762", "0.47412056", "0.47397482", "0.4736617", "0.47301677", "0.4727939", "0.47117028", "0.47060272", "0.47038653", "0.46979734", "0.46936655", "0.46923563", "0.46879417", "0.46827254", "0.46826935", "0.46789515", "0.4664059", "0.46630335", "0.46625343" ]
0.54468334
4
Returns the current timestep.
def current_time_step(self) -> ts.TimeStep:
    return self._current_time_step
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_time_step(self):\n return self._time_step", "def time_step(self):\n return self._time_step", "def GetTimeStep(self):\n time_step = None\n\n time_step = self._solver_collection.GetTimeStep()\n \n if not time_step is None:\n\n self.time_step = time_step\n\n return self.time_step", "def time_step(self) -> float:\n return self._timestep", "def timeStep(self):\n return self.params['h']", "def getCurrentStep():", "def sim_step(self):\n return traci.simulation.getCurrentTime()/1000 # milliseconds to seconds", "def get_step(self):\n return self.step", "def get_step(self):\n return self.step", "def cur_step(self):\n return self._cur_step", "def timestep(self) -> Optional[float]:\n dt = None\n if len(self.time) > 1 and self.is_equidistant:\n dt = (self.time[1] - self.time[0]).total_seconds() # type: ignore\n return dt", "def get_time_step_to_enqueue(self):\n return self.time_step_to_enqueue", "def get_time_step(self):\n for body in self.bodies:\n # If body is a Satelite\n if body.name == \"Satelite\":\n # Assuming that acceleration for a small times step is constant\n t = 0.01 * norm(body.velocity) / norm(body.acc)\n if t < self.t:\n return t\n return self.t", "def step(self):\n return self._step", "def step(self):\n return self._step", "def step(self):\n return self._step", "def step(self):\n return self._step", "def step(self):\n return self._step", "def current_step(self) -> FlowNode:\n return self._current_step", "def get_step(self) -> int:\n return self.step", "def getTime(self):\n return self.step / (self.max_step + int(self.include))", "def internal_timestep(self):\n try:\n return self._internal_dt\n except AttributeError:\n # the component hasn't started running yet\n _ = self.calc_grads_and_timesteps(False, False)\n return self._internal_dt", "def step ( self ) :\n return self.__step", "def time(self, step: int) -> float:\n return self._start_time + self._parameters.dt*(step - self._start_step)", "def dt(self):\n if isinstance(self._time_axis, are_ax.RegularAxis):\n return self._time_axis.step\n raise RuntimeError(\"Time step is not available for orbits constructed with non-regular time axis\")", "def step(self):\n\n e = self.event_queue.get()\n self.current_time = e.time\n component = e.component\n component.output(self.current_time)\n component.input(self.current_time)\n component.fire()\n\n self.event_queue.put(VirtualTimeScheduler.Event(self.current_time + component.interval, component))\n\n return self.current_time", "def full_step_time(self):\n\n total_step_time = self.duration()\n return total_step_time / (2 * self.half_to_full_step_time_ratio + (self.num_steps() - 2))", "def get(self):\n if self.running:\n return self.accumulated_time + pg.time.get_ticks() - self.start_time\n else:\n return self.accumulated_time", "def gettime(self):\n return self.t", "def ntimestep(self):\n if self._ntimestep is None:\n self._ntimestep = self.get_data_ntimestep()\n\n return self._ntimestep", "def getStep():\n # TODO: can there be non-Step logs?", "def step(self) -> int:\n return self._step", "def compute_time_step():\n\n dt = Hydro.compute_time_step()\n\n return dt", "def dt(self):\n return self._data_writer.get_simulation_time_step_ms()", "def startTime(self) -> float:\n try: return self.times[0]\n except IndexError: return 0.0", "def current_time(cls) -> float:", "def half_step_time(self):\n\n return self.full_step_time() * self.half_to_full_step_time_ratio", "def time_step_spec(self) -> ts.TimeStep:\n return ts.time_step_spec(self.observation_spec(), self.reward_spec())", "def 
get_time(self) -> float:\n raise NotImplementedError()", "def getCurrentSimulationTime(self):\r\n raise NotImplementedError()", "def get_time_walking(self):\n return self.time_step_to_enqueue - self.time_entered", "def time(self):\n return self._begin", "def _step(self) -> int:\n return self._config[CONF_STEP]", "def getHeadingTime(self) -> float:\n return self.timestep_cached_heading_tm", "def t0(self):\n return self._time_axis.start", "def getTime(self) -> float:\n return self.t", "def get_time(self):\n return self._current_time_sec", "def get_current_step(self):\n try:\n return self.get_step_by_id(self.current_step.id)\n except (AttributeError, ValueError):\n message = \"The current step for this ticket is not set.\"\n logger.debug(message)\n raise KeyError(message)", "def get_current_time(self):\n return self.time", "def get_step(self):\n # decide which direction and how far\n direction = choice([1, -1])\n distance = choice([0, 1, 2, 3, 4])\n step = direction * distance\n return step", "def GetAnimationStep(self):\r\n\r\n return self._animation_step", "def reset(self) -> ts.TimeStep:\n self._current_time_step = self._reset()\n return self._current_time_step", "def get_step(self):\n direction = choice([1, -1])\n distance = choice([0, 1, 2, 3, 4])\n step = direction * distance\n return step", "def get_step(self):\n direction = choice([1, -1])\n distance = choice([0, 1, 2, 3, 4])\n step = direction * distance\n return step", "def current_state_time(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"current_state_time\")", "def control_timestep(self):\n if self._overridden_n_sub_steps is not None:\n return self.physics.timestep() * self._overridden_n_sub_steps\n else:\n return self.task.control_timestep", "def get_time(self):\n return self._current_time", "def get_time(self):\n return self._current_time", "def currentTick(self):\n return self._currentTick", "def getSimulationTime(self):\r\n raise NotImplementedError()", "def target_temperature_step(self):\n return self._temp_step", "def getStep(self, *args):\n return _CompuCell.Simulator_getStep(self, *args)", "def get_time_step_values(self):\n if self.need_to_read():\n self._read_up_front()\n return self._timesteps if self._timesteps is not None else None", "def target_temperature_step(self):\n return self._target_temperature_step", "def current_state_time(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"current_state_time\")", "def next_run_time(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"next_run_time\")", "def next_run_time(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"next_run_time\")", "def get_time(self):\n clock = self.pipeline.get_clock()\n tm = clock.get_internal_time()\n return tm / 1.e9", "def get_current_task(self):\n return self.get_current_step().get_last_task()", "def initialTime(self):\n return self.params['t0']", "def step( self, name ):\n duration = self.summarize_step( start=self.step_start, step_name=name, level=self.level )\n now = time.time()\n self.step_start = now\n return duration", "def get_current_token(self):\n with self._lock:\n if self._unfinished_ids:\n return self._unfinished_ids[0] - self._step\n\n return self._current", "def __get_starting_time(self):\n return self.__starting_time", "def get_time(self):\n return self.get_timed() / 10.0", "def time_step_output(self, current_time, time_step):\n pass", "def get_time(self):\n return self.widget().time()", "def _prev_shifted_time(self):\n return self._prev_sim_time + self.options.time.start_clocktime", "def 
get_step(self):\n direction = choice([1,-1])\n direction = choice([0, 1, 2, 3, 4])\n step = direction * distance\n return step", "def get_first_step(self):\n return self.get_step_by_index(0)", "def t(self):\n return self._data_writer.get_current_run_time_ms()", "def currentTickStartedAt(self):\n return self._currentTickStartedAt", "def getStartTime(self):\n return _osgAnimation.MatrixLinearSampler_getStartTime(self)", "def get_last_timestep(self):\n d = DataFrame(list(self.svalin_db.find({}, {'time'})))\n d.index = d.time\n last_time = convert_datetime(d.time.values[-1])\n return last_time", "def step(self):\n if self._step is None:\n return self._n_fft // 2\n else:\n return self._step", "def getStartTime(self):\n return _osgAnimation.Vec4LinearSampler_getStartTime(self)", "def get_time(self):\n return self.time", "def get_time(self):\n return self._ticks", "def get_time_step_values(self):\n return TensorMeshReader.get_time_step_values(self)", "def get_t(self):\n return self.t", "def get_steps(self):\n return self.steps", "def _STEPS2TIME(step):\n return step/1000.", "def now(self):\n return self._startTime + self.timeToOffset(self.currentTime, self._timeScale)", "def time_step(self):\n\n rho_rel = np.abs(self.rho_dt / self.rho)\n rho_rel_max = np.max(rho_rel)\n e_rel = np.abs(self.e_dt / self.e)\n e_rel_max = np.max(e_rel)\n x_rel = np.abs(self.u / self.dx)\n x_rel_max = np.max(x_rel)\n y_rel = np.abs(self.w / self.dy)\n y_rel_max = np.max(y_rel)\n rel = [rho_rel_max, e_rel_max, x_rel_max, y_rel_max]\n delta = np.max(np.abs(rel))\n\n if 0.1 <= delta <= 1e3:\n self.dt = self.p / delta\n else:\n self.dt = self.p", "def get_reltriggertimes(self):\n return np.array(self.trtimes)-self.soundstarttime", "def getStartTime(self):\n return _osgAnimation.Vec3LinearSampler_getStartTime(self)", "def get_time(cls):\n now = rospy.Time.now()\n return now.secs + now.nsecs*(10**-9) # time in seconds", "def value(self):\n current_value = self.initial_value * self.schedule(self.step / self.nvalues)\n self.step += 1.\n return current_value", "def getStartTime(self):\n return _osgAnimation.Vec2LinearSampler_getStartTime(self)", "def time(self) -> float:\n return self.sim_scene.data.time", "def getStartTime(self):\n return _osgAnimation.QuatSphericalLinearSampler_getStartTime(self)" ]
[ "0.87353057", "0.83974636", "0.81454396", "0.7974334", "0.7622381", "0.75591457", "0.7524713", "0.75237733", "0.75237733", "0.7476018", "0.7327006", "0.7309547", "0.73024344", "0.7277173", "0.7277173", "0.7277173", "0.7277173", "0.72636175", "0.7225894", "0.72174436", "0.7172915", "0.7149396", "0.7144977", "0.714474", "0.70629865", "0.69952554", "0.6948339", "0.6947402", "0.6919923", "0.6910559", "0.6874838", "0.68176806", "0.6787651", "0.67870754", "0.678698", "0.67792857", "0.6755136", "0.6738443", "0.672975", "0.67085695", "0.67029643", "0.67003703", "0.6697337", "0.6694024", "0.666931", "0.66497976", "0.6635497", "0.6634298", "0.66199106", "0.66189206", "0.6610643", "0.6604076", "0.6595292", "0.6595292", "0.6576648", "0.6576602", "0.6561728", "0.6561728", "0.6554996", "0.65343964", "0.65316427", "0.6520097", "0.65199655", "0.6499596", "0.648669", "0.64827317", "0.64827317", "0.6482165", "0.64797217", "0.6476738", "0.64666045", "0.64605224", "0.6454884", "0.64430654", "0.64375633", "0.6408773", "0.63886374", "0.6387099", "0.6385713", "0.6384686", "0.6380242", "0.6379483", "0.6370788", "0.63696694", "0.636601", "0.6343864", "0.6335418", "0.6333992", "0.63246477", "0.6323028", "0.632295", "0.63148695", "0.6297996", "0.6281405", "0.62793267", "0.6272145", "0.6264926", "0.62631786", "0.6259062", "0.6258786" ]
0.9203299
0
Starts a new sequence and returns the first `TimeStep` of this sequence.
def reset(self) -> ts.TimeStep:
    self._current_time_step = self._reset()
    return self._current_time_step
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_first_step(self):\n return self.get_step_by_index(0)", "def start(self):\n\t\tself._start = time.clock()\n\t\tif self._initial is None:\n\t\t\tself._initial = self._start\n\t\treturn self", "def before(self, time: float) -> 'Trajectory':\n return self.split(time)[0]", "def start(self, step=None):\n\n if step is None:\n while True:\n next_event = self._pop_next_event()\n if next_event:\n self.current_time = next_event.datetime\n next_event.call()\n else:\n break\n else:\n # TODO: this is not right...\n while True:\n run_to = self.current_time + step\n while True:\n next_event = self._pop_next_event(run_to)\n if next_event:\n next_event.call()\n else:\n break\n print \"{time} Simulation Finished\".format(time=self.current_time)", "def next(self):\n steps = self.context.get('process.steps', [])\n\n if len(steps) < 1:\n return None\n\n if self._index is None:\n self._index = 0\n elif self._index < len(steps)-1:\n self._index += 1\n\n return Step(attributes=steps[self._index], index=self._index)", "def current_time_step(self) -> ts.TimeStep:\n return self._current_time_step", "def start(self):\n return self.trial.start + timedelta(seconds=self.start_checkpoint)", "def get_first_task(self):\n return self.get_first_step().get_last_task()", "def first(self) -> Task:\n return self._tasks[0]", "def step(self, action: types.NestedArray) -> ts.TimeStep:\n if self._current_time_step is None or self.should_reset(\n self._current_time_step\n ):\n return self.reset()\n\n self._current_time_step = self._step(action)\n return self._current_time_step", "def startTime(self) -> float:\n try: return self.times[0]\n except IndexError: return 0.0", "def set_first_machine_time_step(self, first_machine_time_step):", "def start_record_trajectory(self):\r\n return self._arm.start_record_trajectory()", "def getStartTime(self):\n return _osgAnimation.MatrixLinearSampler_getStartTime(self)", "def start_time(self):\n # TODO: use pd.Timestamp instead\n return self.time[0].to_pydatetime()", "def first(seq):\n try: # try iterator interface\n return seq.next()\n except AttributeError:\n pass\n try: # seq is no iterator, try indexed lookup\n return seq[0]\n except IndexError:\n pass\n raise TypeError(\n \"Argument to `first()` method needs to be iterator or sequence.\")", "def load_first_ts_after(self, time):\n\n # get time step list\n df_ts = self.doc.c.sim.df.time_steps()\n \n if type(time) in [float, int]:\n if len(df_ts[df_ts.simulation_time > time]) == 0:\n raise RuntimeError(\"{} contains no timestep after {} d\".format(self.doc.c.original_filename, time))\n else:\n ts_no = int(df_ts[df_ts.simulation_time > time].reset_index().iloc[0].file_index)\n self.doc.loadTimeStep(ts_no)\n return df_ts[df_ts.simulation_time > time].reset_index().iloc[0]\n elif type(time) == datetime:\n if len(df_ts[df_ts.simulation_date>time])==0:\n raise RuntimeError(\"{} contains no timestep after {}\".format(self.doc.c.original_filename, time))\n else:\n ts_no = int(df_ts[df_ts.simulation_date > time].reset_index().iloc[0].file_index)\n self.doc.loadTimeStep(ts_no)\n return df_ts[df_ts.simulation_date > time].reset_index().iloc[0]\n else:\n raise ValueError(\"parameter 'time' must be of type float (simulation time in days) \")", "def getStartTime(self):\n return _osgAnimation.Vec3LinearSampler_getStartTime(self)", "def first(seq):\n return next(iter(seq))", "def start(self):\n try:\n return self.index[0]\n except:\n pass", "def getStartTime(self):\n return _osgAnimation.Vec4LinearSampler_getStartTime(self)", "def start(self) -> 
pulumi.Input[str]:\n return pulumi.get(self, \"start\")", "def first_loop_start(self) -> int:\n return self.__first_loop_start", "def getStartTime(self):\n return _osgAnimation.Vec2LinearSampler_getStartTime(self)", "def get_first(self):\n raise NotImplementedError(\"get_first: You should have implemented this method!\")", "def getStartTime(self):\n return _osgAnimation.Animation_getStartTime(self)", "def get_time_step(self):\n return self._time_step", "def min_time(self):\n #{{{ function to return time of first sample\n\n return self.mintime", "def start(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"start\")", "def start(self):\n # type: () -> datetime\n return self._start", "def start_run(self):\n return mlflow.start_run(\n run_id=self.run_id,\n experiment_id=self.experiment_id,\n run_name=self.run_name,\n nested=self.nested)", "def getStartTime(self):\n return _osgAnimation.QuatSphericalLinearSampler_getStartTime(self)", "def start(self):\r\n return self.schedule()", "def take_step(self):\n choices_of_steps = [(0,1), (1,0), (0,-1), (-1,0)]\n return random.choices(choices_of_steps)[0]", "def GetTimeStep(self):\n time_step = None\n\n time_step = self._solver_collection.GetTimeStep()\n \n if not time_step is None:\n\n self.time_step = time_step\n\n return self.time_step", "def start(self):\n moment = self.tz_match(self.moment)\n\n delta_to_start = timedelta(minutes=moment.minute % self.freq_minutes,\n seconds=moment.second,\n microseconds=moment.microsecond)\n\n start = moment - delta_to_start\n return start", "def get_start_point(self):\n return self.first_point", "def time_step_spec(self) -> ts.TimeStep:\n return ts.time_step_spec(self.observation_spec(), self.reward_spec())", "def start_time(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"start_time\")", "def start_time(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"start_time\")", "def getStartTime(self):\n return _osgAnimation.Channel_getStartTime(self)", "def step(self):\n try:\n return next(self.generator)\n except StopIteration:\n return None", "def get_start(self):\n return self._start", "def start(self) -> datetime:\n return self._start", "def start(self) -> pos.Pos:\n return self.__start", "def getStart(self):\n return self._start", "def __pos__(self):\n ts = self._fsm.get(self._id)\n return SMTimeSeries(ts._time, ts._value, self._fsm)", "def StartTimer(self):\n self._start_time = time.time()", "def setStartTime(self, *args):\n return _osgAnimation.Animation_setStartTime(self, *args)", "def new_task(self):\n self.true_trajectory = self.simulate()\n self.x0 = self.true_trajectory[0]\n self.xT = self.true_trajectory[-1]\n return self.reset()", "def step(self):\n\n e = self.event_queue.get()\n self.current_time = e.time\n component = e.component\n component.output(self.current_time)\n component.input(self.current_time)\n component.fire()\n\n self.event_queue.put(VirtualTimeScheduler.Event(self.current_time + component.interval, component))\n\n return self.current_time", "def next_step(self):\n logging.debug(u\"Moving to next step\")\n\n if not self.steps or len(self.steps) < 1:\n logging.debug(u\"- no steps have ben set\")\n return None\n\n index = self.get('_index')\n\n if index is None:\n index = 0\n elif index < len(self.steps)-1:\n index += 1\n else:\n logging.debug(u\"- all steps have ben consumed\")\n return None\n\n current = self.current_step\n if current:\n current.stop()\n\n logging.debug(u\"- triggering step #{}\".format(index+1))\n self.set('_index', index)\n step 
= self.steps[index]\n step.trigger(bot=self.bot)\n return step", "def next_run(self):\n for run in self._runs:\n # Because the runs are ordered, look for the first run where\n # stop_time is in the future.\n if run.is_next_run(self._now):\n return run\n # If we arrive here, no next run (today).\n return None", "def First():\n return CheckForError(lib.Generators_Get_First())", "def start(self):\n\t\tif self._start is not None:\n\t\t\traise RuntimeError('Animations can only be run once')\n\t\tself._start = 1\t\n\t\t# start time\n\t\tstartticks = self._startticks if self.startticks else _pg.time.get_ticks()\n\t\tfor anim in self.animations:\n\t\t\tanim._startticks = startticks\n\t\t\tanim.start()\n\t\t\tstartticks += anim.duration\n\t\t# get updated\n\t\t_running.append(self)\n\t\t_anim_started(self)", "def start(self) -> Vertex:\n return self._start", "def start(self):\n\t\tif self._start is not None:\n\t\t\traise RuntimeError('Animations can only be run once')\n\t\tself._start = 1\t\n\t\t# start time\n\t\tif not self._startticks:\n\t\t\tself._startticks = _pg.time.get_ticks()\n\t\tfor anim in self.animations:\n\t\t\tanim._startticks = self._startticks\n\t\t\tanim.start()\n\t\t# get updated\n\t\t_running.append(self)\n\t\t_anim_started(self)", "def next(self):\n last_time = self.next_time\n current_time = time.time()\n delta = last_time + self.interval - current_time\n\n if last_time > current_time + self.interval:\n # Clock appears to have moved backwards. Reset\n # the timer to avoid waiting for the clock to\n # catch up to whatever time it was previously.\n self.next_time = current_time + self.interval\n elif delta < 0 and abs(delta) > self.interval * self.max_catchup:\n # Execution of jobs is too far behind. Give up on\n # trying to catch up and reset the time, so that\n # will only be repeated a maximum of\n # self.max_catchup times.\n self.next_time = current_time - \\\n self.interval * self.max_catchup\n else:\n self.next_time = last_time + self.interval\n\n return self", "def _get_start(self):\n return self._start", "def step ( self ) :\n return self.__step", "def start(self):\n return self.reset(\n starting=1,\n stopped=0,\n )", "def step(self, time):\n raise \"use method step of class ReactorNet\" \n #return _cantera.reactor_step(self.__reactor_id, time) ", "def take_min(self):\n return self.get_first()", "def startNextAnim(self):\n self.notify.debug(\"startNextAnim self.okToStartNextAnim=%s\" % self.okToStartNextAnim)\n #import pdb; pdb.set_trace()\n self.curIval = None\n if self.okToStartNextAnim:\n self.notify.debug(\"got pass okToStartNextAnim\")\n whichAnim = self.chooseAnimToRun()\n self.notify.debug(\"whichAnim=%s\" % whichAnim)\n self.lastPlayingAnimPhase = whichAnim # merely for debugging\n self.curIval = self.createAnimSequence(whichAnim)\n self.notify.debug(\"starting curIval of length %s\" % self.curIval.getDuration())\n self.curIval.start()\n else:\n self.notify.debug(\"false self.okToStartNextAnim=%s\" %self.okToStartNextAnim)", "def step(self, time):\n return _cantera.reactornet_step(self.__reactornet_id, time)", "def start_pose():\n global start_pose\n while start_pose is None:\n pass\n return start_pose", "def start(self) -> pdarray:\n return self._starts", "def get_next_batch_start(self):\n return None", "def getFirst(self, t):\n index = self._findFirst(t)\n if index >= 0:\n return self.jobs[index]\n else:\n return None", "def start_time(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"start_time\")", "def start(self):\n return self.__start", 
"def get_step(self):\n direction = choice([1, -1])\n distance = choice([0, 1, 2, 3, 4])\n step = direction * distance\n return step", "def get_step(self):\n direction = choice([1, -1])\n distance = choice([0, 1, 2, 3, 4])\n step = direction * distance\n return step", "def RespStart(builder):\n return Start(builder)", "def time_step(self):\n return self._time_step", "def step(self):\n return self._step", "def get_time_step(self):\n for body in self.bodies:\n # If body is a Satelite\n if body.name == \"Satelite\":\n # Assuming that acceleration for a small times step is constant\n t = 0.01 * norm(body.velocity) / norm(body.acc)\n if t < self.t:\n return t\n return self.t", "def start(self):\n return self._start", "def start(self):\n return self._start", "def start(self):\n return self._start", "def start(self):\n return self._start", "def start(self):\n return self._start", "def seek_to_start_time(self):\n return 0", "async def get_next(self) -> Probe:\n schedule: Optional[Schedule] = None\n while schedule is None:\n try:\n # Try to get the earliest scheduled probe\n schedule = self.queue[0]\n except IndexError:\n # If there is none, wait for a change\n async with self.queue_changed:\n await self.queue_changed.wait()\n else:\n # Wait until it's time to run the scheduled probe\n with trio.move_on_at(schedule.next_time):\n # However, if the queue changes before it's time to run,\n # we forget the selected schedule to re-elect a new one.\n async with self.queue_changed:\n await self.queue_changed.wait()\n schedule = None\n # Just before running it, check if it's not actually removed\n if schedule is not None and schedule.removed:\n heapq.heappop(self.queue)\n schedule = None\n # Immediately reschedule the next run of the selected probe\n schedule.advance()\n heapq.heapreplace(self.queue, schedule)\n # Then let the caller actually run the elected probe\n return schedule.probe", "def get_next_known_start_time(self, current_time):\n raise NotImplementedError()", "def start_time(self):\n return RPR.GetAudioAccessorStartTime(self.id)", "def readFirst(self):\n return self.models[0].time_next", "def __next__(self):\n temp = timescale()\n try:\n temp.MJD = np.atleast_1d(self.MJD)[self.__index__].copy()\n except IndexError as exc:\n raise StopIteration from exc\n # add to index\n self.__index__ += 1\n return temp", "def get_sequence(self):\n if os.path.isfile(self.input):\n with open(self.input, \"r\") as file:\n self.sequence = file.read()\n else:\n raise oma.SequenceError(\"Cannot open {0}\".format(self.input))", "def start(self) -> int:\n return self._start", "def start(self) -> int:\n return self._start", "def createAnimSequence(self, animPhase):\n result = Sequence( self.phaseIvals[animPhase],\n Wait(self.phaseInfo[self.curPhase][1]),\n Func(self.startNextAnim)\n )\n # self.notify.debug(\"createAnimSequence %s\" % result)\n return result", "def next_schedule(self) -> datetime:\n return next(self._interval)", "def step(self):\n return self._step", "def step(self):\n return self._step", "def step(self):\n return self._step", "def step(self):\n return self._step", "def start_time(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"start_time\")", "def start_time(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"start_time\")", "def start_time(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"start_time\")" ]
[ "0.6460188", "0.6099878", "0.59512615", "0.5906501", "0.5806284", "0.57064736", "0.5619724", "0.5561282", "0.5480415", "0.5464584", "0.5464028", "0.54418164", "0.5433138", "0.5427295", "0.5409704", "0.5409105", "0.5397404", "0.5380647", "0.5376489", "0.5375282", "0.53676325", "0.5362454", "0.5335123", "0.532989", "0.5308167", "0.5298724", "0.5287576", "0.5262275", "0.5247483", "0.5245195", "0.52443624", "0.5226137", "0.52234507", "0.5198671", "0.51922023", "0.51869464", "0.51814085", "0.5180324", "0.5173474", "0.5173474", "0.5167226", "0.5158737", "0.5144332", "0.5111203", "0.51077217", "0.5106091", "0.5076588", "0.5072823", "0.50657", "0.5065412", "0.5062914", "0.5062708", "0.5059165", "0.5056741", "0.5054609", "0.50530076", "0.50478256", "0.5045759", "0.50402004", "0.5031111", "0.5028188", "0.5026777", "0.5021786", "0.50201714", "0.5009777", "0.50083584", "0.5002387", "0.49955904", "0.49913538", "0.49899533", "0.4986744", "0.49801224", "0.49801224", "0.4977187", "0.49762163", "0.4969016", "0.49689332", "0.4966333", "0.4966333", "0.4966333", "0.4966333", "0.4966333", "0.49650857", "0.4961724", "0.49610114", "0.4959461", "0.49575147", "0.4956356", "0.49534968", "0.49411857", "0.49411857", "0.4936149", "0.49301395", "0.4928105", "0.4928105", "0.4928105", "0.4928105", "0.4927928", "0.4927928", "0.4927928" ]
0.5536652
8
Updates the environment according to the action and returns a `TimeStep`. If the environment returned a `TimeStep` with `StepType.LAST` at the previous step, the implementation of `_step` in the environment should call `reset` to start a new sequence and ignore `action`. This method will start a new sequence if called after the environment has been constructed and `reset` has not been called. In this case `action` will be ignored. If `should_reset(current_time_step)` is True, then this method will `reset` by itself. In this case `action` will be ignored.
def step(self, action: types.NestedArray) -> ts.TimeStep:
    if self._current_time_step is None or self.should_reset(
        self._current_time_step
    ):
        return self.reset()

    self._current_time_step = self._step(action)
    return self._current_time_step
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def step(self, action):\n if self._reset_next_step:\n return self.reset()\n\n # Apply the game_rules\n for rule in self.game_rules:\n rule.step(self._state, self._meta_state)\n\n # Apply the action\n self.action_space.step(self._state, action)\n\n # Step the physics\n self.physics.step(self._state)\n\n # Compute reward\n self.step_count += 1\n reward, should_reset = self.task.reward(\n self._state, self._meta_state, self.step_count)\n\n # Take observation\n observation = self.observation()\n\n # Return transition\n if should_reset:\n self._reset_next_step = True\n return dm_env.termination(reward=reward, observation=observation)\n else:\n return dm_env.transition(reward=reward, observation=observation)", "def step(self, action):\n if self._reset_next_step:\n self._reset_next_step = False\n return self.reset()\n\n self._hooks.before_step(self._physics_proxy, action, self._random_state)\n self._observation_updater.prepare_for_next_control_step()\n\n try:\n for i in range(self._n_sub_steps):\n self._hooks.before_substep(self._physics_proxy, action,\n self._random_state)\n self._physics.step()\n self._hooks.after_substep(self._physics_proxy, self._random_state)\n # The final observation update must happen after all the hooks in\n # `self._hooks.after_step` is called. Otherwise, if any of these hooks\n # modify the physics state then we might capture an observation that is\n # inconsistent with the final physics state.\n if i < self._n_sub_steps - 1:\n self._observation_updater.update()\n physics_is_divergent = False\n except control.PhysicsError as e:\n if not self._raise_exception_on_physics_error:\n logging.warning(e)\n physics_is_divergent = True\n else:\n raise\n\n self._hooks.after_step(self._physics_proxy, self._random_state)\n self._observation_updater.update()\n\n if not physics_is_divergent:\n reward = self._task.get_reward(self._physics_proxy)\n discount = self._task.get_discount(self._physics_proxy)\n terminating = (\n self._task.should_terminate_episode(self._physics_proxy)\n or self._physics.time() >= self._time_limit\n )\n else:\n reward = 0.0\n discount = 0.0\n terminating = True\n\n obs = self._observation_updater.get_observation()\n\n if not terminating:\n return dm_env.TimeStep(dm_env.StepType.MID, reward, discount, obs)\n else:\n self._reset_next_step = True\n return dm_env.TimeStep(dm_env.StepType.LAST, reward, discount, obs)", "def step(self, action):\n assert self.action_space.contains(action), \"%r (%s) invalid\"%(action, type(action))\n self.microgridPolicy.improveAction(action);\n\n self.microgrid.update();\n\n self.updateState();\n done = self.microgridPolicy.verifyStopConditions();\n reward = self.microgridPolicy.computeReward(done)\n if done: \n if self.steps_beyond_done is None:\n self.steps_beyond_done = 0\n else:\n logger.warn(\"You are calling 'step()' even though this environment has already returned done = True. 
You should always call 'reset()' once you receive 'done = True' -- any further steps are undefined behavior.\")\n self.steps_beyond_done += 1\n self.clock.increaseTimeStep();\n return self.state, reward, done, {}", "def step(self, action):\n if self.platform is None:\n raise RuntimeError(\"Call `reset()` before starting to step.\")\n\n if not self.action_space.contains(action):\n raise ValueError(\n \"Given action is not contained in the action space.\")\n\n num_steps = self.frameskip\n\n # ensure episode length is not exceeded due to frameskip\n step_count_after = self.step_count + num_steps\n if step_count_after > self.episode_length:\n excess = step_count_after - self.episode_length\n num_steps = max(1, num_steps - excess)\n\n reward = 0.0\n for _ in range(num_steps):\n self.step_count += 1\n if self.step_count > self.episode_length:\n raise RuntimeError(\"Exceeded number of steps for one episode.\")\n\n # send action to robot\n robot_action = self._gym_action_to_robot_action(action)\n t = self.platform.append_desired_action(robot_action)\n\n # Use observations of step t + 1 to follow what would be expected\n # in a typical gym environment. Note that on the real robot, this\n # will not be possible\n observation = self._create_observation(t + 1)\n\n reward += self.compute_reward(observation, self.info)\n\n is_done = self.step_count == self.episode_length\n\n return observation, reward, is_done, self.info", "def step(self, action):\n if self._reset_next_step:\n return self.reset()\n\n self._step_count += 1\n \n reward = self._action_space.step(\n action, self._sprites, keep_in_frame=self._keep_in_frame)\n\n # Update sprite positions from their velocities\n for sprite in self._sprites:\n sprite.update_position(keep_in_frame=self._keep_in_frame)\n\n reward += self._task.reward(self._sprites)\n observation = self.observation()\n\n if self.should_terminate():\n self._reset_next_step = True\n return dm_env.termination(reward=reward, observation=observation)\n else:\n return dm_env.transition(reward=reward, observation=observation)", "def _step(self, action):\n if self._episode_ended:\n # The last action ended the episode. 
Ignore the current action and start a new episode\n return self.reset()\n\n env_action = self.y_train[self.id[self.episode_step]]\n self.episode_step += 1\n\n if action == env_action: # Correct action\n if env_action: # Minority\n reward = 1 # True Positive\n else: # Majority\n reward = self.imb_rate # True Negative\n\n else: # Incorrect action\n if env_action: # Minority\n reward = -1 # False Negative\n self._episode_ended = True # Stop episode when minority class is misclassified\n else: # Majority\n reward = -self.imb_rate # False Positive\n\n if self.episode_step == self.X_len - 1: # If last step in data\n self._episode_ended = True\n\n self._state = self.X_train[self.id[self.episode_step]] # Update state with new datapoint\n\n if self._episode_ended:\n return ts.termination(self._state, reward)\n else:\n return ts.transition(self._state, reward)", "def reset(self) -> ts.TimeStep:\n self._current_time_step = self._reset()\n return self._current_time_step", "def step(self, action: int):\n assert self.action_space.contains(action)\n loc = action\n if self.done:\n return self._get_obs(), 0, True, None\n\n reward = NO_REWARD\n # update bord\n self.board[loc] = to_code(self.mark)\n\n # check if game has ended\n status = check_game_status(self.board)\n if status >= 0:\n self.done = True\n if status in [1, 2]:\n reward = O_REWARD if self.mark == 'O' else X_REWARD\n\n # update mark\n self.mark = next_mark(self.mark)\n\n return self._get_obs(), reward, self.done, None", "def step(\n self,\n action: Union[Dict[str, Any], np.ndarray],\n env_id: Optional[np.ndarray] = None,\n ) -> Union[TimeStep, Tuple]:", "def step(self, action):\n action = self.randomization.action_randomizer.randomize(\n action, self._random_state\n )\n\n robot_exception = None\n try:\n self._act(action)\n except RobotException as re:\n logger.error(\n f\"Robot raised exception: {str(re)}. 
This will finish the current episode.\"\n )\n robot_exception = re\n\n if not self.constants.physical:\n # We don't need to do stepping for physical roll out.\n self.mujoco_simulation.step()\n\n self._synchronize_step_time()\n self.t += 1\n\n obs, reward, done, info = self.get_observation(robot_exception=robot_exception)\n obs, reward, done, info = self.step_finalize(obs, reward, done, info)\n return obs, reward, done, info", "def step(self, action):\n self.t += 1\n state, reward, done, info = self.env.step(action)\n if self.use_action_masking:\n self.update_avail_actions(state)\n obs = {\n \"action_mask\": self.action_mask,\n \"actual_obs\": state,\n }\n else:\n obs = state\n \n if self.t >= 200: # ------ change if using v1\n done = True\n return obs, reward, done, info", "def reset(self):\n self._reset_next_step = False\n self.step_count = 0\n \n self._state = self.state_initializer()\n self._meta_state = self._meta_state_initializer()\n self.task.reset(self._state, self._meta_state)\n self.physics.reset(self._state)\n self.action_space.reset(self._state)\n for rule in self.game_rules:\n rule.reset(self._state, self._meta_state)\n rule.step(self._state, self._meta_state)\n \n return dm_env.restart(self.observation())", "def step(self, action: np.ndarray) -> Tuple[np.ndarray, np.float64, bool]:\n next_state, reward, done, _ = self.env.step(action, resize=RESIZE, size = RESIZE_SIZE)\n\n if not self.is_test:\n self.transition += [reward, next_state, done]\n \n # N-step transition\n if self.use_n_step:\n one_step_transition = self.memory_n.store(*self.transition)\n # 1-step transition\n else:\n one_step_transition = self.transition\n\n # add a single step transition\n if one_step_transition:\n self.memory.store(*one_step_transition)\n \n return next_state, reward, done", "def reset(self):\n\n self.curr_episode += 1\n self.curr_step = 0\n\n self.action_episode_memory.append([])\n self.rewards.append([])\n\n self.is_finalized = False\n init_state, init_reward = self._take_action(5 * np.random.randn(self.act_dimension))\n self.initial_conditions.append(init_state)\n return init_state", "def step(self, action):\n\n if not self._is_action_legal(action):\n return self.current_state, self.reward_illegal_action, self._is_terminal_state(), None\n else:\n # Change action passed if environment should behave random\n if self.stochastic:\n if not np.random.choice([True, False], 1, p=[self.p, 1 - self.p]):\n action = np.random.choice(self.possible_actions)\n\n # Needed for reward calculation (must be done before updating data structures)\n number_of_shifts = self._get_number_of_shifts(action)\n is_cargo_mandatory = int(self.vehicle_data[2][action] == 1)\n\n slot = self.end_of_lanes[self.current_Lane]\n self.loading_sequence += \"{}. 
Load Vehicle Type \\t {} \\t in Lane: \\t {} \\t Row: \\t {} \\n\" \\\n .format(self.sequence_no, action, self.current_Lane, slot)\n\n self.end_of_lanes[self.current_Lane] += self.vehicle_data[4][action]\n\n if self.vehicle_data[1][action] == -1 or \\\n self.number_of_vehicles_loaded[action] < self.vehicle_data[1][action]:\n self.number_of_vehicles_loaded[action] += 1\n\n self.loaded_vehicles[self.current_Lane][self.vehicle_Counter[self.current_Lane]] = action\n self.vehicle_Counter[self.current_Lane] += 1\n\n # Update grids\n for i in range(self.vehicle_data[4][action]):\n self.grid.T[self.current_Lane][slot + i] = self.sequence_no\n self.grid_destination.T[self.current_Lane][slot + i] = self.vehicle_data[3][action]\n self.grid_vehicle_type.T[self.current_Lane][slot + i] = self.vehicle_data[0][action]\n\n # Update lowest destination data structure\n if self.vehicle_data[3][action] < self.lowest_destination[self.current_Lane]:\n self.lowest_destination[self.current_Lane] = self.vehicle_data[3][action]\n\n self.sequence_no += 1\n # Update according to lane selection heuristic\n self.current_Lane = self._get_minimal_lanes()[0]\n\n self.possible_actions = self.get_possible_actions_of_state()\n self.current_state = self._get_current_state()\n\n if self._is_terminal_state():\n # Calculate reward for terminal state\n free_spaces = np.sum(self._get_free_capacity()) / np.sum(self.total_capacity)\n mandatory_vehicles_left_to_load = np.sum(self.vehicle_data[1][self.mandatory_cargo_mask]\n - self.number_of_vehicles_loaded[self.mandatory_cargo_mask])\n reward_features = np.array(\n [is_cargo_mandatory, number_of_shifts, free_spaces, mandatory_vehicles_left_to_load])\n reward = np.dot(self.reward_system, reward_features) + self.zeta\n\n return self.current_state, reward, True, {}\n else:\n # Calculate reward\n reward_features = np.array([is_cargo_mandatory, number_of_shifts, 0, 0])\n reward = np.dot(self.reward_system, reward_features) + self.zeta\n\n return self.current_state, reward, False, {}", "def step_env(self, action):\n return self.env.step(action)", "def step(self, action):\n self.t += 1\n if self.use_run_time_assurance:\n probe_state, unsafe = self.probe_step(action)\n # switch to safe controller if unsafe\n if unsafe:\n x, x_dot, theta, theta_dot = probe_state\n # go right\n if x <= -self.x_threshold: # go right\n action = 1\n elif x>= self.x_threshold: # go left\n action = 0 \n \n state, reward, done, info = self.env.step(action)\n # Could make a custom reward here if you want\n if self.use_action_masking:\n self.update_avail_actions(state)\n obs = {\n \"action_mask\": self.action_mask,\n \"actual_obs\": state,\n }\n else:\n obs = state\n \n if self.t >= 200: # ------ change if using v1\n done = True\n return obs, reward, done, info", "def _step(self, action: types.NestedArray) -> ts.TimeStep:", "def step(self, action):\n x, y = self._move(action, *self._currentPos)\n\n if chr(self._grid[x, y]) == CASE_TYPES.Wall:\n # error - previous state was already a wall\n self._done = True\n self._trajectory.append(self._currentPos)\n return self._currentPos, -1, self._done, {}\n\n reward = {\n CASE_TYPES.Water: self.waterReward,\n CASE_TYPES.Sand: self.sandReward,\n CASE_TYPES.Open: self.stepReward,\n CASE_TYPES.Termination: self.successReward,\n CASE_TYPES.Trap: (\n -(self.maxSteps - len(self._trajectory)) + self.failureReward +\n self.trapReward)\n }[chr(self._grid[x, y])]\n\n # termination state\n if chr(self._grid[x, y]) in [CASE_TYPES.Termination, CASE_TYPES.Trap]:\n self._done = True\n\n 
self._currentPos = (x, y)\n\n self._trajectory.append(self._currentPos)\n self._nbSteps += 1\n\n if self._nbSteps >= self.maxSteps and not self._done:\n reward += self.failureReward\n\n return self._currentPos, reward, self._done, {}", "def step(self, action: ActionType) -> EnvResponse:\n action = self.action_space.clip_action_to_space(action)\n if self.action_space and not self.action_space.contains(action):\n raise ValueError(\"The given action does not match the action space definition. \"\n \"Action = {}, action space definition = {}\".format(action, self.action_space))\n\n # store the last agent action done and allow passing None actions to repeat the previously done action\n if action is None:\n action = self.last_action\n self.last_action = action\n if self.visualization_parameters.add_rendered_image_to_env_response:\n current_rendered_image = self.get_rendered_image()\n\n self.current_episode_steps_counter += 1\n if self.phase != RunPhase.UNDEFINED:\n self.total_steps_counter += 1\n\n # act\n self._take_action(action)\n\n # observe\n self._update_state()\n\n if self.is_rendered:\n self.render()\n\n self.total_reward_in_current_episode += self.reward\n\n if self.visualization_parameters.add_rendered_image_to_env_response:\n self.info['image'] = current_rendered_image\n\n self.last_env_response = \\\n EnvResponse(\n reward=self.reward,\n next_state=self.state,\n goal=self.goal,\n game_over=self.done,\n info=self.info\n )\n\n # store observations for video / gif dumping\n if self.should_dump_video_of_the_current_episode(episode_terminated=False) and \\\n (self.visualization_parameters.dump_mp4 or self.visualization_parameters.dump_gifs):\n self.last_episode_images.append(self.get_rendered_image())\n\n return self.last_env_response", "def step(self, action):\n assert self.action_space.contains(action)\n\n if self.done:\n return self._get_obs(), 0, True, None\n\n reward = NO_REWARD\n\n # place\n col = action\n row = get_row(self.board, col)\n\n self.board[row, col] = tocode(self.mark)\n self.turn += 1\n self.status = check_game_status(self.board, row, col)\n\n if self.status >= 0:\n self.done = True\n if self.status in [1, 2]:\n # always called by self\n reward = O_REWARD if self.mark == 'O' else X_REWARD\n\n # switch turn\n self.mark = next_mark(self.mark)\n return self._get_obs(), reward, self.done, None", "def step(self, action):\n # Implement your step method here\n # return (observation, reward, done, info)\n self._state = self._state + action\n # print('Step state:', self._state)\n x, y = self._state\n reward = - (x ** 2 + y ** 2) ** 0.5\n done = abs(x) < 0.01 and abs(y) < 0.01\n next_observation = np.copy(self._state)\n return Step(observation=next_observation, reward=reward, done=done)", "def step(self, action: np.ndarray) -> Tuple[np.ndarray, np.float64, bool, dict]:\n next_state, reward, done, info = self.env.step(action)\n return next_state, reward, done, info", "def step(self, action):\n # THIS WILL BE CALLED FROM ALL STEP DRIVERS\n self._world = self._action_wrapper.action_to_behavior(world=self._world,\n action=action)\n # 1. move the agent we set the action for\n controlled_agent_id = self._scenario._eval_agent_ids[self._action_wrapper._input_count-1]\n self._world.stepAgent(self._step_time, controlled_agent_id)\n\n # length of agents\n if self._action_wrapper._input_count >= len(self._scenario._eval_agent_ids):\n # CANNOT STEP WORLD IF NOT ALL ACTIONS ARE SET\n self._action_wrapper._input_count = 0\n \n # 2. 
move all other agent\n self._world.step(self._step_time)\n if self._render:\n self.render()\n\n # TODO needs to know the agents id\n return self.snapshot(\n world=self._world,\n controlled_agents=controlled_agent_id,\n action=action)", "def step(self, action: list) -> None:\n self._input = np.array(\n [self._thrust_surge(action[0]), self._moment_steer(action[1])]\n )\n w, q = odesolver45(\n self._state_dot, self._state, self.config.simulation.t_step_size\n )\n\n self._state = q\n self._state[2] = geom.princip(self._state[2])\n\n self._prev_states = np.vstack([self._prev_states, self._state])\n self._prev_inputs = np.vstack([self._prev_inputs, self._input])\n\n self._step_counter += 1", "def step(self, action):\n done = self.cur_step >= self.max_steps_per_episode\n\n if done:\n raise RuntimeError(\"Episode is done\")\n\n self.cur_step += 1\n\n # Compute new state based on previous state and action\n new_state = self._take_action(action)\n\n # Compute reward value based on previous state and action\n reward = self._get_reward(action)\n\n # Update current state to new state\n self.cur_state = new_state\n\n # Compute observation from current state\n ob = self._get_obs() # Has to come after new state update\n\n # Update action, observation and reward histories\n self.action_episode_memory[self.cur_episode].append(action)\n self.observation_episode_memory[self.cur_episode].append(ob)\n self.reward_episode_memory[self.cur_episode].append(reward)\n\n # Recompute done since action may have modified it\n done = self.cur_step >= self.max_steps_per_episode\n\n return ob, reward, done, {}", "def step(self, action):\n # check if suggested action is valid\n valid = self._take_action(action)\n if not valid:\n _, _ = self._simulate()\n response = self.worst_response\n target = 6*60\n else:\n # simulate until a TS response is needed\n response = np.inf\n while response == np.inf:\n response, target = self._simulate()\n if np.isnan(target): # prio 2 or 3 incident: no target exists\n target = response\n\n self.last_action = action if self.action_type == \"tuple\" else self.action_num_to_tuple[action]\n # calculate reward and new state\n self.reward = self._get_reward(response, target, valid=valid)\n self.state, self.is_done = self._extract_state()\n return self.state, self.reward, self.is_done, {\"note\": \"nothing to report\"}", "def step(\n self, action: Union[numpy.ndarray, int], state: numpy.ndarray = None, dt: int = None\n ) -> tuple:\n data = super(AtariEnvironment, self).step(action=action, state=state, dt=dt)\n if state is None:\n observ, reward, terminal, info = data\n observ = self.gym_env.unwrapped.ale.getRAM() if self.obs_ram else observ\n return observ, reward, terminal, info\n else:\n state, observ, reward, terminal, info = data\n observ = ale_to_ram(self.gym_env.unwrapped.ale) if self.obs_ram else observ\n return state, observ, reward, terminal, info", "def step(self, action):\n self.move_step(action) # Move.\n r, d = self.check_goal() # Check the reward and done state, and create\n # new environment.\n s_new= self.render_env() # Render the new environment.\n return s_new, r, d", "def step(self, action):\n self.action = action\n return self.env.step(action)", "def step(self, action):\n return self._env.step(action)", "def step(self, action: Union[np.ndarray, torch.Tensor]):\n if type(action) == torch.Tensor:\n action = action.squeeze().numpy()\n\n if not type(action) is np.ndarray:\n raise Exception(\"The action must be a Numpy array but is of type %s (value = %s)\" % (type(action), 
action))\n\n if self.increment_actions and not self.action_space.contains(action):\n action = action.clip(self.action_space.low, self.action_space.high)\n\n # Additionally, we must make sure the value will stay in the range\n # min <= x + action <= max\n if self.increment_actions:\n current_values = self.x[np.array([0, 1, 3, 5])]\n new_flow_values = current_values + action\n else:\n new_flow_values = action\n\n new_flow_values = np.clip(new_flow_values, self.flows_lower_bounds, self.flows_upper_bounds)\n self.update_all_flows(new_flow_values)\n\n if any([x < 0 for x in self.x]):\n pass\n # TODO: should I clip the actions to ensure the flows are always positive?\n # raise Exception(f\"Negative flows! x = {[round(x, 4) for x in self.x]}\")\n\n self.update_fitness()\n\n self.step_number += 1\n\n # reward = self.fitness - self.previous_fitness\n reward = self.fitness\n observation = self.get_observation()\n\n done = (self.step_number == self.total_number_of_episodes)\n info = {}\n return observation, reward, done, info", "def step(self, action):\n assert self.action_space.contains(\n action), \"%r (%s) invalid\" % (action, type(action))\n self.time_step += 1\n reward = float(0)\n self.is_episode_done = False\n\n # For testing code\n current_edge_agg_num = self.time_step\n\n # Rescale the action from [-1, 1] to [1, 2, ... , 9]\n # The action is the number of aggregations on edge servers\n # current_edge_agg_num = int((action + 2) * (action + 2))\n\n logging.info(\"RL Agent: Start time step #%s...\", self.time_step)\n logging.info(\n \"Each edge server will run %s rounds of local aggregation.\",\n current_edge_agg_num)\n\n # Pass the tuned parameter to RL agent\n self.rl_agent.get_tuned_para(current_edge_agg_num, self.time_step)\n\n # Wait for state\n current_loop = asyncio.get_event_loop()\n get_state_task = current_loop.create_task(self.wait_for_state())\n current_loop.run_until_complete(get_state_task)\n #print('State:', self.state)\n\n self.normalize_state()\n #print('Normalized state:', self.state)\n\n reward = self.get_reward()\n info = {}\n\n self.rl_agent.cumulative_reward += reward\n\n # Signal the RL agent to start next time step (next round of FL)\n self.step_done.set()\n\n return np.array([self.state]), reward, self.is_episode_done, info", "def step(self, action):\n \n # Check if the given position is empty\n if self.mat[action[0], action[1]] != 0:\n return (self.mat, -0.9, False)\n \n # Update\n self.mat[action[0], action[1]] = 1\n\n # Check if User won\n if self._check_win(1):\n return (self.mat, 1.0, True)\n\n # Check for game end\n acts = self.action_space\n if len(acts) == 0:\n return (self.mat, 0, True)\n\n # If not done, then randomly spawn an 'O' on the board and recalculate the reward\n spawn_point = acts[np.random.choice(acts.shape[0])]\n self.mat[spawn_point[0], spawn_point[1]] = 2\n\n # Check if User lost\n if self._check_win(2):\n return (self.mat, -1.0, True)\n \n # If nothing wrong happens\n else:\n return (self.mat, -0.1, False)", "def step(self, action):\n (self.state, self.reward, self.terminal, self.truncated,\n self.info) = self.env.step(action)\n\n return self.state, self.reward, self.terminal, self.truncated, self.info", "def initial_step(self, state, action):\n next_state = self.state_transition(state, action)\n env_action = random.choice(list(self.action_space(next_state)[1]))\n next_state = self.state_transition(next_state, env_action)\n return next_state", "def step(self, action):\n return self.env.step(action)", "def step(self, action):\n reward = 0\n 
pose_all = []\n self.rotor_speeds = np.array([action]*4)\n for _ in range(self.action_repeat):\n done = self.sim.next_timestep(self.rotor_speeds) # update the sim pose and velocities\n reward += self.get_reward()\n pose_all += [self.sim.pose]\n if self.sim.crashed:\n reward = -5\n done = True\n #if (np.square(self.sim.pose[:3] - self.target_pos)).sum() < 1: # Close enough!\n #done = True\n next_state = np.concatenate(pose_all)\n return next_state, reward, done", "def step(self, state, action, reward, done):\n\n self.memory.add(state, action, reward, done)\n if done and self.n_tau % self.update_freq == 0:\n self.n_tau += 1\n return self.update()\n return None", "def step(self, state, action, reward, next_state, done):\n\n # Save experience in replay memory\n self.memory.add(state, action, reward, next_state, done)\n \n # Learn every self.update_every time steps\n self.t_step = (self.t_step + 1) % self.update_every\n\n # Get random subset & learn if enough samples available in memory\n if self.t_step == 0:\n if len(self.memory) > self.batch_size:\n experiences = self.memory.sample()\n self.learn(experiences, self.gamma)\n \n return", "def step(self, action):\n observation, reward, done, _ = self.env.step(action)\n return np.array(observation), reward, done", "def _reset(self) -> ts.TimeStep:", "def step(self, action):\r\n s = self.get_state()\r\n\r\n elements = np.arange(self.S)\r\n # weights = np.squeeze(self.nextStateProbability[s,action])\r\n weights = self.nextStateProbability[s, action]\r\n nexts = choices(elements, weights, k=1)[0]\r\n\r\n # p = self.nextStateProbability[s,action]\r\n # reward = self.rewardsTable[s,action, nexts][0]\r\n reward = self.rewardsTable[s, action, nexts]\r\n\r\n # fully observable MDP: observation is the actual state\r\n self.currentObservation = nexts\r\n\r\n gameOver = False\r\n if self.currentIteration > np.Inf:\r\n ob = self.reset()\r\n gameOver = True # game ends\r\n else:\r\n ob = self.get_state()\r\n\r\n history = {\"time\": self.currentIteration, \"state_t\": s, \"action_t\": action,\r\n \"reward_tp1\": reward, \"state_tp1\": nexts}\r\n # history version with actions and states, not their indices\r\n # history = {\"time\": self.currentIteration, \"action_t\": self.actionListGivenIndex[action],\r\n # \"reward_tp1\": reward, \"observation_tp1\": self.stateListGivenIndex[self.get_state()]}\r\n self.currentIteration += 1\r\n return ob, reward, gameOver, history", "def step(self, state, action, reward, next_state, done):\n # Save experience in replay memory\n self.memory.add(state, action, reward, next_state, done)\n\n # Learn every UPDATE_EVERY time steps.\n self.t_step = (self.t_step + 1) % PARAM.UPDATE_EVERY\n if self.t_step == 0:\n if len(self.memory) > PARAM.BATCH_SIZE:\n experiences = self.memory.sample()\n self.learn(experiences, PARAM.GAMMA)", "def step(self, action):\n if self.space is None or self.spacecraft is None:\n raise NotImplementedError(\"The spacecraft must be initialized in the environment implementation\")\n\n self._simulate(action)\n\n obs = self.observation.observe()\n reward = self._reward(action)\n terminal = self._is_terminal()\n\n info = {\n \"velocity\": self.spacecraft.velocity,\n \"crashed\": self.spacecraft.crashed,\n \"action\": action,\n }\n try:\n info[\"cost\"] = self._cost(action)\n except NotImplementedError:\n pass\n\n return obs, reward, terminal, info", "def step(self, _action):\n action = np.hstack((np.zeros(6), _action/10.))\n self.ref_skel.set_positions(self.ref_state.angles)\n for i in 
range(self.step_per_frame):\n # self.skel.set_forces(self.skel.get_spd(self.ref_skel.q + action, self.world.time_step(), self.Kp, self.Kd))\n self.skel.set_forces(self.skel.get_spd(self.ref_state.angles + action, self.world.time_step(), self.Kp, self.Kd))\n self.world.step()\n\n self.ref_state_time += self.step_per_frame * self.world.time_step()\n if self.ref_state_time >= self.ref_state.dt:\n self.ref_state_time -= self.ref_state.dt\n self.ref_state = self.ref_state.get_next()\n\n return tuple([self.state(), self.reward(), self.is_done(), dict()])", "def step(self, action):\n if self._backend_agent:\n self._backend_agent._on_gym_step_begin(self, action)\n\n result = self.env.step(action)\n (state, reward, done, info) = result\n self.steps_done_in_episode += 1\n self.steps_done_in_instance += 1\n self.total.steps_done_inc()\n if self.max_steps_per_episode and self.steps_done_in_episode >= self.max_steps_per_episode:\n done = True\n result = (state, reward, done, info)\n if not self.is_episode_done and done:\n self.is_episode_done = True\n self.episodes_done += 1\n self.total.episodes_done_inc()\n\n if self._backend_agent:\n self._backend_agent._on_gym_step_end(self, action, result)\n return result", "def step(self):\n old_state = self.state\n action = self.get_next_action()\n new_state, reward, failed = self.env.step(action)\n if self.training_mode:\n if self.violated_soft_constraint or failed:\n self.Q_model.update(old_state, action, new_state, reward,\n failed)\n self.safety_model.update(old_state, action, new_state, reward,\n failed)\n self.updated_safety = True\n else:\n self.Q_model.update(old_state, action, new_state, reward,\n failed)\n self.updated_safety = False\n else:\n self.updated_safety = False\n self.state = new_state\n self.last_action = action\n return new_state, reward, failed", "def step(self, state, action, reward, next_state, done):\n \n # Save experience in replay memory\n self.memory.add(state, action, reward, next_state, done)\n\n # Learn every UPDATE_EVERY time steps.\n self.t_step = (self.t_step + 1) % self.params.update_every\n if self.t_step == 0:\n # If enough samples are available in memory, get random subset and learn\n if len(self.memory) > self.params.batch_size:\n experiences = self.memory.sample()\n self.learn(experiences, self.params.gamma)", "def step(self, action):\n obs, r, done, info = self.env.step(action)\n obs = self.get_observation(obs)\n return obs, r, self.is_done(), info", "def reset(self):\n # Initialize the timestep\n self.timestep = 0\n self.state = self.starting_state\n\n if self.from_data:\n self.episode_num += 1\n\n\n return self.starting_state", "def step(self, action: ActionType) -> None:\n raise NotImplementedError", "def step(self, state, action, reward, next_state, done):\n # Save experience in replay memory\n self.memory.add(state, action, reward, next_state, done)\n\n # Learn every UPDATE_EVERY time steps.\n self.t_step = (self.t_step + 1) % UPDATE_EVERY\n if self.t_step == 0:\n # If enough samples are available in memory, get random subset and learn\n if len(self.memory) > BATCH_SIZE:\n experiences = self.memory.sample()\n self.learn(experiences, GAMMA)", "def step(self, action):\n self._robot.send_command(action)\n\n obs = self.get_observation()\n\n reward = self.reward(obs.achieved_goal, self.goal)\n done = self.done(obs.achieved_goal, self.goal)\n next_observation = obs.observation\n return Step(observation=next_observation, reward=reward, done=done)", "def act(self, observation, reward, done):\n if 
self._not_restarted(observation):\n # not the first action, remember it and update model\n self._remember(self.prev_action, reward, observation, done)\n if len(self.replay_memory) > self.batch_size:\n self._replay()\n\n # determine the next action if not yet done\n action = None\n\n if not done:\n # epsilon greedy\n if random.uniform(0, 1) < self.epsilon_policy.get():\n # exploration: random action\n action = self.action_space.sample()\n action['start_time'] += 1 # non-zero start times\n else:\n # exploitation\n action = self._get_best_action(observation)\n\n self.prev_observation = observation\n self.prev_action = action\n\n return action", "def step(self, action, update=True) -> tuple:\n if self.state.is_terminal():\n raise Exception('Cannot perform action on terminal state!')\n s = self.state if update else self.state.copy()\n if self.render:\n self.env.render()\n s.observation, reward, s.terminal, info = self.env.step(action)\n\n return s.copy() if update else s, reward", "def step(self, action) -> (list, float, bool):\n if len(self.curr_stim) == 0:\n self.curr_stim += [action[0]] * action[1] + [-action[0]] * action[1]\n self.system_step()\n self.frame += 1 / self.config[\"Fs\"]\n self.history.append(self.x2-self.x1)\n return self.get_state(), 0, False", "def step(self, action):\n\n \"\"\"\n Here we should convert the action num to movement action, execute the action in the\n simulation and get the observations result of performing that action.\n \"\"\"\n #if self.step_number > 200:\n #self.reset()\n rospy.logdebug(\"START STEP OpenAIROS\")\n\n self.gazebo.unpauseSim()\n self._set_action(action)\n #self._prey_step()\n self.gazebo.pauseSim()\n obs = self._get_obs()\n done = self._is_done(obs)\n info = {}\n reward = self._compute_reward(obs, done)\n \n self.cumulated_episode_reward = self.cumulated_episode_reward+ reward\n self.step_number += 1\n rospy.logdebug(\"END STEP OpenAIROS\")\n\n return obs, reward, done, info", "def step(self, action):\n \n success = False\n self.curr_step += 1\n self._take_action(action)\n self._take_action(action)\n self._take_action(action)\n\n # initialize reward and get state \n reward = 0.0\n ob = self._get_state()\n\n # give dense rewards \n if not self.sparse_reward:\n reward = self._get_reward()\n\n # bad terminal conditions\n if self.curr_step >= self.max_steps \\\n or self.target_distance >= self.max_distance \\\n or self.mean_radius_sheep >= self.max_radius:\n self.finish = True\n if self.sparse_reward:\n reward = -1.0\n\n # good terminal conditions\n if self.target_distance <= 1.0:\n success = True\n self.finish = True\n if self.sparse_reward:\n reward = 1.0\n\n # update rl parameters\n self.episode_length += 1\n self.episode_reward += reward\n\n # generate info return parameter\n if self.info_mode == 1 and self.finish:\n info = {'r':self.episode_reward, 'l':self.episode_length, \n 's': success}\n else:\n info = {'n':self.num_sheep, 's': success}\n\n return ob, reward, self.finish, info", "def step(self, action):\n obs = self.gym.get_observations()\n all_actions = self.gym.act(obs)\n all_actions.insert(self.gym.training_agent, action)\n state, reward, terminal, info = self.gym.step(all_actions)\n agent_state = self.featurize(state[self.gym.training_agent])\n\n # agent_state_history = self.make_observation(agent_state, self.step)\n agent_reward = reward[self.gym.training_agent]\n\n # self.step += 1\n return agent_state, agent_reward, terminal, info", "def step(self, action):\n self._last_base_position = self.rex.GetBasePosition()\n 
self._last_base_orientation = self.rex.GetBaseOrientation()\n if self._is_render:\n # Sleep, otherwise the computation takes less time than real time,\n # which will make the visualization like a fast-forward video.\n time_spent = time.time() - self._last_frame_time\n self._last_frame_time = time.time()\n time_to_sleep = self.control_time_step - time_spent\n if time_to_sleep > 0:\n time.sleep(time_to_sleep)\n base_pos = self.rex.GetBasePosition()\n # Keep the previous orientation of the camera set by the user.\n [yaw, pitch, dist] = self._pybullet_client.getDebugVisualizerCamera()[8:11]\n self._pybullet_client.resetDebugVisualizerCamera(dist, yaw, pitch, base_pos)\n\n for env_randomizer in self._env_randomizers:\n env_randomizer.randomize_step(self)\n\n # change up swing and stance ratio and desired speeds randomly for robustness\n if np.random.randint(300) == 0:\n self.ratio = np.random.uniform(self.min_swing_ratio, self.max_swing_ratio)\n\n if np.random.randint(300) == 0:\n self.speed = np.random.uniform(self.min_speed, self.max_speed)\n self.speed_des[0] = self.speed\n\n if np.random.randint(300) == 0:\n self.side_speed = np.random.uniform(self.min_side_speed, self.max_side_speed)\n self.speed_des[1] = self.side_speed\n\n self.base_vel_curr_trans, self.base_vel_curr_rot = self.get_base_velocity()\n action = self._transform_action_to_motor_command(action)\n self.rex.Step(action)\n self.base_vel_next_trans, self.base_vel_next_rot = self.get_base_velocity()\n \n self._env_step_counter += 1\n self.phase += self._action_repeat # the cycle length is CYCLE_TIME/time_step so can add \n # how many times an action was repeated\n\n if self.phase > self.cycle_len:\n self.phase = self.phase % self.cycle_len \n self.cycle_complete += 1\n\n reward = self._reward()\n done = self._termination()\n\n if done:\n self.rex.Terminate()\n\n return np.array(self._get_observation_np()), reward, done, {'action': action}", "def step(self, action):\n force = self.force_mag if action else -self.force_mag\n costheta = math.cos(self.theta)\n sintheta = math.sin(self.theta)\n temp = (\n force + self.polemass_length * self.theta_dot ** 2 * sintheta\n ) / self.total_mass\n thetaacc = (self.gravity * sintheta - costheta * temp) / (\n self.length\n * (4.0 / 3.0 - self.masspole * costheta * costheta / self.total_mass)\n )\n xacc = temp - self.polemass_length * thetaacc * costheta / self.total_mass\n self.x += self.tau * self.x_dot\n self.x_dot += self.tau * xacc\n self.theta += self.tau * self.theta_dot\n self.theta_dot += self.tau * thetaacc\n\n return self.state", "def step(self, action: np.ndarray) -> 'EnvStep':\n ...", "def step(self):\n self.step_n += 1\n self.step_t += 1\n # TODO: directly calling agent.act will by-pass BaseDeepAgent, which\n # checks and assigns 'sess' arugment. So we manually set sess here. 
But\n # is there a better way to do this?\n self.action = self.agent.act(\n state=self.state, sess=self.agent.sess\n )\n next_state, vec_reward, done, _ = self.env.step(self.action)\n reward, done = func_compile_exp_agent(self.action, vec_reward, done)\n self.total_reward = reward + self.reward_decay * self.total_reward\n info = self.agent.step(\n state=self.state, action=self.action, reward=reward,\n next_state=next_state, episode_done=done\n )\n self.record(info)\n flag_success = True if done and reward > 0.0 else False\n if self.savedir is not None:\n self.steps_saver.save(self.episode_n, self.step_t, self.state, self.action,\n vec_reward, reward, done, self.total_reward, flag_success)\n self.state = next_state\n if done:\n self.step_t = 0\n return done", "def step(self, action):\n new_speed = self._state + action\n new_speed[np.where(new_speed < 0)] = 0\n for car_idx in range(self.num_cars):\n # almost instantaneous\n traci.vehicle.slowDown(self.controllable[car_idx], new_speed[car_idx], 1)\n traci.simulationStep()\n self._state = np.array([traci.vehicle.getSpeed(vID) for vID in self.controllable])\n reward = self.compute_reward(self._state)\n # done = np.all(abs(self._state-self.GOAL_VELOCITY) < self.delta)\n next_observation = np.copy(self._state)\n return Step(observation=next_observation, reward=reward, done=False)", "def step(self, reward, observation):\n self._last_observation = self._observation\n self._record_observation(observation)\n\n if not self.eval_mode:\n self._store_transition(self._last_observation, self.action, reward, False)\n self._train_step()\n\n self._rng, self.action = select_action(self.network_def,\n self.online_params,\n self.state,\n self._rng,\n self.num_actions,\n self.eval_mode,\n self.epsilon_eval,\n self.epsilon_train,\n self.epsilon_decay_period,\n self.training_steps,\n self.min_replay_history,\n self.epsilon_fn)\n self.action = onp.asarray(self.action)\n return self.action", "def reset(self):\n self._timestep = np.array([0])", "def step(self, reward, observation):\n self._last_observation = self._observation\n self._record_observation(observation)\n\n if not self.eval_mode:\n self._store_transition(self._last_observation, self.action, reward, False)\n self._train_step()\n\n self._rng, self.action = select_action(self.network_def,\n self.online_params,\n self.state,\n self._rng,\n self.num_quantile_samples,\n self.num_actions,\n self.eval_mode,\n self.epsilon_eval,\n self.epsilon_train,\n self.epsilon_decay_period,\n self.training_steps,\n self.min_replay_history,\n self.epsilon_fn,\n self._tau,\n self.optimizer)\n self.action = onp.asarray(self.action)\n return self.action", "def step(self, phi_t: np.array, reward: float, done: bool) -> int:\n\n # Get new action based on state\n new_act = self._select_action(phi_t)\n\n # Save trajectory\n if not done:\n self.traj['phi'].append(phi_t)\n self.traj['a'].append(new_act)\n self.traj['r'].append(reward)\n\n # ==\n # Learning (via trace)\n if len(self.traj['r']) > 0:\n self._optimize_model(done)\n\n return new_act", "def step(self, action: Action) -> Tuple[Observation, float, bool, bool, dict]:\n if self.road is None or self.vehicle is None:\n raise NotImplementedError(\"The road and vehicle must be initialized in the environment implementation\")\n\n self.time += 1 / self.config[\"policy_frequency\"]\n self._simulate(action)\n\n obs = self.observation_type.observe()\n reward = self._reward(action)\n terminated = self._is_terminated()\n truncated = self._is_truncated()\n info = self._info(obs, action)\n if 
self.render_mode == 'human':\n self.render()\n\n return obs, reward, terminated, truncated, info", "def takeAction(self, action):\n return self.env.step(action)", "def step(self, action):\n\n previous_state = self.state\n self._make_action(action) \n self.step_simulation()\n self._make_observation() # Update state\n \n ###################\n ### Reward function\n\n body_position = self.agent.get_position('Torso') # x,y,z coordinates of the agent\n r_foot_collision, l_foot_collision = self.state[-2:] # Feet collision indicators [0/1]\n roll, pitch = self.state[12:14] # Roll and pitch of the agent's convex hull\n\n # Staying upright\n posture = 0\n if abs(roll) > abs(previous_state[12]):\n posture -= .1\n else:\n posture += .125\n\n if abs(pitch) > abs(previous_state[13]):\n posture -= .1\n else:\n posture += .125\n \n hull = 0\n if abs(roll) < .125 and abs(pitch) < .125:\n posture += .1\n # Lifting feet while upright\n # collisions = np.count_nonzero(self.state[14::])\n # posture = (2 - collisions) * .\n\n # Hull location\n progress = body_position[0] - self.previous_body_position\n if progress > 0: \n hull = 0.1 + progress * 40\n if hull > .5: hull = .5\n else:\n hull = -0.1 + progress * 40\n if hull < -.5: hull = -.5\n self.previous_body_position = body_position[0]\n\n \"\"\"\n STATE SPACE:\n include:\n 1. Angular velocity of the torso (also normal velocity?) both can be obtained through gyro and accelerometer\n 2. Change to orientation of the torso instead of convex hull\n 3. \n \"\"\"\n\n # Feet distance\n # Use multiplicative reward?\n # Change in feet position along the X axis\n # pos_lfoot = self.agent.get_position('LFoot')[0]\n # pos_rfoot = self.agent.get_position('RFoot')[0]\n # distance_lfoot = (pos_lfoot - self.previous_feet_position[0])\n # distance_rfoot = (pos_rfoot - self.previous_feet_position[1])\n # if self.previous_feet_position[0] != 0:\n # feet_distance = (distance_lfoot + distance_rfoot) * 100\n # else:\n # feet_distance = 0\n\n # self.previous_feet_position = [pos_lfoot, pos_rfoot]\n\n base = 0.05\n reward = base + posture + hull\n # print('hull: {}'.format(hull))\n # print('posture: {}'.cformat(posture))\n\n # End condition\n if (abs(roll) > self.fall_threshold or abs(pitch) > self.fall_threshold):\n reward -= 2\n self.done = True \n\n # print('Posture: {} \\n Hull: {}'.format(posture, hull))\n # print('Total reward: {}'.format(reward))\n\n return self.state, reward, self.done, {}", "def run_one_step(self):\n state = copy.copy(self.env.get_state())\n action = self.agent.get_action(state)\n reward = 0.0\n if self.env.check_terminal() is False:\n latest_experience = Experience(copy.copy(state), copy.copy(action))\n self.env.update(action)\n reward = self.env.get_reward()\n latest_experience.reward = copy.copy(reward)\n state = self.env.get_state()\n if self.env.check_terminal() is False:\n action = self.agent.get_action(state)\n latest_experience.done = False\n else:\n action = 0.0\n latest_experience.done = True\n latest_experience.next_state = copy.copy(state)\n latest_experience.next_action = copy.copy(action)\n self.agent.update_history(copy.copy(latest_experience))\n self.count += 1\n # If the latest history has a large enough batch, perform an update\n # CHECK IF THIS IS THE RIGHT METHOD\n if self.count % self.batch_size == 0:\n self.agent.update_policy_ordered(max(1, self.batch_size))\n if self.update_target_rate is not None:\n if self.count % self.update_target_rate == 0:\n self.count = 0\n self.agent.policy.learner.update_target_network()\n else:\n 
latest_experience = Experience(copy.copy(state), copy.copy(action))\n latest_experience.reward = 0.0\n latest_experience.next_state = copy.copy(state)\n latest_experience.next_action = 0.0\n latest_experience.done = True\n self.agent.update_history(copy.copy(latest_experience))\n # Perform an update on all of the previous experiences that haven't been updated\n if self.count % self.batch_size > 0:\n self.agent.update_policy_ordered((self.count % self.batch_size) + 1)\n self.count = 0\n return reward", "def step(self, action: Action) -> Feedback: # type: ignore\n self._action_counter += 1\n step_id = self._action_counter\n\n self._encode_and_send_action(action, step_id)\n\n # Wait (blocking!) for the response envelope from the environment\n in_envelope = self._queue.get(block=True, timeout=None) # type: Envelope\n\n msg = self._decode_percept(in_envelope, step_id)\n\n observation, reward, done, info = self._message_to_percept(msg)\n\n return observation, reward, done, info", "def _random_unroll(env, seed=1234, num_steps=10, difficulty=5,\n random_choice_before_reset=False):\n np.random.seed(seed)\n action_spec = env.action_spec()\n if random_choice_before_reset:\n np.random.choice([8], p=[1.])\n timestep = env.reset(difficulty=difficulty)\n trajectory = [timestep]\n actions = [None]\n for _ in range(num_steps):\n if timestep.last():\n if random_choice_before_reset:\n np.random.choice([8], p=[1.])\n timestep = env.reset(difficulty=difficulty)\n action = _make_random_action(action_spec, timestep.observation)\n timestep = env.step(action)\n trajectory.append(timestep)\n actions.append(action)\n return trajectory, actions", "def step(self, action):\n state, reward, done, debug_info = self.sample_transition(action)\n self.set_state(state)\n if \"next_state_heuristic\" in debug_info:\n self._current_heuristic = debug_info[\"next_state_heuristic\"]\n return state, reward, done, debug_info", "def _transform_step(self,\n timestep: dm_env.TimeStep,\n action: Optional[Any] = None) -> step_data.StepData:\n custom_data = None\n if self._step_fn is not None:\n custom_data = self._step_fn(timestep, action, self._environment)\n return step_data.StepData(timestep, action, custom_data)", "def step(self, action):\n assert action in self.infoset.legal_actions\n self.players[self._acting_player_position].set_action(action)\n self._env.step()\n self.infoset = self._game_infoset\n done = False\n reward = 0.0\n if self._game_over:\n done = True\n reward = {\n \"play\": {\n \"landlord\": self._get_reward(\"landlord\"),\n \"landlord_up\": self._get_reward(\"landlord_up\"),\n \"landlord_down\": self._get_reward(\"landlord_down\")\n },\n \"bid\": {\n \"landlord\": self._get_reward_bidding(\"landlord\")*2,\n \"landlord_up\": self._get_reward_bidding(\"landlord_up\"),\n \"landlord_down\": self._get_reward_bidding(\"landlord_down\")\n }\n }\n obs = None\n else:\n obs = get_obs(self.infoset)\n return obs, reward, done, {}", "def step(self, new_state, reward=None, done=None, mode='train'):\n\n if mode == 'test':\n # Test mode: take greedy action\n action = np.argmax(self.q_table[new_state])\n return action\n \n else:\n # Train mode: take a step and return action\n \n # QL step update \n if self.learning == \"q_learning\":\n self.q_table[self.last_state, self.last_action] += self.alpha * \\\n (reward + self.gamma * max(self.q_table[new_state]) - self.q_table[self.last_state, self.last_action])\n new_action = action_egreedy(self.q_table[self.last_state], self.epsilon, self.action_size)\n \n # SARSA step update \n elif 
self.learning == \"sarsa\":\n new_action = action_egreedy(self.q_table[new_state], self.epsilon, self.action_size)\n self.q_table[self.last_state, self.last_action] += self.alpha * \\\n (reward + self.gamma * self.q_table[new_state, new_action] - self.q_table[self.last_state, self.last_action])\n \n # Expected SARSA step update \n elif self.learning == \"expected_sarsa\":\n self.q_table[self.last_state, self.last_action] += self.alpha * \\\n (reward + self.gamma * np.mean(self.q_table[new_state]) - self.q_table[self.last_state, self.last_action])\n new_action = action_egreedy(self.q_table[new_state], self.epsilon, self.action_size)\n \n # Double Sarsa step update \n elif self.learning == \"double_sarsa\":\n new_action = action_egreedy(np.mean([self.q_table_1[new_state],self.q_table_2[new_state]], axis=0), self.epsilon, self.action_size)\n if random.random() < 0.5:\n self.q_table_1[self.last_state, self.last_action] += self.alpha * (reward + self.gamma * self.q_table_1[new_state, new_action] - self.q_table_1[self.last_state, self.last_action])\n else:\n self.q_table_2[self.last_state, self.last_action] += self.alpha * (reward + self.gamma * self.q_table_2[new_state, new_action] - self.q_table_2[self.last_state, self.last_action])\n \n # Double Expected Sarsa step update \n elif self.learning == \"double_expected_sarsa\":\n if random.random() < 0.5:\n self.q_table_1[self.last_state, self.last_action] += self.alpha * (reward + self.gamma * np.mean(self.q_table_2[new_state]) - self.q_table_1[self.last_state, self.last_action])\n else:\n self.q_table_2[self.last_state, self.last_action] += self.alpha * (reward + self.gamma * np.mean(self.q_table_1[new_state]) - self.q_table_2[self.last_state, self.last_action])\n new_action = action_egreedy(np.mean([self.q_table_1[new_state],self.q_table_2[new_state]], axis=0), self.epsilon, self.action_size)\n \n # Double QL step update \n elif self.learning == \"double_q_learning\":\n if random.random() < 0.5:\n self.q_table_1[self.last_state, self.last_action] += self.alpha * (reward + self.gamma * self.q_table_2[new_state, np.argmax(self.q_table_1[new_state])] - self.q_table_1[self.last_state, self.last_action])\n else:\n self.q_table_2[self.last_state, self.last_action] += self.alpha * (reward + self.gamma * self.q_table_1[new_state, np.argmax(self.q_table_2[new_state])] - self.q_table_2[self.last_state, self.last_action])\n new_action = action_egreedy(np.mean([self.q_table_1[self.last_state],self.q_table_2[self.last_state]], axis=0), self.epsilon, self.action_size)\n \n else:\n raise ValueError('Learning algorithm not supported')\n \n #rollout state and action\n self.last_state = new_state\n self.last_action = new_action\n return new_action", "def step(self, action, skiprate=1):\n reward = self.game.make_action(action, skiprate)\n next_state = self.game.get_state()\n game_over = self.game.is_episode_finished()\n return next_state, reward, game_over", "def run_one_step(self):\n # Get the current state, action and initialise the reward\n state = copy.copy(self.env.get_state())\n action = self.agent.get_action(state)\n reward = 0.0\n # Check if the environment has reached a terminal state\n if self.env.check_terminal() is False:\n # Save the initial state and action to an 'experience'\n latest_experience = Experience(copy.copy(state), copy.copy(action))\n # Update the environment using the chosne action\n self.env.update(action)\n # Get the reward to attribute to the agent and save to the experience to save\n reward = self.env.get_reward()\n 
latest_experience.reward = copy.copy(reward)\n # Get the updated state\n state = self.env.get_state()\n if self.env.check_terminal() is False:\n # If the new state isn't terminal, save the next action and the 'done' flag to the experience\n action = self.agent.get_action(state)\n latest_experience.done = False\n else:\n # If the new state is terminal, save a dummy action and the 'done' flag to the experience\n action = 0.0\n latest_experience.done = True\n latest_experience.next_state = copy.copy(state)\n latest_experience.next_action = copy.copy(action)\n # Update the history with the latest experience\n self.agent.update_history(copy.copy(latest_experience))\n # Update the agents policy using a batch of experiences chosen from the history\n self.agent.update_policy_batch(max(1, self.batch_size))\n self.count += 1\n # Update the target network if appropriate\n if self.update_target_rate is not None:\n if self.count % self.update_target_rate == 0:\n self.agent.policy.learner.update_target_network()\n else:\n # If the environment is in a terminal state, record this and perform a policy update\n latest_experience = Experience(copy.copy(state), copy.copy(action))\n latest_experience.reward = 0.0\n latest_experience.next_state = copy.copy(state)\n latest_experience.next_action = 0.0\n latest_experience.done = True\n self.agent.update_history(copy.copy(latest_experience))\n self.agent.update_policy_batch(max(1, self.batch_size))\n self.count = 0\n return reward", "def step(self, action: nx.Graph):\n # Get the SMILES string associated with this action\n self._state = action\n if self.record_path:\n self._path.append(self._state)\n\n # Update the action space\n self.action_space.update_actions(self._state, self.observation_space)\n self._counter += 1\n\n # Check if we have finished\n # Out of steps or no more moves\n done = len(self.action_space.get_possible_actions()) == 0\n\n # Compute the fingerprints for the state\n return self._state, self.reward(), done, {}", "def reset(self, **kwargs):\n if self._backend_agent:\n self._backend_agent._on_gym_reset_begin(self, **kwargs)\n\n result = self.env.reset(**kwargs)\n if self.steps_done_in_episode > 0 and not self.is_episode_done:\n self.episodes_done += 1\n self.total.episodes_done_inc()\n self.is_episode_done = False\n self.steps_done_in_episode = 0\n\n if self._backend_agent:\n self._backend_agent._on_gym_reset_end(self, result, **kwargs)\n return result", "def observe(self,\n action: types.NestedArray,\n next_timestep: dm_env.TimeStep) -> None:\n self._log(action)\n self._last_timestep = next_timestep\n if next_timestep.observation['REVEALED_CARDS'][0] == -1:\n self._reset_deck()\n self._deck_distribution -= next_timestep.observation['REVEALED_CARDS']\n self._deck_distribution[0] = 0\n if next_timestep.observation['STAGE'] == 'CHOOSE_BET':\n self._bettor.set_payout(next_timestep.reward,\n self._deck_distribution)", "def _single_agent_step(self, action):\n reward = 0.0\n done = False\n self.timestep += 1\n state, player_id = self.game.step(action)\n while not self.game.is_over() and not player_id == self.active_player:\n self.timestep += 1\n action, _ = self.model.agents[player_id].eval_step(\n self._extract_state(state)\n )\n if not self.model.agents[player_id].use_raw:\n action = self._decode_action(action)\n state, player_id = self.game.step(action)\n\n if self.game.is_over():\n reward = self.get_payoffs()[self.active_player]\n done = True\n state = self.reset()\n return state, reward, done\n\n return self._extract_state(state), reward, done", 
"def step(self, action):\n reward_all = 0\n pose_all = []\n raw_states = []\n for _ in range(self.action_repeat):\n state, reward, done, _ = self.env.step(action) # run up the mountain\n\n processed_state = self.preprocess_state(state)\n raw_states.append(state)\n\n if done and self.i < 200:\n self.success = True\n\n reward_all += reward\n pose_all.append(processed_state)\n\n self.i += 1\n\n if done:\n missing = self.action_repeat - len(pose_all)\n pose_all.extend([pose_all[-1]] * missing)\n break\n\n next_state = np.concatenate(pose_all)\n return next_state, reward_all, done, raw_states", "def reset(self):\n \n self.env.reset()\n obs, _, done, _ = self.env.step(1)\n if done: \n self.env.reset()\n obs, _, done, _ = self.env.step(2)\n if done: \n self.env.reset()\n \n return obs", "def step(self, action):\n total_reward = 0.0\n done = False\n obs_list = []\n for i in range(self._skip):\n obs, reward, done, info = self.env.step(action)\n obs_list.append(obs)\n total_reward += reward\n if done:\n break\n # Note that the observation on the done=True frame\n # doesn't matter\n max_frame = np.max(obs_list[-2:], axis=0)\n return max_frame, total_reward, done, info", "def agent_step(self, reward, state):\n self.sum_rewards += reward\n self.episode_steps += 1\n\n # Make state an array of shape (1, state_dim) to add a batch dimension and\n # to later match the get_action_values() and get_TD_update() functions\n state = np.array(state)\n\n # Select action\n action = self.policy(state)\n \n # Append new experience to replay buffer\n self.replay_buffer.append(self.last_state, self.last_action, reward, 0, state)\n \n # Perform replay steps:\n if self.replay_buffer.size() > self.replay_buffer.minibatch_size:\n self.network_target.load_state_dict(self.network.state_dict())\n for _ in range(self.num_replay):\n # Get sample experiences from the replay buffer\n experiences = self.replay_buffer.sample() \n self.optimize_network(experiences)\n \n # Update the last state and last action.\n self.last_state = state\n self.last_action = action\n \n return action", "def apply_action(self, action):\n return self.__environment.step(action)", "def select_action(self, eps=None, force=False):\n if not force and self.step % Parameters.FRAME_SKIPPING != 0:\n return self.last_action\n # compute epsilon at step t\n completion = self.get_learning_completion()\n if eps is None:\n eps = Parameters.INITIAL_EXPLORATION - \\\n (completion * (Parameters.INITIAL_EXPLORATION - Parameters.FINAL_EXPLORATION))\n if random.random() < eps:\n # take a random action\n action = randint(0, self.action_space)\n else:\n # take a smart action\n input_shape = (\n 1,\n Parameters.IMAGE_HEIGHT,\n Parameters.IMAGE_WIDTH,\n Parameters.AGENT_HISTORY_LENGTH)\n dqn_input = self.environment.get_input().reshape(input_shape)\n action = self.tf_session.run(\n self.dqn.smartest_action, {self.dqn_input: dqn_input})\n\n self.last_action = action\n return(action)", "def step(self, action: CARLAAction, *args: Any, **kwargs: Any) -> Transition:\n observation, reward, done, info = self.env.step(action)\n if observation[\"collision\"] > 0:\n logging.debug(\"A collision occured\")\n done = True\n reward = -1.0\n return observation, reward, done, info", "def _step(self, action: np.ndarray):\n # TODO: How do deal with goal changing?\n denormalize = False if self.use_raw_actions else True\n current_pos = self.sim.data.mocap_pos.copy()\n meanval = (self.mocap_pos_clip_upper + self.mocap_pos_clip_lower)/2.0\n rng = (self.mocap_pos_clip_upper - 
self.mocap_pos_clip_lower)/2.0\n new_pos = action[:3]*rng + meanval #current_pos + action[:3]*self.range\n # new_pos = current_pos + action[:3]*self.range\n new_pos = np.clip(new_pos, self.mocap_pos_clip_lower, self.mocap_pos_clip_upper)\n self.sim.data.mocap_pos[:] = new_pos.copy()\n self.robot.step({\n 'gripper': action[-2:]\n }, denormalize)", "def reset_goal(self, update_seed=False, sync_type=SyncType.RESET_GOAL):\n\n # Reset stats for one goal in the same episode.\n self.multi_goal_tracker.reset_goal_steps()\n\n # Randomize a target for the robot\n self._goal = self._next_goal()\n self._previous_goal_distance = None\n\n return self._observe_sync(sync_type=sync_type)", "def reset(self):\n if not self.single_agent_mode:\n return self._init_game()\n\n while True:\n state, player_id = self.game.init_game()\n while not player_id == self.active_player:\n self.timestep += 1\n action, _ = self.model.agents[player_id].eval_step(\n self._extract_state(state)\n )\n if not self.model.agents[player_id].use_raw:\n action = self._decode_action(action)\n state, player_id = self.game.step(action)\n\n if not self.game.is_over():\n break\n\n return self._extract_state(state)", "def step(self, state, action, reward, next_state, done):\n q_value = self.Q[state][action]\n q_value_next = np.max(self.Q[next_state]) if not done else 0\n g = reward + self.gamma * q_value_next - q_value\n self.Q[state][action] = q_value + self.alpha * g\n return", "def step(self, action):\n # if self.current_turn<self.MAX_TURNS-1:\n # self.current_turn += 1\n \n\n self.current_turn += 1\n system_action = self.parseAction(action)\n \n # Used for logging and evaluation\n self.updateMetaState(system_action)\n\n self.processSystemAction(system_action)\n\n reward = self.calculateReward()\n\n user_action = self.user.respond(system_action)\n self.processUserAction(user_action)\n observation = self.generateObservation()\n done = self.isDone()\n if done:\n info = { \"successful\": self.user.goals[\"satisfied\"], \n \"first-appearance\": self.first_appearance, \n \"turn-penalty\": self.current_turn,\n \"sugg-all-penalty\":self.sugg_penalty,\n \"info-all-penalty\": self.info_penalty,\n \"eli-kw-used\": self.eli_kw_observed,\n \"eli-query-used\": self.eli_query_observed,\n }\n else:\n info = {}\n if self.training:\n if done and self.user.goals[\"satisfied\"]: reward+=30\n return observation, reward, done, info", "def Step(observation, reward, done, **kwargs): # noqa: N802\n return _Step(observation, reward, done, kwargs)", "def _step(self, action):\n\n reward = 0.0\n x, y = action\n\n if not Creator.add_edge(self.nxgraph, x+1, y+1):\n reward = 0.0\n # TODO: do we return here?\n raise NotImplementedError\n else:\n reward = 1.0\n new_state = EnvTools.get_state(self.nxgraph)\n EnvTools.calculate_reward(self.state, self.previous_state)\n raise NotImplementedError\n\n\n\n pass", "def env_step(action: np.ndarray) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n state, reward, done, _ = env.step(action)\n return (\n state.astype(np.float32),\n np.array(reward, np.float32),\n np.array(done, np.int32),\n )" ]
[ "0.6702035", "0.66238755", "0.64235044", "0.6258338", "0.62385756", "0.6237361", "0.6150759", "0.6134917", "0.61343175", "0.61312205", "0.60947925", "0.5976863", "0.5962188", "0.5953434", "0.59465617", "0.59439987", "0.58978987", "0.58788586", "0.58774835", "0.58655053", "0.58542365", "0.5834581", "0.58265305", "0.581372", "0.58122355", "0.579503", "0.57924336", "0.5789864", "0.57771266", "0.57589006", "0.5757548", "0.57533514", "0.57108974", "0.57038283", "0.56855243", "0.5668602", "0.5664342", "0.5650524", "0.5648463", "0.56392384", "0.5638564", "0.5629723", "0.560319", "0.5591075", "0.5585529", "0.55751187", "0.55703586", "0.55613595", "0.555577", "0.55541396", "0.5550539", "0.5548283", "0.5543235", "0.5536319", "0.5513914", "0.5511845", "0.5505623", "0.5489618", "0.5488938", "0.5468569", "0.5466863", "0.546542", "0.54538304", "0.5441264", "0.54177576", "0.54063517", "0.538277", "0.53817666", "0.5368999", "0.5366482", "0.536083", "0.5341993", "0.5323047", "0.5322197", "0.53140867", "0.53051054", "0.53025204", "0.5298595", "0.52982193", "0.52893007", "0.528287", "0.5281701", "0.5279602", "0.52788097", "0.52762336", "0.5261184", "0.5254758", "0.52487296", "0.52478844", "0.52468306", "0.5214496", "0.5203838", "0.52014345", "0.52003634", "0.519932", "0.519664", "0.5191031", "0.5187276", "0.5185002", "0.5183779" ]
0.776358
0
Frees any resources used by the environment. Implement this method for an environment backed by an external process. This method can be used directly ```python env = Env(...) Use env. env.close() ``` or via a context manager ```python with Env(...) as env: Use env. ```
def close(self) -> None: pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def close_env(self):\n self.env.close()", "def close(self): \n\t\tself.env.close()", "def terminate(self):\n super(ReacherEnv, self).close()", "def close(self):\n if self.__env:\n self.__env.close()\n self.__env = None\n super(DiskCache, self).close()", "def tear_down(self):\n self.destroy_env()\n self.dut.kill_all()", "def env_cleanup(self):\n pass", "def destroy(self):\r\n print('Destroying Connection to Environment Process.')\r\n self._container = None\r\n super(EnvironmentEndpoint, self).destroy()", "def destroy_env(self):\n self.dut.send_expect(\"quit\", \"# \")\n time.sleep(2)", "def __del__(self):\n # Free the memory in the remote process's address space\n self.CleanUp()", "def close(self):\n logging.info(\"closing SmartBotEnv\")\n super(gym.Env, self).close()", "def __del__(self):\n if self.env_handle:\n self.SQLFreeHandle(SQL_HANDLE_ENV, self.env_handle)\n if self.con_handle:\n self.SQLFreeHandle(SQL_HANDLE_DBC, self.con_handle)", "def close(self):\n if not self.closed:\n C_LIBRARY.free(self.dev)\n self.closed = True", "def __exit__(self, exc_type, exc_value, exc_tb) -> None:\n self.destroy()", "def __exit__(self, exc_type, exc_value, exc_tb) -> None:\n self.destroy()", "def __del__(self):\n if self._close_on_exit:\n self.close()", "def __del__(self):\n self._proc.kill()", "def __del__(self):\n self.exit()", "def __del__(self):\n if hasattr(self, 'dev'):\n kernel32.CloseHandle(self.dev)", "def destroy(self):\r\n for node in self._nodes.copy():\r\n node.destroy()\r\n\r\n for parameter in self._parameters.copy():\r\n parameter.destroy()\r\n\r\n assert len(self._nodes) == 0\r\n assert len(self._parameters) == 0\r\n\r\n super(Environment, self).destroy()", "def _finalize(self, use_stdlib_through_env_vars):\n if use_stdlib_through_env_vars is None:\n use_stdlib_through_env_vars = True\n self._manage_stdlib(use_stdlib_through_env_vars)", "def close(self):\n if self.dev_open:\n try:\n self.lib().close()\n except CygnetExc:\n pass\n self.dev_open = False", "def __del__(self):\n self.destroy()", "def __del__(self):\n self.shutdown()", "def __exit__(self, exc_type, exc_val, exc_tb):\n self.__del__()", "def __exit__(self, exc_type, exc_val, exc_tb):\n printy(\"Cleaning after myself...\")\n self.key.delete()\n if self.instance:\n self.instance.terminate()\n # wait for the machine to terminate\n self.wait_for_status(48)\n\n self.sec_grp.delete()\n os.remove(self.key.name + \".pem\")\n printy(\"Builder teardown complete\")", "def __del__(self):\n self.close_connection()\n self.close_engine()", "def close(self):\n rospy.logdebug(\"Closing RobotGazeboEnvironment\")\n rospy.signal_shutdown(\"Closing RobotGazeboEnvironment\")", "def __exit__(self, exc_type, exc_val, exc_tb):\n self.__descriptor.close()", "def __exit__(self, exc_type, exc_val, exc_tb):\n self.__descriptor.close()", "def close(self):\r\n try:\r\n self.proc.terminate()\r\n except (OSError, AttributeError): # pragma: no cover\r\n pass\r\n self.proc = None", "def CleanUp(self):\n if self.process != 0 and self.mem_address != 0:\n # free up the memory we allocated\n #win32api.SetLastError(0)\n self.CheckGuardSignature()\n\n ret = win32functions.VirtualFreeEx(\n c_void_p(self.process),\n c_void_p(self.mem_address),\n win32structures.ULONG_PTR(0),\n wintypes.DWORD(win32defines.MEM_RELEASE))\n if ret == 0:\n print('Error: CleanUp: VirtualFreeEx() returned zero for address ', hex(self.mem_address))\n last_error = win32api.GetLastError()\n print('LastError = ', last_error, ': ', win32api.FormatMessage(last_error).rstrip())\n 
sys.stdout.flush()\n self._CloseHandle()\n raise WinError()\n self.mem_address = 0\n self._CloseHandle()\n else:\n pass # ActionLogger().log('\\nWARNING: Cannot call VirtualFreeEx! process_id == 0.')", "def reset(self, env):\n self._env = env\n return", "def __del__(self):\n # reset tensorflow graph\n tf.reset_default_graph()\n\n for process_name in self.subprocess:\n if self.subprocess[process_name].poll is None:\n self.close_subprocess(process_name)\n\n del self.root_path\n del self.log\n del self.logger\n del self.instance\n del self.visualizers", "def __del__(self):\n try:\n self.close()\n except:\n pass", "def __del__(self):\n self._cleanup()", "def teardown_test_env():\n if not keep_tmp_dirs:\n print('\\nCleaning up temporary directories...')\n shutil.rmtree(tmp_elm_dpath, ignore_errors=True)\n shutil.rmtree(tmp_elm_examples_dpath, ignore_errors=True)\n\n print('Removing conda environment used for testing...')\n sp.call('conda env remove -y -q -n {}'.format(test_env_name), shell=True, executable='/bin/bash', stdout=sp.DEVNULL)", "def update(self, env):\n del env\n return", "def EndEnv(self,EnvironmentName):\n\n subprocess.call(['rmdir','/Q','/S',f'{EnvironmentName}'], shell=True, cwd=r\"C:\\Users\\caspe\\Envs\")\n\n with open(\"Envs.json\") as delete_file:\n elements = json.load(delete_file)\n\n if EnvironmentName in elements:\n del elements[EnvironmentName]\n\n with open(\"Envs.json\", \"w\") as add_file:\n json.dump(elements, add_file, indent=4)", "def __del__(self):\r\n self.cleanup()", "def destroy(self):\n\n dcgm_agent.dcgmShutdown()\n self._thread_pool.terminate()\n self._thread_pool.close()", "def __del__(self):\n self.clean_up_terminal()", "def destroy(self):\n if self._ptr is not None:\n # run and remove destructor on c data\n _global_destroy(self._display, self._ptr)\n ffi.gc(self._ptr, None)\n self._ptr = None\n self._display = None", "def reset_env(self):\n return self.env.reset()", "def release(self):\n if self._ctx is None:\n return\n self.atomicfile.delete()\n try:\n self._ctx.__exit__(None, None, None)\n finally:\n self._ctx = None", "def exit(self, env=None):\n env = self._find_env(env)\n env.remove_agents(self)", "def __del__(self):\n self.close()", "def __del__(self):\n self.close()", "def __del__(self):\n self.close()", "def __del__(self):\n self.close()", "def __del__(self):\n self.close()", "def __del__(self):\n self.close()", "def __del__(self):\n self.close()", "def __del__(self):\n self.close()", "def del_env(self, envname):\n\n with open(self.envpath, \"r\") as envfile:\n my_vars = {}\n for line in envfile.readlines():\n key, value = self.__kv_pair(line)\n if key is not None:\n my_vars[key] = value\n\n current_value = my_vars.pop(envname, None)\n\n if current_value is None:\n return # do nothing if not set\n\n new_lines = [f\"{k} = {v}\\n\" for k, v in my_vars.items()]\n\n with open(self.envpath, \"w\") as envfile:\n envfile.writelines(new_lines)\n\n os.environ.unsetenv(envname)", "def done(self, env):\n del env\n return False", "def __del__(self) -> None:\n self.close()", "def __del__(self):\n self._destruct()", "def _reset(self):\n if not self._first_create:\n self._sc2_env.close()\n self._sc2_env = self._create_env()\n self._first_create = False\n return self._sc2_env.reset()", "def close(self):\n self.__exit__(None, None, None)", "def close(self):\n self.__exit__(None, None, None)", "def close( self ):\n self.__del__()", "def __del__(self):\n try:\n self.api.transport.session.close()\n except Exception as e:\n log.debug(f\"Failed to close VSO API 
connection with: {e}\")", "def close(self):\n subprocess.call([\"pkill\", \"controller\"])", "def __exit__(self, exc_type, exc_val, exc_tb):\n self.close()\n try:\n self._store.close()\n except AttributeError:\n pass", "def destroy(self):\r\n self.__destroy()", "def cleanup():\n dist.destroy_process_group()", "def __exit__(self, exc_type, exc_val, exc_tb):\n self.session.close()", "def __exit__(self, exc_type, exc_val, exc_tb):\n self.release()", "def tearDown(self):\n tests.utils.cleanup_environment()", "def tearDown(self):\n tests.utils.cleanup_environment()", "def destroy(self):\n self.db = None\n self.gen_cursor = None\n self.map = None\n self.fmap = None\n self.smap = None\n FlatBaseModel.destroy(self)", "def __del__(self):\n\n self.session.close()", "async def __aexit__(self, exc_type, exc_val, exc_tb):\n await self._close()", "def unsetenv(name):\n # Some platforms (e.g. AIX) do not support `os.unsetenv()` and\n # thus `del os.environ[name]` has no effect onto the real\n # environment. For this case we set the value to the empty string.\n os.environ[name] = \"\"\n del os.environ[name]", "def tearDown(self) -> None:\n\n self.temp_env_file.close()\n os.remove(self.temp_env_file.name)\n\n del self.temp_env_file\n del self.test_name\n del self.helper", "def teardown_appcontext(self):\n storage.close()", "def __exit__(self, exc_type, exc_value, traceback):\n nvmlShutdown()", "def __exit__(self, exc_type=None, exc_value=None, traceback=None):\n if self.cap:\n self.cap.release()\n self.cap = None", "def __del__(self):\n self._close_http_session()", "def __exit__(self, exc_type, exc_val, exc_tb):\n self.close()", "def close(self):\n \n self.__exit__(None, None, None)\n return", "def clean_up(self):\n dist.destroy_process_group()", "async def aclose(self) -> None:\n if self._engine:\n await self._engine.dispose()\n self._engine = None", "def __del__(self):\n \n if self.parallel_conf is not None:\n #from .parallel import close_parallel_region\n self.parallel_conf.finish_parallel_region()", "def teardown(self):\n\n\t\tself.shutdown = True\n\t\tself.terminate_process()", "def __exit__(\n self,\n exc_type: Optional[Type[BaseException]],\n exc_val: Optional[BaseException],\n exc_tb: Optional[TracebackType],\n ) -> None:\n self.deinit()", "def __exit__(self, type, value, traceback):\n self.free()", "def Destroy(self):\n self.Disconnected()\n self._io_loop.remove_handler(self._fd)\n os.close(self._fd)\n self._gadget = None\n self._fd = None", "def destroy(self):\n self.context.destroy()", "def __del__(self):\n AppHelper.stopEventLoop()", "def __del__(self):\n AppHelper.stopEventLoop()", "def _shutdown(): \n for GD in GlobalDictionary._instances:\n print(\"\\nCleaning up:\", GD.name)\n GD._handler.close()\n del GD\n\n print(\"Shutting down\")\n \n sys.exit(0)", "def _cleanup(self):\n # If module object not yet created, return\n if getattr(self, \"this\", None) is None:\n return\n\n deallocate = getattr(self, \"deallocate\", None)\n if callable(deallocate):\n deallocate()", "def __del__(self):\n\n if self._cs is not None:\n self._cs.close()", "def __del__(self):\n\n if self._is_open:\n self.close()", "def __exit__(self, exc_type, exc_value, traceback):\n return self.close()", "def close(self):\n self._pool_provider.clear()", "def destroy(self) -> None:\n if self._ptr is not None:\n ffi.release(self._ptr)\n self._ptr = None", "def destroy(self) -> None:\n if self._ptr is not None:\n ffi.release(self._ptr)\n self._ptr = None", "def destroy():\n pass", "def destroy():\n pass" ]
[ "0.7741044", "0.7280734", "0.71917665", "0.71788585", "0.67623675", "0.6581033", "0.6527342", "0.64914477", "0.6491418", "0.64707375", "0.6448349", "0.6402401", "0.6377555", "0.6377555", "0.63491684", "0.6225186", "0.6214336", "0.6168082", "0.61409414", "0.6053957", "0.60520726", "0.60113806", "0.60066885", "0.6002898", "0.59859437", "0.5978754", "0.59762627", "0.59301037", "0.59301037", "0.59044945", "0.58902013", "0.58889794", "0.5883272", "0.58675224", "0.5855163", "0.58449745", "0.5822221", "0.58162403", "0.5808818", "0.5805653", "0.57800347", "0.577933", "0.5767599", "0.57647574", "0.5750126", "0.5747237", "0.5747237", "0.5747237", "0.5747237", "0.5747237", "0.5747237", "0.5747237", "0.5747237", "0.5746613", "0.57447904", "0.57323164", "0.5703796", "0.56819737", "0.5681722", "0.5681722", "0.56782186", "0.56751543", "0.5674827", "0.56734854", "0.56684256", "0.566483", "0.5658423", "0.565456", "0.5653052", "0.5653052", "0.56519526", "0.5649063", "0.5647521", "0.5633792", "0.5632469", "0.56260735", "0.5622806", "0.56216645", "0.56121343", "0.56051797", "0.56041604", "0.56011593", "0.55848736", "0.5570789", "0.5568944", "0.5568929", "0.556702", "0.5566949", "0.5551486", "0.5547914", "0.5547914", "0.5536439", "0.55350703", "0.55258924", "0.5522057", "0.5516308", "0.5514196", "0.5512669", "0.5512669", "0.5511592", "0.5511592" ]
0.0
-1
Allows the environment to be used in a with-statement context.
def __enter__(self): return self
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def visit_with(self: Parser, node: doc.With) -> None:\n with contextlib.ExitStack() as stack:\n stack.enter_context(self.var_table.with_frame())\n for item in node.items:\n frame = self.eval_expr(item.context_expr)\n if not isinstance(frame, Frame):\n self.report_error(\n item.context_expr, \"Invalid context expression in the with-statement.\"\n )\n rhs = stack.enter_context(frame)\n if item.optional_vars is not None:\n self.eval_assign(target=item.optional_vars, source=rhs, bind_value=bind_with_value)\n self.visit_body(node.body)", "def test_with(self):\n source = \"\"\"\n with guard():\n a = 1\n \"\"\"\n target = \"\"\"\n with guard_new():\n a = 1\n \"\"\"\n self._check_compatibility(source, target)", "def _analyse_stmt_With(self, statement: ast.With, *, next: CFNode) -> CFNode:\n return self._analyse_with(statement, next=next)", "def _analyse_with(\n self,\n statement: Union[ast.AsyncWith, ast.With],\n *,\n next: CFNode,\n ) -> CFNode:\n with_node = self._ast_node(\n statement,\n enter=self._analyse_statements(statement.body, next=next),\n error=self._raise,\n )\n return with_node", "def run401_02():\n\n class Context:\n def __init__(self):\n print('__init__()')\n\n def __enter__(self):\n print('__enter__()')\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n # print(exc_type, exc_val, exc_tb)\n print('__exit__()')\n\n with Context():\n print('do something')", "def test_execute_with_context(self):\n pass", "def __enter__(self):\n self.__within_context = True\n if not self.__initialized:\n self.__initialization__()\n return self", "def __enter__(self):\n # mark the beginning of a transaction\n self.execute(*self.sql.transaction())\n # and hand me back to the caller\n return self", "def in_context(self):\n pass", "def context_set(context):\n global __context\n if context == DefaultContext:\n context = context.copy()\n __context = context", "def session_context(self):\n session = self.Session()\n try:\n yield session\n session.commit()\n except: # noqa E722\n session.rollback()\n raise\n finally:\n session.close()", "def wrapwith(item, body, locref=None):\n locref = locref or body[0]\n wrapped = With(items=[withitem(context_expr=item, optional_vars=None)],\n body=body,\n lineno=locref.lineno, col_offset=locref.col_offset)\n return [wrapped]", "def __enter__(self):\r\n pass", "def context(self) -> CONTEXT:", "def __enter__(self):\n pass", "def __enter__(self):\n pass", "def with_(self):\n return \"With\"", "def session_scope(raise_exception=True):\n session = Session()\n try:\n yield session\n session.commit()\n except Exception:\n session.rollback()\n if raise_exception:\n raise\n finally:\n session.close()", "def __enter__(self):\n\t\treturn self", "def env_wrap(self):\n\n old_env = dict(os.environ)\n try:\n yield\n finally:\n os.environ.clear()\n os.environ.update(old_env)", "def __enter__(self):\n return self.connection.__enter__", "def session_scope(raise_exception=True):\n session = cls.Session()\n try:\n yield session\n session.commit()\n except Exception:\n session.rollback()\n if raise_exception:\n raise\n finally:\n session.close()", "def session_scope(dsn):\n factory = sessionmaker(bind=Engines[dsn])\n session = factory()\n try:\n yield session\n session.commit()\n except:\n session.rollback()\n raise\n finally:\n session.close()", "def beginScope():", "def session_scope():\n session = Session(bind=engine)\n try:\n yield session\n except:\n session.rollback()\n raise\n finally:\n session.close()", "def execute_block(self, stmt: List[loxStmtAST.Stmt], 
environment: loxenvironment.Environment) -> None:\n previous_env: loxenvironment.Environment = self.environment\n try:\n self.environment = environment\n for statement in stmt:\n self.execute(statement)\n finally:\n self.environment = previous_env", "def __enter__(self):\n raise NotImplementedError", "def __enter__(self) -> None:\n raise NotImplementedError()", "def visit_With(self, node):\n assert hasattr(node, 'items')\n if node.items:\n withitem = node.items[0]\n assert isinstance(withitem, gast.withitem)\n if isinstance(withitem.context_expr, gast.Call):\n func = withitem.context_expr.func\n if isinstance(func, gast.Name):\n func.id += '_new'\n return node", "def set(**args):\n return Context(args)", "def context_local(context=None):\n class manager(object):\n def __init__(self, ctx):\n \"\"\"\n :type ctx: Context\n \"\"\"\n self.context = ctx.copy()\n\n def __enter__(self):\n self.orig_context = context_get()\n context_set(self.context)\n return self.context\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n context_set(self.orig_context)\n\n if context is None:\n context = context_get()\n return manager(context)", "def session_scope():\n session = Session()\n try:\n yield session\n session.commit()\n except:\n session.rollback()\n raise\n finally:\n session.close()", "def session_scope():\n session = Session()\n try:\n yield session\n session.commit()\n except:\n session.rollback()\n raise\n finally:\n session.close()", "def session_scope():\n session = Session()\n try:\n yield session\n session.commit()\n except:\n session.rollback()\n raise\n finally:\n session.close()", "def B():\n set_env()", "def session_context(func):\r\n def wrapper(*args, **kwargs):\r\n self = args[0]\r\n with self._create_db_session() as db:\r\n self.db = db\r\n return func(*args, **kwargs)\r\n return wrapper", "def enterScope(self, name):", "def context(self) -> Any:\n ...", "def db_session_context(db_name):\n try:\n db = psycopg2.connect(database=db_name)\n yield db\n finally:\n db.close()", "def session_scope(Session):\n session = Session()\n try:\n yield session\n session.commit()\n except:\n session.rollback()\n raise\n finally:\n session.close()", "def session_scope(engine):\n DBSession = sessionmaker(bind=engine)\n Session = DBSession()\n try:\n yield Session\n Session.commit()\n except:\n Session.rollback()\n raise\n finally:\n Session.close()", "def __enter__(self):\n # We pull in some useful bits\n import inspect\n from byteplay import Code,haslocal,SetLineno, \\\n SETUP_WITH,WITH_CLEANUP,\\\n STORE_FAST,STORE_NAME,STORE_GLOBAL,\\\n POP_TOP,POP_BLOCK\n\n frame = inspect.currentframe(1)\n self.__code = code = Code.from_code(frame.f_code)\n self.__line = frame.f_lineno\n self.__globals = frame.f_globals\n\n # The SetLineno instructions get in the way here\n # since I want to find the actual instruction\n # by offset. 
I'll just strip them out\n instructions = code.code\n nolines = [x for x in instructions if x[0] != SetLineno]\n instructions[:] = nolines\n pc = __pc_to_byteplay_offset__(instructions).get(frame.f_lasti)\n assert pc is not None,\"Found invalid offset for with\"\n\n # Strip off everything through the SETUP_WITH\n assert instructions[pc][0] == SETUP_WITH,\"LittleTimer must be invoked from a with statement\"\n end_label = instructions[pc][1]\n del instructions[:pc+1]\n\n # which is followed by a STORE_NAME, STORE_LOCAL,\n # STORE_GLOBAL, or POP_TOP\n assert instructions[0][0] in (\\\n STORE_NAME,\n STORE_FAST,\n STORE_GLOBAL,\n POP_TOP\n ),\"Only simple assignment is supported, no more complex than LittleTimer() as T\"\n if instructions[0][0] == POP_TOP: self.__oneshot = True\n del instructions[0]\n\n # Find the closing WITH_CLEANUP\n targets = [offset for offset,(opcode,arg) in enumerate(instructions)\n if opcode is end_label]\n assert targets,\"This with-statement was not formed the way I expected\"\n pc = targets[0]+1\n assert instructions[pc][0] == WITH_CLEANUP,\"This with-statement was not formed the way I expected\"\n\n # Reverse until we find a POP_BLOCK\n while pc >= 0:\n opcode = instructions[pc][0]\n if opcode == POP_BLOCK:\n break\n pc -= 1\n del instructions[pc:]\n self.__bytecodes = instructions\n\n # We may have some local values that we need to set up\n locals = set([x[1] for x in instructions if x[0] in haslocal])\n self.__locals = dict( (sym,frame.f_locals.get(sym,None))\n for sym in locals )\n return self\n\n return self", "def __enter__(self):\r\n return self", "def __enter__(self):\n self._in_context_block = True\n # TODO: create local backup of file in case we can't upload and have to roll back\n return self", "def __enter__(self):\n self.manual_enter()\n return self", "def ensure_context(self):\n with driver.get_active_context():\n oldctx = self._get_attached_context()\n newctx = self.get_or_create_context(None)\n self._set_attached_context(newctx)\n try:\n yield\n finally:\n self._set_attached_context(oldctx)", "def _analyse_stmt_AsyncWith(\n self, statement: ast.AsyncWith, *, next: CFNode\n ) -> CFNode:\n return self._analyse_with(statement, next=next)", "def context():\n return dict()", "def _prepare_env(self, graph, **kwargs):\n raise NotImplementedError", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self) -> Seat:\n return self", "def session_scope():\n # Create session.\n session = sessionmaker()\n session.configure(bind=engine)\n session = session()\n\n try:\n yield session\n session.commit()\n\n # Rollback on any exception.\n except Exception as e:\n logging.info('Rollback: %s', e)\n session.rollback()\n raise\n # Close no matter what.\n finally:\n session.close()", "def session_scope():\n session = Session()\n try:\n yield session\n session.commit()\n except InvalidRequestError:\n session.rollback()\n raise\n finally:\n session.close()", "def make_shell_context():\n return {'db': db, 'User': User, 'Post': Post}", "def cooked_mode(self) -> ContextManager[None]:", "def __enter__(self):\n return type(self)()", "def setContext(self, context: Any, /) -> Any:\n ...", "def wrap_transaction(self):\n new_script = self.__class__()\n new_script.append(\n [BeginStatement()] + self.statements + [CommitStatement()])\n\n return new_script", "def env(**kwargs) -> ContextManager:\n\n def update(target, source):\n updated = {}\n for k, v in source.items():\n if v is None:\n try:\n updated[k] = target.pop(k)\n except KeyError:\n 
pass\n else:\n updated[k] = target.get(k, None)\n target[k] = v\n return updated\n\n previous_env = update(os.environ, kwargs)\n try:\n yield\n finally:\n update(os.environ, previous_env)", "def _make_context():\n return {'app': app, 'db': db}", "def exec_in_context(self,arg):\n ## contains elaborate scheme to detect what is specified by\n ## -s, and to warn about any replacement\n current_ids = dict([(k,id(v)) for k,v in self.context.items()])\n\n exec arg in self.context\n\n for k,v in self.context.items():\n if k in self.unused_names and id(v)!=current_ids[k]:\n self.warning(\"Replacing previous value of '%s' with '%s'\"%(k,v))\n\n new_names = set(self.context.keys()).difference(set(current_ids.keys()))\n for k in new_names:\n self.unused_names.add(k)", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self" ]
[ "0.6312834", "0.61630744", "0.6097616", "0.60587364", "0.58667314", "0.58018786", "0.5615465", "0.5607413", "0.5506247", "0.5476283", "0.54457474", "0.5373202", "0.53038436", "0.5298539", "0.52976507", "0.52976507", "0.52566886", "0.5256446", "0.5254769", "0.52320963", "0.5220585", "0.5213544", "0.5202233", "0.5200399", "0.51965004", "0.5149359", "0.5142639", "0.514196", "0.5141089", "0.51023155", "0.5100797", "0.50906366", "0.50906366", "0.50906366", "0.50904804", "0.50850946", "0.50807333", "0.50722444", "0.503844", "0.50265807", "0.50226665", "0.5019193", "0.50119776", "0.50095373", "0.500424", "0.499031", "0.49892378", "0.49871328", "0.49837404", "0.49744552", "0.49744552", "0.49685454", "0.49646753", "0.49629244", "0.4953083", "0.4952969", "0.49518397", "0.49444416", "0.4938706", "0.49348322", "0.4930179", "0.49102718", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177" ]
0.49726027
52
Allows the environment to be used in a with-statement context.
def __exit__(self, unused_exception_type, unused_exc_value, unused_traceback): self.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def visit_with(self: Parser, node: doc.With) -> None:\n with contextlib.ExitStack() as stack:\n stack.enter_context(self.var_table.with_frame())\n for item in node.items:\n frame = self.eval_expr(item.context_expr)\n if not isinstance(frame, Frame):\n self.report_error(\n item.context_expr, \"Invalid context expression in the with-statement.\"\n )\n rhs = stack.enter_context(frame)\n if item.optional_vars is not None:\n self.eval_assign(target=item.optional_vars, source=rhs, bind_value=bind_with_value)\n self.visit_body(node.body)", "def test_with(self):\n source = \"\"\"\n with guard():\n a = 1\n \"\"\"\n target = \"\"\"\n with guard_new():\n a = 1\n \"\"\"\n self._check_compatibility(source, target)", "def _analyse_stmt_With(self, statement: ast.With, *, next: CFNode) -> CFNode:\n return self._analyse_with(statement, next=next)", "def _analyse_with(\n self,\n statement: Union[ast.AsyncWith, ast.With],\n *,\n next: CFNode,\n ) -> CFNode:\n with_node = self._ast_node(\n statement,\n enter=self._analyse_statements(statement.body, next=next),\n error=self._raise,\n )\n return with_node", "def run401_02():\n\n class Context:\n def __init__(self):\n print('__init__()')\n\n def __enter__(self):\n print('__enter__()')\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n # print(exc_type, exc_val, exc_tb)\n print('__exit__()')\n\n with Context():\n print('do something')", "def test_execute_with_context(self):\n pass", "def __enter__(self):\n self.__within_context = True\n if not self.__initialized:\n self.__initialization__()\n return self", "def __enter__(self):\n # mark the beginning of a transaction\n self.execute(*self.sql.transaction())\n # and hand me back to the caller\n return self", "def in_context(self):\n pass", "def context_set(context):\n global __context\n if context == DefaultContext:\n context = context.copy()\n __context = context", "def session_context(self):\n session = self.Session()\n try:\n yield session\n session.commit()\n except: # noqa E722\n session.rollback()\n raise\n finally:\n session.close()", "def wrapwith(item, body, locref=None):\n locref = locref or body[0]\n wrapped = With(items=[withitem(context_expr=item, optional_vars=None)],\n body=body,\n lineno=locref.lineno, col_offset=locref.col_offset)\n return [wrapped]", "def __enter__(self):\r\n pass", "def context(self) -> CONTEXT:", "def __enter__(self):\n pass", "def __enter__(self):\n pass", "def with_(self):\n return \"With\"", "def session_scope(raise_exception=True):\n session = Session()\n try:\n yield session\n session.commit()\n except Exception:\n session.rollback()\n if raise_exception:\n raise\n finally:\n session.close()", "def __enter__(self):\n\t\treturn self", "def env_wrap(self):\n\n old_env = dict(os.environ)\n try:\n yield\n finally:\n os.environ.clear()\n os.environ.update(old_env)", "def __enter__(self):\n return self.connection.__enter__", "def session_scope(raise_exception=True):\n session = cls.Session()\n try:\n yield session\n session.commit()\n except Exception:\n session.rollback()\n if raise_exception:\n raise\n finally:\n session.close()", "def session_scope(dsn):\n factory = sessionmaker(bind=Engines[dsn])\n session = factory()\n try:\n yield session\n session.commit()\n except:\n session.rollback()\n raise\n finally:\n session.close()", "def beginScope():", "def session_scope():\n session = Session(bind=engine)\n try:\n yield session\n except:\n session.rollback()\n raise\n finally:\n session.close()", "def execute_block(self, stmt: List[loxStmtAST.Stmt], 
environment: loxenvironment.Environment) -> None:\n previous_env: loxenvironment.Environment = self.environment\n try:\n self.environment = environment\n for statement in stmt:\n self.execute(statement)\n finally:\n self.environment = previous_env", "def __enter__(self):\n raise NotImplementedError", "def __enter__(self) -> None:\n raise NotImplementedError()", "def visit_With(self, node):\n assert hasattr(node, 'items')\n if node.items:\n withitem = node.items[0]\n assert isinstance(withitem, gast.withitem)\n if isinstance(withitem.context_expr, gast.Call):\n func = withitem.context_expr.func\n if isinstance(func, gast.Name):\n func.id += '_new'\n return node", "def set(**args):\n return Context(args)", "def context_local(context=None):\n class manager(object):\n def __init__(self, ctx):\n \"\"\"\n :type ctx: Context\n \"\"\"\n self.context = ctx.copy()\n\n def __enter__(self):\n self.orig_context = context_get()\n context_set(self.context)\n return self.context\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n context_set(self.orig_context)\n\n if context is None:\n context = context_get()\n return manager(context)", "def session_scope():\n session = Session()\n try:\n yield session\n session.commit()\n except:\n session.rollback()\n raise\n finally:\n session.close()", "def session_scope():\n session = Session()\n try:\n yield session\n session.commit()\n except:\n session.rollback()\n raise\n finally:\n session.close()", "def session_scope():\n session = Session()\n try:\n yield session\n session.commit()\n except:\n session.rollback()\n raise\n finally:\n session.close()", "def B():\n set_env()", "def session_context(func):\r\n def wrapper(*args, **kwargs):\r\n self = args[0]\r\n with self._create_db_session() as db:\r\n self.db = db\r\n return func(*args, **kwargs)\r\n return wrapper", "def enterScope(self, name):", "def context(self) -> Any:\n ...", "def db_session_context(db_name):\n try:\n db = psycopg2.connect(database=db_name)\n yield db\n finally:\n db.close()", "def session_scope(Session):\n session = Session()\n try:\n yield session\n session.commit()\n except:\n session.rollback()\n raise\n finally:\n session.close()", "def session_scope(engine):\n DBSession = sessionmaker(bind=engine)\n Session = DBSession()\n try:\n yield Session\n Session.commit()\n except:\n Session.rollback()\n raise\n finally:\n Session.close()", "def __enter__(self):\n # We pull in some useful bits\n import inspect\n from byteplay import Code,haslocal,SetLineno, \\\n SETUP_WITH,WITH_CLEANUP,\\\n STORE_FAST,STORE_NAME,STORE_GLOBAL,\\\n POP_TOP,POP_BLOCK\n\n frame = inspect.currentframe(1)\n self.__code = code = Code.from_code(frame.f_code)\n self.__line = frame.f_lineno\n self.__globals = frame.f_globals\n\n # The SetLineno instructions get in the way here\n # since I want to find the actual instruction\n # by offset. 
I'll just strip them out\n instructions = code.code\n nolines = [x for x in instructions if x[0] != SetLineno]\n instructions[:] = nolines\n pc = __pc_to_byteplay_offset__(instructions).get(frame.f_lasti)\n assert pc is not None,\"Found invalid offset for with\"\n\n # Strip off everything through the SETUP_WITH\n assert instructions[pc][0] == SETUP_WITH,\"LittleTimer must be invoked from a with statement\"\n end_label = instructions[pc][1]\n del instructions[:pc+1]\n\n # which is followed by a STORE_NAME, STORE_LOCAL,\n # STORE_GLOBAL, or POP_TOP\n assert instructions[0][0] in (\\\n STORE_NAME,\n STORE_FAST,\n STORE_GLOBAL,\n POP_TOP\n ),\"Only simple assignment is supported, no more complex than LittleTimer() as T\"\n if instructions[0][0] == POP_TOP: self.__oneshot = True\n del instructions[0]\n\n # Find the closing WITH_CLEANUP\n targets = [offset for offset,(opcode,arg) in enumerate(instructions)\n if opcode is end_label]\n assert targets,\"This with-statement was not formed the way I expected\"\n pc = targets[0]+1\n assert instructions[pc][0] == WITH_CLEANUP,\"This with-statement was not formed the way I expected\"\n\n # Reverse until we find a POP_BLOCK\n while pc >= 0:\n opcode = instructions[pc][0]\n if opcode == POP_BLOCK:\n break\n pc -= 1\n del instructions[pc:]\n self.__bytecodes = instructions\n\n # We may have some local values that we need to set up\n locals = set([x[1] for x in instructions if x[0] in haslocal])\n self.__locals = dict( (sym,frame.f_locals.get(sym,None))\n for sym in locals )\n return self\n\n return self", "def __enter__(self):\r\n return self", "def __enter__(self):\n self._in_context_block = True\n # TODO: create local backup of file in case we can't upload and have to roll back\n return self", "def __enter__(self):\n self.manual_enter()\n return self", "def ensure_context(self):\n with driver.get_active_context():\n oldctx = self._get_attached_context()\n newctx = self.get_or_create_context(None)\n self._set_attached_context(newctx)\n try:\n yield\n finally:\n self._set_attached_context(oldctx)", "def _analyse_stmt_AsyncWith(\n self, statement: ast.AsyncWith, *, next: CFNode\n ) -> CFNode:\n return self._analyse_with(statement, next=next)", "def context():\n return dict()", "def _prepare_env(self, graph, **kwargs):\n raise NotImplementedError", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self) -> Seat:\n return self", "def session_scope():\n # Create session.\n session = sessionmaker()\n session.configure(bind=engine)\n session = session()\n\n try:\n yield session\n session.commit()\n\n # Rollback on any exception.\n except Exception as e:\n logging.info('Rollback: %s', e)\n session.rollback()\n raise\n # Close no matter what.\n finally:\n session.close()", "def session_scope():\n session = Session()\n try:\n yield session\n session.commit()\n except InvalidRequestError:\n session.rollback()\n raise\n finally:\n session.close()", "def make_shell_context():\n return {'db': db, 'User': User, 'Post': Post}", "def cooked_mode(self) -> ContextManager[None]:", "def __enter__(self):\n return type(self)()", "def setContext(self, context: Any, /) -> Any:\n ...", "def wrap_transaction(self):\n new_script = self.__class__()\n new_script.append(\n [BeginStatement()] + self.statements + [CommitStatement()])\n\n return new_script", "def env(**kwargs) -> ContextManager:\n\n def update(target, source):\n updated = {}\n for k, v in source.items():\n 
if v is None:\n try:\n updated[k] = target.pop(k)\n except KeyError:\n pass\n else:\n updated[k] = target.get(k, None)\n target[k] = v\n return updated\n\n previous_env = update(os.environ, kwargs)\n try:\n yield\n finally:\n update(os.environ, previous_env)", "def _make_context():\n return {'app': app, 'db': db}", "def exec_in_context(self,arg):\n ## contains elaborate scheme to detect what is specified by\n ## -s, and to warn about any replacement\n current_ids = dict([(k,id(v)) for k,v in self.context.items()])\n\n exec arg in self.context\n\n for k,v in self.context.items():\n if k in self.unused_names and id(v)!=current_ids[k]:\n self.warning(\"Replacing previous value of '%s' with '%s'\"%(k,v))\n\n new_names = set(self.context.keys()).difference(set(current_ids.keys()))\n for k in new_names:\n self.unused_names.add(k)", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self", "def __enter__(self):\n return self" ]
[ "0.6312834", "0.61630744", "0.6097616", "0.60587364", "0.58667314", "0.58018786", "0.5615465", "0.5607413", "0.5506247", "0.5476283", "0.54457474", "0.5373202", "0.53038436", "0.5298539", "0.52976507", "0.52976507", "0.52566886", "0.5256446", "0.5254769", "0.52320963", "0.5220585", "0.5213544", "0.5202233", "0.5200399", "0.51965004", "0.5149359", "0.5142639", "0.514196", "0.5141089", "0.51023155", "0.5100797", "0.50906366", "0.50906366", "0.50906366", "0.50904804", "0.50850946", "0.50807333", "0.50722444", "0.503844", "0.50265807", "0.50226665", "0.5019193", "0.50119776", "0.50095373", "0.500424", "0.499031", "0.49892378", "0.49871328", "0.49837404", "0.49744552", "0.49744552", "0.49726027", "0.49726027", "0.49685454", "0.49646753", "0.49629244", "0.4953083", "0.4952969", "0.49518397", "0.49444416", "0.4938706", "0.49348322", "0.4930179", "0.49102718", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177", "0.49053177" ]
0.0
-1
Returns the environment info returned on the last step.
def get_info(self) -> types.NestedArray: raise NotImplementedError('No support of get_info for this environment.')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def environment(self) -> dict:\n return self._environment_info", "def get_environment(self):\r\n return self.mcas[0].get_environment()", "def environment(self) -> pulumi.Output['outputs.EnvironmentResponse']:\n return pulumi.get(self, \"environment\")", "def getEnvironment(self):\n pass", "def get_env_info(self):\n self.reset()\n env_info = {\n \"n_agents\": self.n_agents,\n \"api_type\": self.api_type,\n \"action_type\": self.action_type\n }\n # update the agent ids, will used in the weights map.\n # default work well with the sumo multi-agents\n agent_ids = list(self.get_init_state().keys()) if self.n_agents > 1 else [0]\n env_info.update({\"agent_ids\": agent_ids})\n\n return env_info", "def get_environment(self):\n return self._environment", "def environment(self):\n return self._get_field(\"environment\")", "def env(self): # type: () -> t.List[str]\n return self.config['Env']", "def environment(self):\n return self._environ", "def environment(self):\n return self._environment", "def env(self):\n return self._env", "def get_environment() -> dict:\n go_path = subprocess.check_output([\"go\", \"env\", \"GOPATH\"]).decode().strip()\n setup_script_path = f\"{go_path}/src/github.com/harmony-one/harmony/scripts/setup_bls_build_flags.sh\"\n response = subprocess.check_output([\"bash\", setup_script_path, \"-v\"], timeout=5)\n environment = json.loads(response)\n environment[\"HOME\"] = os.environ.get(\"HOME\")\n return environment", "def get_current_environment():\n # Search for the environment variable set by the hutch python setup\n env = os.getenv('CONDA_ENVNAME')\n # Otherwise look for built-in Conda environment variables\n if not env:\n env = os.getenv('CONDA_DEFAULT_ENV')\n # Check the top level PYTHONPATH to see if we have packages installed in\n # development mode\n dev = os.getenv('PYTHONPATH')\n if dev:\n try:\n dev_pkgs = os.listdir(dev)\n except FileNotFoundError:\n logger.debug(\"No dev folder found\")\n dev_pkgs = list()\n else:\n dev_pkgs = list()\n return env, dev_pkgs", "def env_info(env, brain):\n\n # reset the environment\n env_info = env.reset(train_mode=True)[brain.brain_name]\n\n # number of agents in the environment\n print('Number of agents:', len(env_info.agents))\n\n # number of actions\n action_size = brain.vector_action_space_size\n print('Number of actions:', action_size)\n\n # examine the state space \n state = env_info.vector_observations[0]\n print('States look like:', state)\n state_size = len(state)\n print('States have length:', state_size)\n return state_size, action_size", "def environment(self) -> Optional[pulumi.Input['EnvironmentArgs']]:\n return pulumi.get(self, \"environment\")", "def get_server_environment(self):\n return self.client.getServerEnvironment(self.creds, self.transaction, self.environment)", "def environ(self):\r\n return self._environ", "def env(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"env\")", "def get_current_environment(self):\n for env in self.indicators:\n if self._is_env_indicator_in_url(self.indicators[env]):\n return env\n\n return Environment.PRODUCTION", "def test_get_environment_string(self):\n pass", "def environment_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"environment_name\")", "def get_environment_map(self):\n return {'oxmeta': self.env}", "def env(self) -> Optional[Sequence['_core.v1.outputs.EnvVar']]:\n return pulumi.get(self, \"env\")", "def env(self) -> Optional[Sequence['_core.v1.outputs.EnvVarPatch']]:\n return pulumi.get(self, 
\"env\")", "def env(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EnvVarArgs']]]]:\n return pulumi.get(self, \"env\")", "def last_env_response(self) -> Union[List[EnvResponse], EnvResponse]:\n return squeeze_list(self._last_env_response)", "def get_os_env():\n env = os.environ\n# print(\"env \\n\" , env)\n return env", "def env(self):\n return spack.schema.environment.parse(self.conf.get(\"environment\", {}))", "def current_config():\n if os.environ[\"ENVIRONMENT\"] == \"production\":\n return Production()\n elif os.environ[\"ENVIRONMENT\"] == \"staging\":\n return Staging()\n elif os.environ[\"ENVIRONMENT\"] == \"testing\":\n return Testing()\n elif os.environ[\"ENVIRONMENT\"] == \"development\":\n return Development()\n else:\n raise KeyError(f\"Unknown environment '{os.environ['ENVIRONMENT']}'\")", "def get_current_environment(self, note=None):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tself.handle_note(note)\n\t\tres = self.get_current_shutit_pexpect_session_environment().environment_id\n\t\tself.handle_note_after(note)\n\t\treturn res", "def get_environ():\n # Manually set environment.\n if FLAGS.env is not None:\n return BasicEnvironment.from_json(open(FLAGS.env, \"r\").read())\n\n if FLAGS.data_folder is None:\n data_folder = FLAGS.dataset\n else:\n data_folder = FLAGS.data_folder\n exp_id = \"exp_\" + FLAGS.dataset + \"_\" + FLAGS.model\n if FLAGS.id is None:\n exp_id = gen_id(exp_id)\n else:\n exp_id = FLAGS.id\n return BasicEnvironment(\n device=get_device(FLAGS.gpu),\n dataset=FLAGS.dataset,\n data_folder=data_folder,\n logs_folder=FLAGS.logs,\n save_folder=FLAGS.results,\n run_validation=FLAGS.validation,\n verbose=FLAGS.verbose,\n exp_id=exp_id,\n description=FLAGS.description,\n valid_num_fold=FLAGS.valid_num_fold,\n valid_fold_id=FLAGS.valid_fold_id)", "def _get_env(self):\n env = {}\n for k, v in os.environ.items():\n k = k.decode() if isinstance(k, bytes) else k\n v = v.decode() if isinstance(v, bytes) else v\n env[k] = v\n return list(env.items())", "def get_env_vars(self):\n env_vars_api = '/injectedEnvVars/api/json'\n env_vars_json = self._api_request(self.url + env_vars_api)\n try:\n env_vars_json = json.loads(env_vars_json)\n return env_vars_json['envMap']\n except JSONDecodeError:\n return None", "def step_env(self):\n raise NotImplementedError\n # Not needed for this homework", "def environment_variables(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"environment_variables\")", "def environ(self):\n return dict(page='environ', environment=request.environ)", "def environ(self):\n return dict(page='environ', environment=request.environ)", "def getEnvironmentVars(self):\n my_env = os.environ.copy()\n my_env[\"MODE\"] = self.model.mode\n my_env[\"ALGORITHM\"] = self.model.algorithm\n my_env[\"INPUT_FILE\"] = self.model.inputFile\n my_env[\"TARGET_FILE\"] = self.model.targetFile\n my_env[\"DEVICE\"] = self.model.device\n my_env[\"SIZE\"] = str(self.model.size)\n my_env[\"ROLE\"] = self.model.role\n my_env[\"WAIT_SIZE\"] = str(self.model.waitSize)\n my_env[\"STEP_SIZE\"] = str(self.model.stepSize)\n my_env[\"TUI_CONNECTION\"] = str(True)\n return my_env", "def environment_variables(\n self,\n ) -> typing.Optional[\n typing.Mapping[str, aws_cdk.aws_codebuild.BuildEnvironmentVariable]\n ]:\n return self._values.get(\"environment_variables\")", "def environment_variables(\n self,\n ) -> typing.Optional[\n typing.Mapping[str, aws_cdk.aws_codebuild.BuildEnvironmentVariable]\n ]:\n return 
self._values.get(\"environment_variables\")", "def environment_variables(\n self,\n ) -> typing.Optional[\n typing.Mapping[str, aws_cdk.aws_codebuild.BuildEnvironmentVariable]\n ]:\n return self._values.get(\"environment_variables\")", "def environment_variables(\n self,\n ) -> typing.Optional[\n typing.Mapping[str, aws_cdk.aws_codebuild.BuildEnvironmentVariable]\n ]:\n return self._values.get(\"environment_variables\")", "def print_environment():\n import sys\n version = {}\n for pkg in 'moldesign IPython ipywidgets jupyter matplotlib numpy docker pyccc distutils' \\\n 'nbmolviz jupyter_client jupyter_core pint Bio openbabel simtk pyscf pip setuptools'\\\n .split():\n try:\n module = __import__(pkg)\n except ImportError as e:\n version[pkg] = str(e)\n else:\n try:\n version[pkg] = module.__version__\n except AttributeError as e:\n version[pkg] = str(e)\n env = {'platform': sys.platform,\n 'version': sys.version,\n 'prefix': sys.prefix}\n\n try:\n import platform\n env['machine'] = platform.machine()\n env['linux'] = platform.linux_distribution()\n env['mac'] = platform.mac_ver()\n env['windows'] = platform.win32_ver()\n env['impl'] = platform.python_implementation()\n env['arch'] = platform.architecture()\n env['system'] = platform.system()\n env['python_build'] = platform.python_build()\n env['platform_version'] = platform.version()\n\n except Exception as e:\n env['platform_exception'] = str(e)\n\n print(json.dumps({'env': env,\n 'versions': version}))", "def get_all_environments():\n return ENVIRONMENTS", "def get_environ_dict():\n return {\n 'os.environ': _get_os_environ_dict((\n 'AUTH_DOMAIN',\n 'CURRENT_CONFIGURATION_VERSION',\n 'CURRENT_MODULE_ID',\n 'CURRENT_VERSION_ID',\n 'DEFAULT_VERSION_HOSTNAME',\n 'FEDERATED_IDENTITY',\n 'FEDERATED_PROVIDER',\n 'GAE_LOCAL_VM_RUNTIME',\n 'HTTP_HOST',\n 'HTTP_PROXY',\n 'HTTP_X_APPENGINE_HTTPS',\n 'HTTP_X_APPENGINE_QUEUENAME',\n 'HTTP_X_ORIGINAL_HOST',\n 'HTTP_X_ORIGINAL_SCHEME',\n 'SERVER_NAME',\n 'SERVER_PORT',\n 'SERVER_SOFTWARE',\n 'USER_IS_ADMIN',\n )),\n 'app_identity': _get_app_identity_dict((\n 'get_service_account_name',\n 'get_application_id',\n 'get_default_version_hostname',\n )),\n 'modules': _get_modules_dict((\n 'get_current_module_name',\n 'get_current_version_name',\n 'get_current_instance_id',\n 'get_modules',\n 'get_versions',\n 'get_default_version',\n 'get_hostname',\n )),\n 'namespace_manager': _get_namespace_manager_dict((\n 'get_namespace',\n 'google_apps_namespace',\n )),\n }", "def get_execution_envs(self):\n return self.execution_envs", "def fetch_environ(environment):\n url = current_app.config['ENVIRONMENTS'][environment]\n response = requests.get(url)\n if response.status_code is not 200:\n raise FailedDependency(\"Github returned status code %s\" % response.status_code)\n return {environment: json.loads(response.text)}", "def environment_variables(self) -> Optional[pulumi.Input['GatewayPropertiesEnvironmentVariablesArgs']]:\n return pulumi.get(self, \"environment_variables\")", "def _get_environment():\n namespace = current_app.config.get('POD_NAMESPACE').lower()\n if namespace.endswith('dev'):\n return 'DEV'\n if namespace.endswith('test'):\n return 'TEST'\n if namespace.endswith('tools'):\n return 'SANDBOX'\n return ''", "def envs():\n\n # update and grab the envs from the metadata keys\n metadata = _init()\n return list(metadata.keys())", "def environment_properties(self) -> Optional[pulumi.Input['ApplicationApplicationConfigurationEnvironmentPropertiesArgs']]:\n return pulumi.get(self, 
\"environment_properties\")", "def get_details():\n if not hasattr(env, \"site_name\"):\n env.site_name = prompt(\"Enter site domain name:\")\n env.site_is_secure = confirm(\"Do you need SSL? (Yes/No)\", default=False)\n env.app_server = prompt(\"Enter app server you wish to use (apache/uwsgi/gunicorn):\")\n if env.site_is_secure:\n env.ip_address = prompt(\"Enter server IP address:\")\n else:\n env.ip_address = \"0.0.0.0\"\n\n # Find out project name\n project_name = env.site_name.split('.')\n try:\n if project_name[1] == 'com':\n # Sample case - abc.com\n env.project_name = project_name[0]\n else:\n # Sample case - shop.abc.com\n env.project_name = project_name[1]\n except IndexError:\n env.project_name = env.site_name", "def get_env(self, loop):\n env = getattr(self.app, 'env', None)\n if not env:\n env = self.environment(self.app, loop, self.host, self.port)\n self.app.env = env\n return env", "def ENVIRONMENT(self):\n return self._get_environment()", "def dumpenv(self):\n\n print('-------------------------------')\n pprint.pprint(dict(os.environ))\n print('-------------------------------')", "def _create_extra_environment(self):\n return {}", "def snapshot():\n return Env(os.environ)", "def _generate_environment(self):\n envvars = {}\n for key in self.envvars:\n try:\n envvars[key] = os.environ[key]\n except KeyError:\n continue\n\n # Warn the user that we cannot support secrets\n if envvars:\n logger.warning(\"This API does not support environment secrets.\")\n return envvars", "def current(self):\n\n config = self.alembic_config()\n script = ScriptDirectory.from_config(config)\n\n revision = 'base'\n\n def display_version(rev, context):\n for rev in script.get_all_current(rev):\n nonlocal revision\n revision = rev.cmd_format(False)\n\n return []\n\n with EnvironmentContext(config, script, fn=display_version):\n script.run_env()\n\n return revision", "def environment_muse(self) -> dict:\n return self._environment_muse", "def environments(self):\n envs = self.config[\"tox\"][\"envlist\"]\n #result = re.split(\"[^a-zA-Z0-9]\", envs)\n result = re.split(r'\\n| ,|,', envs)\n #print ([string for string in result if string != \"\"])\n result = (([string.strip() for string in result if string != \"\"]))\n print(list(dict.fromkeys(result)))\n return ((list(dict.fromkeys(result))))", "def _get_environment(self):\n if self._cache.get(\"_environment\") is None:\n name = self.get(\"environmentname\", \"default\")\n if name:\n db = self.session\n try:\n env = db.query(models.Environment).filter(models.Environment.name==name).one()\n except config.NoResultFound as err:\n raise config.ConfigError(\"Bad environmentname %r: %s\" % (name, err))\n username = self.get(\"username\") # username should be set by test runner\n if username:\n if env.is_owned():\n if env.owner.username != username:\n raise config.ConfigError(\"Environment is currently owned by: %s\" % (env.owner,))\n env.set_owner_by_username(db, username)\n env = EnvironmentRuntime(db, env, self.logfile)\n self._cache[\"_environment\"] = env\n else:\n raise config.ConfigError, \"Bad environmentname %r.\" % (name,)\n return self._cache[\"_environment\"]", "def environment(self) -> \"Environment\":\n return self._environment", "def augmented_environment(self):\n env = os.environ.copy()\n env.update(self.environ)\n return env", "def collect_env():\n env_info = mmengine_collect_env()\n\n # MMEngine does not add the hipcc compiler information when collecting\n # environment information, so it is added here. 
When MMEngine v0.3.0 is\n # released, the code here can be removed.\n cuda_available = torch.cuda.is_available()\n if cuda_available and env_info.get('NVCC') == 'Not Available':\n CUDA_HOME = env_info['CUDA_HOME']\n if CUDA_HOME is not None and osp.isdir(CUDA_HOME):\n if CUDA_HOME == '/opt/rocm':\n try:\n nvcc = osp.join(CUDA_HOME, 'hip/bin/hipcc')\n nvcc = subprocess.check_output(\n f'\"{nvcc}\" --version', shell=True)\n nvcc = nvcc.decode('utf-8').strip()\n release = nvcc.rfind('HIP version:')\n build = nvcc.rfind('')\n nvcc = nvcc[release:build].strip()\n except subprocess.SubprocessError:\n nvcc = 'Not Available'\n else:\n try:\n nvcc = osp.join(CUDA_HOME, 'bin/nvcc')\n nvcc = subprocess.check_output(f'\"{nvcc}\" -V', shell=True)\n nvcc = nvcc.decode('utf-8').strip()\n release = nvcc.rfind('Cuda compilation tools')\n build = nvcc.rfind('Build ')\n nvcc = nvcc[release:build].strip()\n except subprocess.SubprocessError:\n nvcc = 'Not Available'\n env_info['NVCC'] = nvcc\n\n env_info['MMCV'] = mmcv.__version__\n\n try:\n from mmcv.ops import get_compiler_version, get_compiling_cuda_version\n except ModuleNotFoundError:\n env_info['MMCV Compiler'] = 'n/a'\n env_info['MMCV CUDA Compiler'] = 'n/a'\n else:\n env_info['MMCV Compiler'] = get_compiler_version()\n env_info['MMCV CUDA Compiler'] = get_compiling_cuda_version()\n\n return env_info", "def total_env_steps(self):\n return self._stats.total_env_steps", "def getEnvironment(self, environment = {}):\r\n e = dict(environment, **self.environment)\r\n e.update(locals(), **globals())\r\n for x in dir(self):\r\n if not x.startswith('_'):\r\n e[x] = getattr(self, x)\r\n return e", "def environment(self, name):\n return self.environments[name]", "def get_env(self, *args):\n m = module(*args)\n return m.env", "def read_environ():\n\n environ = {}\n environ['BASEDIR'] = os.environ['TS_BASEDIR']\n environ['CONFIG_NL'] = os.environ['TS_CONFIG_NL']\n environ['NL_TS_SWITCH'] = os.environ['TS_NL_TS_SWITCH']\n environ['DT_FILE'] = os.environ['TS_DT_FILE']\n environ['REFOUTDIR'] = os.environ['TS_REFOUTDIR']\n environ['VERBOSE'] = os.environ['TS_VERBOSE']\n environ['RUNDIR'] = os.environ['TS_RUNDIR']\n environ['LOGFILE'] = os.environ['TS_LOGFILE']\n environ['NAMELISTDIR'] = os.environ['TS_NAMELISTDIR']\n environ['TOLERANCE'] = os.environ['TS_TOLERANCE']\n environ['FORCEMATCH'] = os.environ['TS_FORCEMATCH']\n environ['TUNING_ITERATIONS'] = os.environ['TS_TUNING_ITERATIONS']\n environ['TUNE_THRESHOLDS'] = os.environ['TS_TUNE_THRESHOLDS']\n environ['RESET_THRESHOLDS'] = os.environ['TS_RESET_THRESHOLDS']\n environ['ICON'] = os.environ['TS_ICON']\n environ['YUFILE'] = os.environ['TS_YUFILE']\n return environ", "def GetCurrent():\n global ENV\n return ENV[threading.current_thread().ident]", "def environ(self):\n return dict(environment=request.environ)", "def environ(self):\n return dict(environment=request.environ)", "def environ(self):\n return dict(environment=request.environ)", "def get_envdata(cls):\n\t\tif cls.__envdata is EnvData:\n\t\t\tcls.__envdata = cls.set_envdata()\n\t\treturn cls.__envdata", "def get(self):\n \n # read the env variables and store them\n for var in self._vars.itervalues():\n var.get()", "def get_state(self):\n return self._env.get_state()", "def env_tokens(self):\r\n\r\n # Find the env JSON file\r\n if self.SERVICE_VARIANT:\r\n env_path = self.REPO_ROOT.parent / \"{service}.env.json\".format(service=self.SERVICE_VARIANT)\r\n else:\r\n env_path = path(\"env.json\").abspath()\r\n\r\n # If the file does not exist, here or one level 
up,\r\n # issue a warning and return an empty dict\r\n if not env_path.isfile():\r\n env_path = env_path.parent.parent / env_path.basename()\r\n if not env_path.isfile():\r\n print(\r\n \"Warning: could not find environment JSON file \"\r\n \"at '{path}'\".format(path=env_path),\r\n file=sys.stderr,\r\n )\r\n return dict()\r\n\r\n # Otherwise, load the file as JSON and return the resulting dict\r\n try:\r\n with open(env_path) as env_file:\r\n return json.load(env_file)\r\n\r\n except ValueError:\r\n print(\r\n \"Error: Could not parse JSON \"\r\n \"in {path}\".format(path=env_path),\r\n file=sys.stderr,\r\n )\r\n sys.exit(1)", "def print_env_vars():\n print(\"Current process environment variables:\")\n for k, v in os.environ.items():\n print('{0}={1}'.format(k, v))", "def env(self) -> Optional[Env]:\n raise NotImplementedError", "def show_env():\n envs = [\"PATH\", \"ORACLE_HOME\", \"TNS_ADMIN\", \"NLS_LANG\"]\n result = {}\n for env in envs:\n if env in os.environ:\n result[env] = os.environ[env]\n return result", "def GetEnvironment(self):\n environ = super(ServiceHandlerTest, self).GetEnvironment()\n if self.remote_host:\n environ['REMOTE_HOST'] = self.remote_host\n if self.server_host:\n environ['SERVER_HOST'] = self.server_host\n return environ", "def get_env_copy(self):\n if self._env:\n return cloudpickle.loads(cloudpickle.dumps(self._env))\n else:\n return None", "def _get_environment(cls):\n return cls.__name__.lower()", "def get_environment():\n return GenericGymEnv(id=\"real-time-gym-v1\", gym_kwargs={\"config\": CONFIG_DICT})", "def get_env():\n from platform import python_version\n versions = {}\n versions['iris'] = iris.__version__\n versions['matplotlib'] = matplotlib.__version__\n versions['numpy'] = np.__version__\n versions['python'] = python_version()\n return versions", "def environment(self) -> typing.Optional[aws_cdk.aws_codebuild.BuildEnvironment]:\n return self._values.get(\"environment\")", "def environment(self) -> typing.Optional[aws_cdk.aws_codebuild.BuildEnvironment]:\n return self._values.get(\"environment\")", "def environment(self) -> typing.Optional[aws_cdk.aws_codebuild.BuildEnvironment]:\n return self._values.get(\"environment\")", "def environment(self) -> typing.Optional[aws_cdk.aws_codebuild.BuildEnvironment]:\n return self._values.get(\"environment\")", "def read_environment(self):\n # Setup credentials\n if os.getenv(\"DO_API_TOKEN\"):\n self.api_token = os.getenv(\"DO_API_TOKEN\")\n if os.getenv(\"DO_API_KEY\"):\n self.api_token = os.getenv(\"DO_API_KEY\")", "def env_spec(self):\n return self._env_spec", "def env_spec(self):\n return self._env_spec", "def env_spec(self):\n return self._env_spec", "def get_config_info() -> Dict[str, Any]:\n config_info = dict()\n base_tmp_dir = os.environ.get('BASE_TMP_DIR', None)\n if not base_tmp_dir:\n raise EnvironmentError('BASE_TMP_DIR is not set in environment variables, please set it '\n 'up!')\n else:\n config_info['BASE_TMP_DIR'] = base_tmp_dir\n\n return config_info", "def env_dict(self): # type: () -> t.Dict[str, str]\n return dict((item[0], item[1]) for item in [e.split('=', 1) for e in self.env])", "def environments(self):\n # get environment login parameters\n _environments = self._environments\n if _environments is None:\n # take from cookie\n _environments = self.cookie_io.get('login_parameters')\n # if cookie is None - init with defaults\n if _environments is None:\n # default\n _environments = DEFAULT_ENVIRONMENTS\n # save to local variable\n self.environments = _environments\n else:\n # save from 
cookie to ram\n self._environments = _environments\n return _environments", "def environment_created(self):\n\n pass", "def env_vars(self):\n if self._m.spec and self._m.spec.container:\n return k8s_object.ListAsDictionaryWrapper(\n self._m.spec.container.env, self._messages.EnvVar)", "def collect_env():\n env_info = {}\n env_info['sys.platform'] = sys.platform\n env_info['Python'] = sys.version.replace('\\n', '')\n\n cuda_available = torch.cuda.is_available()\n env_info['CUDA available'] = cuda_available\n\n if cuda_available:\n devices = defaultdict(list)\n for k in range(torch.cuda.device_count()):\n devices[torch.cuda.get_device_name(k)].append(str(k))\n for name, device_ids in devices.items():\n env_info['GPU ' + ','.join(device_ids)] = name\n\n from mmcv.utils.parrots_wrapper import _get_cuda_home\n CUDA_HOME = _get_cuda_home()\n env_info['CUDA_HOME'] = CUDA_HOME\n\n if CUDA_HOME is not None and osp.isdir(CUDA_HOME):\n try:\n nvcc = osp.join(CUDA_HOME, 'bin/nvcc')\n nvcc = subprocess.check_output(f'\"{nvcc}\" -V', shell=True)\n nvcc = nvcc.decode('utf-8').strip()\n release = nvcc.rfind('Cuda compilation tools')\n build = nvcc.rfind('Build ')\n nvcc = nvcc[release:build].strip()\n except subprocess.SubprocessError:\n nvcc = 'Not Available'\n env_info['NVCC'] = nvcc\n\n try:\n # Check C++ Compiler.\n # For Unix-like, sysconfig has 'CC' variable like 'gcc -pthread ...',\n # indicating the compiler used, we use this to get the compiler name\n import sysconfig\n cc = sysconfig.get_config_var('CC')\n if cc:\n cc = osp.basename(cc.split()[0])\n cc_info = subprocess.check_output(f'{cc} --version', shell=True)\n env_info['GCC'] = cc_info.decode('utf-8').partition(\n '\\n')[0].strip()\n else:\n # on Windows, cl.exe is not in PATH. We need to find the path.\n # distutils.ccompiler.new_compiler() returns a msvccompiler\n # object and after initialization, path to cl.exe is found.\n import locale\n import os\n from distutils.ccompiler import new_compiler\n ccompiler = new_compiler()\n ccompiler.initialize()\n cc = subprocess.check_output(\n f'{ccompiler.cc}', stderr=subprocess.STDOUT, shell=True)\n encoding = os.device_encoding(\n sys.stdout.fileno()) or locale.getpreferredencoding()\n env_info['MSVC'] = cc.decode(encoding).partition('\\n')[0].strip()\n env_info['GCC'] = 'n/a'\n except subprocess.CalledProcessError:\n env_info['GCC'] = 'n/a'\n\n env_info['PyTorch'] = torch.__version__\n env_info['PyTorch compiling details'] = get_build_config()\n\n try:\n import torchvision\n env_info['TorchVision'] = torchvision.__version__\n except ModuleNotFoundError:\n pass\n\n env_info['OpenCV'] = cv2.__version__\n\n env_info['MMCV'] = mmcv.__version__\n\n try:\n from mmcv.ops import get_compiler_version, get_compiling_cuda_version\n except ModuleNotFoundError:\n env_info['MMCV Compiler'] = 'n/a'\n env_info['MMCV CUDA Compiler'] = 'n/a'\n else:\n env_info['MMCV Compiler'] = get_compiler_version()\n env_info['MMCV CUDA Compiler'] = get_compiling_cuda_version()\n\n return env_info" ]
[ "0.72315943", "0.7073374", "0.6950367", "0.68680507", "0.67611235", "0.6643641", "0.66212034", "0.65775234", "0.65664876", "0.65062505", "0.64626306", "0.6376371", "0.6369222", "0.6312315", "0.6308322", "0.6302491", "0.62798655", "0.6228703", "0.62262505", "0.62212014", "0.618388", "0.6171716", "0.6166037", "0.6165821", "0.61110765", "0.6086622", "0.6070514", "0.605451", "0.60301965", "0.6026193", "0.6023852", "0.6013351", "0.60078555", "0.59852695", "0.5984478", "0.5983139", "0.5983139", "0.5973449", "0.59680986", "0.59680986", "0.59680986", "0.59680986", "0.5956625", "0.5928279", "0.5926319", "0.59191114", "0.59103346", "0.59026104", "0.58993196", "0.5889294", "0.58834285", "0.5876446", "0.5867135", "0.58638865", "0.58478993", "0.58472866", "0.5827847", "0.5827653", "0.57877946", "0.5779459", "0.57743335", "0.57704914", "0.5762549", "0.57620376", "0.57568085", "0.5755472", "0.57538927", "0.57487905", "0.5748752", "0.5743438", "0.5732599", "0.5723498", "0.5723498", "0.5723498", "0.5678931", "0.5678549", "0.5667941", "0.56659955", "0.56612873", "0.56599975", "0.5655634", "0.56391746", "0.5611733", "0.5605993", "0.5605928", "0.55943143", "0.55939925", "0.55939925", "0.55939925", "0.55939925", "0.55936015", "0.5583637", "0.5583637", "0.5583637", "0.5577953", "0.5575783", "0.5566066", "0.55461794", "0.5536218", "0.55184317" ]
0.5656913
80
Returns the `state` of the environment. The `state` contains everything required to restore the environment to the current configuration. This can contain e.g.: the current time_step, the number of steps taken in the environment (for finite horizon MDPs), and hidden state (for POMDPs). Callers should not assume anything about the contents or format of the returned `state`. It should be treated as a token that can be passed back to `set_state()` later. Note that the returned `state` handle should not be modified by the environment later on, and ensuring this (e.g. using copy.deepcopy) is the responsibility of the environment.
def get_state(self) -> Any: raise NotImplementedError( 'This environment has not implemented `get_state()`.' )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_state(self, state):\n return state", "def get_state(self):\n return self._env.get_state()", "def get_state(self) -> FrameState:\n assert self.__state is not None\n return self.__state", "def get_state(self):\n return self.env.sim.get_state()", "def get_state(self):\n return self._state", "def get_state(self):\n return self._state", "def get_state(self):\n return self._state", "def get_state(self):\n return self._state", "def get_state(self):\n return self._state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def 
_get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def get_state(self):\n return copy.deepcopy(self._state)", "def get_state(self):\n return self.state", "def get_state(self):\n return self.state", "def get_state(self) -> numpy.ndarray:\n if self.clone_seeds:\n return self.gym_env.unwrapped.clone_full_state()\n else:\n return self.gym_env.unwrapped.clone_state()", "def get_state(self, state, is_episode_done):\n self.state = state\n self.is_episode_done = is_episode_done\n # Signal the RL env that it gets the current state\n self.state_got.set()\n print(\"RL env: Get state\", state)\n self.rl_agent.is_rl_tuned_para_got = False", "def state(self) :\n\t\ttry :\n\t\t\treturn self._state\n\t\texcept Exception as e:\n\t\t\traise e", "def get_state(self):\n return self.controller.get_state()", "def return_state(self):\n\t\treturn self.state", "def getState(self):\n return self._state", "def getState(self):\n return self._state", "def get_state(self) -> ApplicationState:\n return self.state", "def GetState(self):\r\n \r\n return self.state", "def __getstate__(self) -> dict:\n return self.__handle__.get_state()", "def get_state(self):\n if self.state:\n return self.state\n\n from timon.state import TMonState\n self.state = state = TMonState(self.cfg['statefile'], config=self)\n return state", "def state(self):\n return get_state(self.context)", "def getState(self) -> None:\n return self.state", "def getState():\n # TODO: this isn't nearly as meaningful as it used to be", "def state(self):\n return self._state.copy()", "def parsed_state(self):\n if not hasattr(self, '_state'):\n if not self.terraform_state:\n return {}\n self._state = json.loads(self.terraform_state)\n return self._state" ]
[ "0.6438888", "0.63937896", "0.63231564", "0.6284206", "0.6247405", "0.6247405", "0.6247405", "0.6247405", "0.6247405", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61721283", "0.61139774", "0.6080872", "0.6080872", "0.60472685", "0.60037977", "0.5984524", "0.5949563", "0.58931243", "0.58746195", "0.58746195", "0.5872254", "0.58606255", "0.58484745", "0.5843699", "0.5837003", "0.5770978", "0.5767528", "0.57628906", "0.5762463" ]
0.60081613
85
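The `get_state()` contract documented in the record above (an opaque, deep-copied token that callers never inspect) can be made concrete with a small sketch. Everything below is illustrative: the `CountingEnv` class, its fields, and the choice to deep-copy the token are assumptions, not part of the record.

```python
import copy
from typing import Any


class CountingEnv:
    """Minimal sketch of an environment honouring the get_state() contract."""

    def __init__(self):
        self._num_steps = 0          # steps taken in the current episode
        self._last_observation = 0   # stand-in for hidden / POMDP state

    def get_state(self) -> Any:
        # Return an opaque token. copy.deepcopy ensures the environment
        # cannot mutate the caller's handle later on.
        return copy.deepcopy(
            {"num_steps": self._num_steps, "observation": self._last_observation}
        )

    def set_state(self, state: Any) -> None:
        # Restore exactly the configuration captured by get_state().
        state = copy.deepcopy(state)
        self._num_steps = state["num_steps"]
        self._last_observation = state["observation"]
```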
Restores the environment to a given `state`. See definition of `state` in the documentation for get_state().
def set_state(self, state: Any) -> None: raise NotImplementedError( 'This environment has not implemented `set_state()`.' )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def restore_state(self, state: ale_py.ALEState):\n self.ale.restoreState(state)", "def restore_state(self, state):\n state_ref = self.ale.decodeState(state)\n self.ale.restoreState(state_ref)\n self.ale.deleteState(state_ref)", "def restore_full_state(self, state):\n state_ref = self.ale.decodeState(state)\n self.ale.restoreSystemState(state_ref)\n self.ale.deleteState(state_ref)", "def set_state(self, state):\n self._env.set_state(state)", "def restoreState(self, state):\n self.setVoltage(state['voltage'])\n if state['output'] == True:\n self.turnOn()\n else:\n self.turnOff()", "def _replace_state(self, state):\n logging.info('replace state')\n self._state = state", "def set_state(self, state: numpy.ndarray):\n state = state.astype(numpy.uint8)\n if self.clone_seeds:\n self.gym_env.unwrapped.restore_full_state(state)\n else:\n self.gym_env.unwrapped.restore_state(state)\n return state", "def reset_state(self, state):\n return self.manager.reset_state(self, state)", "def reset_state(self, state):\n return self.manager.reset_state(self, state)", "def reset_state(self, name=None):\n if name is None:\n self.solver.reset_solver(self.initial_state.copy())\n else:\n # TODO: Raise a nice ProtocolError if state not defined\n self.solver.reset_solver(self.saved_states[name].copy())", "def set_state(self, state=0):\r\n return self._arm.set_state(state=state)", "def __change_state(self, state):\n self.state = state", "def setstate(self, state):\r\n self.preferred_optimizer = state.pop()\r\n self.sampling_runs = state.pop()\r\n self.optimization_runs = state.pop()\r\n self.priors = state.pop()\r\n Parameterized.setstate(self, state)", "def set_state(self,state):\n self.__state = state", "def _reset_state(self):\n self.state = self.start_state.copy()", "def set_state(self, state):\n return self.update(current_state=state)", "def set_state(self, state):\n _modeller.mod_state_optimizer_state_set(self._modpt, self.__edat.modpt,\n state)", "def state(self, state: _State) -> None:\n prev_data = self._state.data\n self._state = state.with_data(prev_data)", "def restore_full_state(self, state: ale_py.ALEState):\n logger.warn(\n \"restore_full_state() is deprecated and will be removed in a future release of `ale-py`. \"\n \"Please use `restore_state(state)` which will restore the state regardless of being a full or partial state. \"\n )\n self.ale.restoreSystemState(state)", "def reset_to(self, state):\n should_ret = False\n if \"model\" in state:\n self.reset()\n xml = postprocess_model_xml(state[\"model\"])\n self.env.reset_from_xml_string(xml)\n self.env.sim.reset()\n if not self._is_v1:\n # hide teleop visualization after restoring from model\n self.env.sim.model.site_rgba[self.env.eef_site_id] = np.array([0., 0., 0., 0.])\n self.env.sim.model.site_rgba[self.env.eef_cylinder_id] = np.array([0., 0., 0., 0.])\n if \"states\" in state:\n self.env.sim.set_state_from_flattened(state[\"states\"])\n self.env.sim.forward()\n should_ret = True\n\n if \"goal\" in state:\n self.set_goal(**state[\"goal\"])\n if should_ret:\n # only return obs if we've done a forward call - otherwise the observations will be garbage\n return self.get_observation()\n return None", "def __setstate__(self, state) -> None:\n # TODO (sven): Validate that our config and the config in state are compatible.\n # For example, the model architectures may differ.\n # Also, what should the behavior be if e.g. some training parameter\n # (e.g. 
lr) changed?\n\n if hasattr(self, \"workers\") and \"worker\" in state:\n self.workers.local_worker().set_state(state[\"worker\"])\n remote_state = ray.put(state[\"worker\"])\n self.workers.foreach_worker(\n lambda w: w.set_state(ray.get(remote_state)),\n local_worker=False,\n healthy_only=False,\n )\n if self.evaluation_workers:\n # If evaluation workers are used, also restore the policies\n # there in case they are used for evaluation purpose.\n self.evaluation_workers.foreach_worker(\n lambda w: w.set_state(ray.get(remote_state)),\n healthy_only=False,\n )\n # If necessary, restore replay data as well.\n if self.local_replay_buffer is not None:\n # TODO: Experimental functionality: Restore contents of replay\n # buffer from checkpoint, only if user has configured this.\n if self.config.get(\"store_buffer_in_checkpoints\"):\n if \"local_replay_buffer\" in state:\n self.local_replay_buffer.set_state(state[\"local_replay_buffer\"])\n else:\n logger.warning(\n \"`store_buffer_in_checkpoints` is True, but no replay \"\n \"data found in state!\"\n )\n elif \"local_replay_buffer\" in state and log_once(\n \"no_store_buffer_in_checkpoints_but_data_found\"\n ):\n logger.warning(\n \"`store_buffer_in_checkpoints` is False, but some replay \"\n \"data found in state!\"\n )\n\n if self.train_exec_impl is not None:\n self.train_exec_impl.shared_metrics.get().restore(state[\"train_exec_impl\"])\n elif \"counters\" in state:\n self._counters = state[\"counters\"]\n\n if \"training_iteration\" in state:\n self._iteration = state[\"training_iteration\"]", "def _set_state(self, state):\n #print(\"** set state from %d to %d\" % (self.state, state))\n self.state = state", "def set_state(self, state):\n self.state = state", "def restore_state(self, ckpt):\n raise NotImplemented()", "def change_state(self,state):\n if self.__currentState:\n self.__currentState.stop()\n \n try:\n idler=self[state]\n except KeyError:\n raise \"%s is not a state of %s\" % (state,self)\n \n self.__currentState=idler()\n self.__currentState.idle()\n self.__currentState=None", "def restore(self, memento):\n self.state = memento.state", "def set_state(self, new_state):\n self.state = new_state", "def switch_to_state(self, state):\n self.switch_state = state", "def change_game_state(self, state):\n self._game_state = state", "def __setstate__(self, state):\n self.__dict__ = state\n self.freshly_loaded = True", "def set_state(self,s):\n self.state = s", "def _reinit(self):\n # If there are ready states still then it was a paused execution\n assert not self._ready_states\n assert not self._busy_states\n\n with self.locked_context(\"wasm.saved_states\", list) as saved_states:\n while saved_states:\n state_id = saved_states.pop()\n self._revive_state(state_id)", "def __setstate__(self, state):\n return None", "def setState(self, state):\n assert self.isValidState(state)\n self._state = state", "def setState(self, state):\n self.state = state", "def set_state(self, state):\n self.state = state\n self.config(fill=self.state)", "def SetState(self, new_state):\r\n\r\n self.state = new_state", "def switch_to_state(self, Rover, name):\n name.execute(Rover)\n self.curr_state = name", "def assign_state(self, state):\n raise NotImplementedError()", "def update_state(self, new_state):\n self.__state = new_state", "def set_workflow_state(self, state):\n self._gdb_interface.set_workflow_state(state)", "def set_state(self, state: int):\n self.state = state", "def set_state(self, state: ModelOptState):\n if self.model is None:\n assert self.opt is 
None\n self._state_to_load = state\n else:\n assert self.opt is not None\n self.model.load_state(state.model_state)\n self.opt.load_state(state.opt_state)", "def __setstate__(self, state):\n self.__dict__ = dict(state)\n self._init_compiled()", "def __setstate__(self, state):\n self.__dict__.update(state)", "def setstate(self, state):\r\n self._Xscale = state.pop()\r\n self._Xoffset = state.pop()\r\n self.output_dim = state.pop()\r\n self.likelihood = state.pop()\r\n self.kern = state.pop()\r\n self.input_dim = state.pop()\r\n self.num_data = state.pop()\r\n self.X = state.pop()\r\n Model.setstate(self, state)", "def resetEnv(self):\n obs = self.env.reset()\n self.state = torch.tensor(obs, device=self.device, dtype=torch.float).unsqueeze(0)\n return", "def _set_current_state(self, state):\n self._current_state = translate_state(\n rotate_state(state, -self._target_yaw), [0, -self._target_y])", "def revert_state(self):\n if self.previous_states > 0: # checks for empty\n self.update_status(self.previous_states.pop())", "def __setstate__(self, _state : dict):\n self.__init__(**_state)", "async def async_set_state(self, state):\n self._state = state", "def __setstate__(self, state: Dict[str, Any]):\n self.__dict__.update(state)\n self.__dict__['__db'] = None", "def restore_model_state(self, model: Block):\n if model in self.initial_state:\n from_json(model, sd=self.initial_state[model], wts=StoreState)\n else:\n self._update_summary(model, \"status\", InitializationStatus.Error)\n raise ValueError(\"No initial state stored.\")", "def set_current_state(self, s):\n self.current_state[:] = s[:]", "def state(self, state):\n self._state = state", "def setState(newState):\n global STATE\n if getattr(states, newState):\n STATE = newState\n else:\n raise Exception(\"State (%s) does not exist\" % newState)", "def reset(self):\n self.state = self.process_state(self.env.reset())\n return self.state", "def set_state(self, state):\n self.history = state", "def setstate(self, state=None):\n self.state = state or Pdod(self.datadir + os.sep + 'state')\n if self.state and not 'joinedchannels' in self.state.data: self.state.data.joinedchannels = []", "def __setstate__(self, state):\n # compatibility with data from previous versions\n self._name = \"\"\n self._user_data = dict()\n self.__loaded_from = None\n # Restore state. 
This overrides the above if contained in the data.\n self.__dict__.update(restore_dict(state))", "def savestate(self, state):\n pass", "def set_states(self, state_dict):\n self.trainer.get_model().load_state_dict(state_dict)", "def state(self, state):\n\n self._state = state", "def state(self, state):\n\n self._state = state", "def state(self, state):\n\n self._state = state", "def state(self, state):\n\n self._state = state", "def state(self, state):\n\n self._state = state", "def state(self, state):\n\n self._state = state", "def state(self, state):\n\n self._state = state", "def state(self, state):\n\n self._state = state", "def state(self, state):\n\n self._state = state", "def state(self, state):\n\n self._state = state", "def state(self, state):\n\n self._state = state", "def state(self, state):\n\n self._state = state", "def state(self, state):\n\n self._state = state", "def reset_state(self, snapshot, state):\n return self._action('os-reset_status', snapshot,\n {'status': state} if state else {})", "def reset(self, state):\n self.close(state)\n return self.open()", "def __setstate__(self, state):\n l, bl = state\n self.layers = l\n self.best_loss = bl", "def set_state(self, state: int):", "def set_state(self, state):\n for v in self._variables:\n self._state[self._mapping[v]] = state[v]\n if any(v not in {1, -1} for v in self._state):\n raise ValueError(\"State must contain only 1's and -1's\")", "def set_state(self, state):\n\n self.model = self.model_creator(self.config)\n self.epoch = state[\"epoch\"]\n self.model.set_weights(state[\"weights\"])", "def set_workflow_state(self, state):\n self._write_transaction(tx.set_workflow_state, state=state)", "def set_state(self, uState):\n self.strategy['state_handler'].set_state(self.state, uState)", "def __setstate__(self, state):\n state['_lock'] = Lock()\n self.__dict__.update(state)", "def set_state(self, state: int):\n self.splash.set_progress_value(self.state)\n self.state = state\n time.sleep(1)", "def reload_state(self):\n\n log.debug(\"Reload state from file %s\" % self.state_filename)\n if path.isfile(self.state_filename):\n with open(self.state_filename) as sf:\n self.state = yaml.safe_load(sf)\n\n if self.state is None:\n log.debug(\"Statefile returned none\")\n else:\n log.debug(\"Statefile does not exist\")\n self.state = {}", "def set_gamestate(self, state: GameState) -> None:\n self.root_state = deepcopy(state)\n self.root = Node()", "def repackage_state(self, state):\n state['hxs'] = state['hxs'].detach()\n state['cxs'] = state['cxs'].detach()\n return state", "def update_to_state(self, game_state):\n pass", "def state(self, state: str) -> None:\n self._state = state", "def load_state(self, state):\n raise NotImplemented", "def setstate(self,name,state):\n if (name not in KFNode.names):\n print ' state name ',name,' not in KNode!'\n self.states[name]=state.copy()\n self.status = name\n return", "def initialize_state(self, state):\n print 'state initialized'\n return state", "def setstate(self, state):\n\t\tif not self._input: raise PlumberExceptions.PipeTypeException(self)\n\t\tself._state.ustate = state", "def __setstate__(self, state: Dict[str, Any]) -> None:\n self.__dict__ = state.copy()\n # Once state is ingested - repopulate, NOT recursing.\n # Child segments will do it for themselves on unpickling.\n self.set_as_parent(recurse=False)", "def __setstate__(self, state):\n self.__dict__ = state\n self.get_esoh_solver = lru_cache()(self._get_esoh_solver)", "def set_classy_state(self, state: Dict[str, Any]) -> None:\n 
return self.load_state_dict(state)", "def restore_resume_state(self, key):\n new_state = self.statedb.get(key)\n if new_state is None:\n return False\n if \"_rev\" in self.value:\n backup_key = self.backup_resume_state(self.value)\n if backup_key is None:\n return False\n log.info(\"restoring iteration state: %s\", new_state)\n self.itr._save_state_json(new_state)\n return True", "def set_state(self, state: ProcessStateStr | core.QProcess.ProcessState):\n self.setProcessState(PROCESS_STATES.get_enum_value(state))", "def set_state(self, value):\n self.state = value" ]
[ "0.77453834", "0.7433805", "0.7283735", "0.72225296", "0.68740267", "0.68357325", "0.6653472", "0.6638538", "0.6638538", "0.65565914", "0.6460083", "0.6392536", "0.62604415", "0.6251954", "0.62414163", "0.6240911", "0.62085205", "0.6192636", "0.6182717", "0.6182548", "0.61741394", "0.6160149", "0.61569", "0.61424947", "0.611559", "0.60914224", "0.60886896", "0.60829705", "0.60418874", "0.60410625", "0.6028704", "0.60091877", "0.6003908", "0.6003528", "0.59803796", "0.59713763", "0.59629166", "0.59609944", "0.59450287", "0.5940008", "0.5926124", "0.59257114", "0.5920215", "0.5900574", "0.5897822", "0.5886518", "0.5879382", "0.5866519", "0.5862338", "0.586068", "0.5856512", "0.58544624", "0.5853142", "0.58487076", "0.5847937", "0.58459353", "0.5824786", "0.58242124", "0.5766279", "0.5732255", "0.5726712", "0.5703936", "0.5699086", "0.5699086", "0.5699086", "0.5699086", "0.5699086", "0.5699086", "0.5699086", "0.5699086", "0.5699086", "0.5699086", "0.5699086", "0.5699086", "0.5699086", "0.56876904", "0.56800985", "0.56770235", "0.5668451", "0.5631362", "0.562402", "0.5620619", "0.56194234", "0.5618887", "0.5616936", "0.56166816", "0.5598615", "0.55877477", "0.5587384", "0.5582544", "0.5581572", "0.5574231", "0.556446", "0.5558456", "0.5556727", "0.55513424", "0.5546342", "0.5537916", "0.55371946", "0.5529994" ]
0.6518785
10
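As a usage note for the `set_state()` record above: the value of the get_state()/set_state() pair is that a caller can snapshot the environment, try something, and rewind. The helper below is a hypothetical sketch; it relies only on the documented contract plus an assumed TF-Agents-style `step()` that returns a time step with a `reward` field.

```python
def evaluate_candidates(env, actions):
    """Score candidate actions from the same starting configuration.

    `env` is any environment exposing get_state(), set_state() and a
    step() returning an object with a `reward` attribute (assumption).
    """
    start = env.get_state()            # opaque token, treated as a black box
    scores = []
    for action in actions:
        env.set_state(start)           # rewind before every trial
        time_step = env.step(action)
        scores.append(time_step.reward)
    env.set_state(start)               # leave the environment as we found it
    return scores
```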
Updates the environment according to action and returns a `TimeStep`. See `step(self, action)` docstring for more details.
def _step(self, action: types.NestedArray) -> ts.TimeStep:
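A concrete `_step()` is sketched below, assuming the TF-Agents `py_environment` / `array_spec` / `time_step` helpers that the signature above suggests; the counter dynamics and the termination threshold are invented purely for illustration.

```python
import numpy as np
from tf_agents.environments import py_environment
from tf_agents.specs import array_spec
from tf_agents.trajectories import time_step as ts


class CounterEnv(py_environment.PyEnvironment):
    """Toy environment: the action (0 or 1) is added to a counter."""

    def __init__(self):
        super().__init__()
        self._action_spec = array_spec.BoundedArraySpec(
            shape=(), dtype=np.int32, minimum=0, maximum=1, name='action')
        self._observation_spec = array_spec.BoundedArraySpec(
            shape=(1,), dtype=np.int32, minimum=0, name='observation')
        self._count = 0

    def action_spec(self):
        return self._action_spec

    def observation_spec(self):
        return self._observation_spec

    def _reset(self):
        self._count = 0
        return ts.restart(np.array([self._count], dtype=np.int32))

    def _step(self, action):
        # A full implementation would typically reset here if the previous
        # step ended the episode; omitted to keep the sketch short.
        self._count += int(action)
        observation = np.array([self._count], dtype=np.int32)
        if self._count >= 10:  # terminate once the counter reaches 10
            return ts.termination(observation, reward=1.0)
        return ts.transition(observation, reward=0.0, discount=1.0)
```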
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def step(self, action: types.NestedArray) -> ts.TimeStep:\n if self._current_time_step is None or self.should_reset(\n self._current_time_step\n ):\n return self.reset()\n\n self._current_time_step = self._step(action)\n return self._current_time_step", "def step(\n self,\n action: Union[Dict[str, Any], np.ndarray],\n env_id: Optional[np.ndarray] = None,\n ) -> Union[TimeStep, Tuple]:", "def step(self, action):\n self.action = action\n return self.env.step(action)", "def step(self, action):\n return self._env.step(action)", "def step(self, action):\n if self._reset_next_step:\n return self.reset()\n\n # Apply the game_rules\n for rule in self.game_rules:\n rule.step(self._state, self._meta_state)\n\n # Apply the action\n self.action_space.step(self._state, action)\n\n # Step the physics\n self.physics.step(self._state)\n\n # Compute reward\n self.step_count += 1\n reward, should_reset = self.task.reward(\n self._state, self._meta_state, self.step_count)\n\n # Take observation\n observation = self.observation()\n\n # Return transition\n if should_reset:\n self._reset_next_step = True\n return dm_env.termination(reward=reward, observation=observation)\n else:\n return dm_env.transition(reward=reward, observation=observation)", "def step(self, action):\n self.t += 1\n state, reward, done, info = self.env.step(action)\n if self.use_action_masking:\n self.update_avail_actions(state)\n obs = {\n \"action_mask\": self.action_mask,\n \"actual_obs\": state,\n }\n else:\n obs = state\n \n if self.t >= 200: # ------ change if using v1\n done = True\n return obs, reward, done, info", "def step(self, action):\n return self.env.step(action)", "def step_env(self, action):\n return self.env.step(action)", "def apply_action(self, action):\n return self.__environment.step(action)", "def step(self, action):\n # Implement your step method here\n # return (observation, reward, done, info)\n self._state = self._state + action\n # print('Step state:', self._state)\n x, y = self._state\n reward = - (x ** 2 + y ** 2) ** 0.5\n done = abs(x) < 0.01 and abs(y) < 0.01\n next_observation = np.copy(self._state)\n return Step(observation=next_observation, reward=reward, done=done)", "def step(self, action):\n self.t += 1\n if self.use_run_time_assurance:\n probe_state, unsafe = self.probe_step(action)\n # switch to safe controller if unsafe\n if unsafe:\n x, x_dot, theta, theta_dot = probe_state\n # go right\n if x <= -self.x_threshold: # go right\n action = 1\n elif x>= self.x_threshold: # go left\n action = 0 \n \n state, reward, done, info = self.env.step(action)\n # Could make a custom reward here if you want\n if self.use_action_masking:\n self.update_avail_actions(state)\n obs = {\n \"action_mask\": self.action_mask,\n \"actual_obs\": state,\n }\n else:\n obs = state\n \n if self.t >= 200: # ------ change if using v1\n done = True\n return obs, reward, done, info", "def _step(self, action):\n if self._episode_ended:\n # The last action ended the episode. 
Ignore the current action and start a new episode\n return self.reset()\n\n env_action = self.y_train[self.id[self.episode_step]]\n self.episode_step += 1\n\n if action == env_action: # Correct action\n if env_action: # Minority\n reward = 1 # True Positive\n else: # Majority\n reward = self.imb_rate # True Negative\n\n else: # Incorrect action\n if env_action: # Minority\n reward = -1 # False Negative\n self._episode_ended = True # Stop episode when minority class is misclassified\n else: # Majority\n reward = -self.imb_rate # False Positive\n\n if self.episode_step == self.X_len - 1: # If last step in data\n self._episode_ended = True\n\n self._state = self.X_train[self.id[self.episode_step]] # Update state with new datapoint\n\n if self._episode_ended:\n return ts.termination(self._state, reward)\n else:\n return ts.transition(self._state, reward)", "def step(self, action):\n if self._reset_next_step:\n return self.reset()\n\n self._step_count += 1\n \n reward = self._action_space.step(\n action, self._sprites, keep_in_frame=self._keep_in_frame)\n\n # Update sprite positions from their velocities\n for sprite in self._sprites:\n sprite.update_position(keep_in_frame=self._keep_in_frame)\n\n reward += self._task.reward(self._sprites)\n observation = self.observation()\n\n if self.should_terminate():\n self._reset_next_step = True\n return dm_env.termination(reward=reward, observation=observation)\n else:\n return dm_env.transition(reward=reward, observation=observation)", "def step(self, action: list) -> None:\n self._input = np.array(\n [self._thrust_surge(action[0]), self._moment_steer(action[1])]\n )\n w, q = odesolver45(\n self._state_dot, self._state, self.config.simulation.t_step_size\n )\n\n self._state = q\n self._state[2] = geom.princip(self._state[2])\n\n self._prev_states = np.vstack([self._prev_states, self._state])\n self._prev_inputs = np.vstack([self._prev_inputs, self._input])\n\n self._step_counter += 1", "def step(self, action):\n self.move_step(action) # Move.\n r, d = self.check_goal() # Check the reward and done state, and create\n # new environment.\n s_new= self.render_env() # Render the new environment.\n return s_new, r, d", "def step(self, action):\n observation, reward, done, _ = self.env.step(action)\n return np.array(observation), reward, done", "def step(self, action):\n assert self.action_space.contains(action), \"%r (%s) invalid\"%(action, type(action))\n self.microgridPolicy.improveAction(action);\n\n self.microgrid.update();\n\n self.updateState();\n done = self.microgridPolicy.verifyStopConditions();\n reward = self.microgridPolicy.computeReward(done)\n if done: \n if self.steps_beyond_done is None:\n self.steps_beyond_done = 0\n else:\n logger.warn(\"You are calling 'step()' even though this environment has already returned done = True. You should always call 'reset()' once you receive 'done = True' -- any further steps are undefined behavior.\")\n self.steps_beyond_done += 1\n self.clock.increaseTimeStep();\n return self.state, reward, done, {}", "def step(self, action: np.ndarray) -> 'EnvStep':\n ...", "def step(self, action):\n # THIS WILL BE CALLED FROM ALL STEP DRIVERS\n self._world = self._action_wrapper.action_to_behavior(world=self._world,\n action=action)\n # 1. 
move the agent we set the action for\n controlled_agent_id = self._scenario._eval_agent_ids[self._action_wrapper._input_count-1]\n self._world.stepAgent(self._step_time, controlled_agent_id)\n\n # length of agents\n if self._action_wrapper._input_count >= len(self._scenario._eval_agent_ids):\n # CANNOT STEP WORLD IF NOT ALL ACTIONS ARE SET\n self._action_wrapper._input_count = 0\n \n # 2. move all other agent\n self._world.step(self._step_time)\n if self._render:\n self.render()\n\n # TODO needs to know the agents id\n return self.snapshot(\n world=self._world,\n controlled_agents=controlled_agent_id,\n action=action)", "def step(self, action: np.ndarray) -> Tuple[np.ndarray, np.float64, bool, dict]:\n next_state, reward, done, info = self.env.step(action)\n return next_state, reward, done, info", "def step(self, action):\n (self.state, self.reward, self.terminal, self.truncated,\n self.info) = self.env.step(action)\n\n return self.state, self.reward, self.terminal, self.truncated, self.info", "def takeAction(self, action):\n return self.env.step(action)", "def step(self, action: Union[np.ndarray, torch.Tensor]):\n if type(action) == torch.Tensor:\n action = action.squeeze().numpy()\n\n if not type(action) is np.ndarray:\n raise Exception(\"The action must be a Numpy array but is of type %s (value = %s)\" % (type(action), action))\n\n if self.increment_actions and not self.action_space.contains(action):\n action = action.clip(self.action_space.low, self.action_space.high)\n\n # Additionally, we must make sure the value will stay in the range\n # min <= x + action <= max\n if self.increment_actions:\n current_values = self.x[np.array([0, 1, 3, 5])]\n new_flow_values = current_values + action\n else:\n new_flow_values = action\n\n new_flow_values = np.clip(new_flow_values, self.flows_lower_bounds, self.flows_upper_bounds)\n self.update_all_flows(new_flow_values)\n\n if any([x < 0 for x in self.x]):\n pass\n # TODO: should I clip the actions to ensure the flows are always positive?\n # raise Exception(f\"Negative flows! 
x = {[round(x, 4) for x in self.x]}\")\n\n self.update_fitness()\n\n self.step_number += 1\n\n # reward = self.fitness - self.previous_fitness\n reward = self.fitness\n observation = self.get_observation()\n\n done = (self.step_number == self.total_number_of_episodes)\n info = {}\n return observation, reward, done, info", "def step(self, action):\n if self.space is None or self.spacecraft is None:\n raise NotImplementedError(\"The spacecraft must be initialized in the environment implementation\")\n\n self._simulate(action)\n\n obs = self.observation.observe()\n reward = self._reward(action)\n terminal = self._is_terminal()\n\n info = {\n \"velocity\": self.spacecraft.velocity,\n \"crashed\": self.spacecraft.crashed,\n \"action\": action,\n }\n try:\n info[\"cost\"] = self._cost(action)\n except NotImplementedError:\n pass\n\n return obs, reward, terminal, info", "def step(self, action):\n self._robot.send_command(action)\n\n obs = self.get_observation()\n\n reward = self.reward(obs.achieved_goal, self.goal)\n done = self.done(obs.achieved_goal, self.goal)\n next_observation = obs.observation\n return Step(observation=next_observation, reward=reward, done=done)", "def step(self, action):\n if self.platform is None:\n raise RuntimeError(\"Call `reset()` before starting to step.\")\n\n if not self.action_space.contains(action):\n raise ValueError(\n \"Given action is not contained in the action space.\")\n\n num_steps = self.frameskip\n\n # ensure episode length is not exceeded due to frameskip\n step_count_after = self.step_count + num_steps\n if step_count_after > self.episode_length:\n excess = step_count_after - self.episode_length\n num_steps = max(1, num_steps - excess)\n\n reward = 0.0\n for _ in range(num_steps):\n self.step_count += 1\n if self.step_count > self.episode_length:\n raise RuntimeError(\"Exceeded number of steps for one episode.\")\n\n # send action to robot\n robot_action = self._gym_action_to_robot_action(action)\n t = self.platform.append_desired_action(robot_action)\n\n # Use observations of step t + 1 to follow what would be expected\n # in a typical gym environment. 
Note that on the real robot, this\n # will not be possible\n observation = self._create_observation(t + 1)\n\n reward += self.compute_reward(observation, self.info)\n\n is_done = self.step_count == self.episode_length\n\n return observation, reward, is_done, self.info", "def performAction(self, action):\n self.action = action\n self.t += self.dt \n self.step()", "def step(self, action):\n done = self.cur_step >= self.max_steps_per_episode\n\n if done:\n raise RuntimeError(\"Episode is done\")\n\n self.cur_step += 1\n\n # Compute new state based on previous state and action\n new_state = self._take_action(action)\n\n # Compute reward value based on previous state and action\n reward = self._get_reward(action)\n\n # Update current state to new state\n self.cur_state = new_state\n\n # Compute observation from current state\n ob = self._get_obs() # Has to come after new state update\n\n # Update action, observation and reward histories\n self.action_episode_memory[self.cur_episode].append(action)\n self.observation_episode_memory[self.cur_episode].append(ob)\n self.reward_episode_memory[self.cur_episode].append(reward)\n\n # Recompute done since action may have modified it\n done = self.cur_step >= self.max_steps_per_episode\n\n return ob, reward, done, {}", "def step(self, action):\n obs, r, done, info = self.env.step(action)\n obs = self.get_observation(obs)\n return obs, r, self.is_done(), info", "def step(self, action):\n # check if suggested action is valid\n valid = self._take_action(action)\n if not valid:\n _, _ = self._simulate()\n response = self.worst_response\n target = 6*60\n else:\n # simulate until a TS response is needed\n response = np.inf\n while response == np.inf:\n response, target = self._simulate()\n if np.isnan(target): # prio 2 or 3 incident: no target exists\n target = response\n\n self.last_action = action if self.action_type == \"tuple\" else self.action_num_to_tuple[action]\n # calculate reward and new state\n self.reward = self._get_reward(response, target, valid=valid)\n self.state, self.is_done = self._extract_state()\n return self.state, self.reward, self.is_done, {\"note\": \"nothing to report\"}", "def step(self, action: np.ndarray) -> Tuple[np.ndarray, np.float64, bool]:\n next_state, reward, done, _ = self.env.step(action, resize=RESIZE, size = RESIZE_SIZE)\n\n if not self.is_test:\n self.transition += [reward, next_state, done]\n \n # N-step transition\n if self.use_n_step:\n one_step_transition = self.memory_n.store(*self.transition)\n # 1-step transition\n else:\n one_step_transition = self.transition\n\n # add a single step transition\n if one_step_transition:\n self.memory.store(*one_step_transition)\n \n return next_state, reward, done", "def step(self, action, update=True) -> tuple:\n if self.state.is_terminal():\n raise Exception('Cannot perform action on terminal state!')\n s = self.state if update else self.state.copy()\n if self.render:\n self.env.render()\n s.observation, reward, s.terminal, info = self.env.step(action)\n\n return s.copy() if update else s, reward", "def step(self, action: ActionType) -> EnvResponse:\n action = self.action_space.clip_action_to_space(action)\n if self.action_space and not self.action_space.contains(action):\n raise ValueError(\"The given action does not match the action space definition. 
\"\n \"Action = {}, action space definition = {}\".format(action, self.action_space))\n\n # store the last agent action done and allow passing None actions to repeat the previously done action\n if action is None:\n action = self.last_action\n self.last_action = action\n if self.visualization_parameters.add_rendered_image_to_env_response:\n current_rendered_image = self.get_rendered_image()\n\n self.current_episode_steps_counter += 1\n if self.phase != RunPhase.UNDEFINED:\n self.total_steps_counter += 1\n\n # act\n self._take_action(action)\n\n # observe\n self._update_state()\n\n if self.is_rendered:\n self.render()\n\n self.total_reward_in_current_episode += self.reward\n\n if self.visualization_parameters.add_rendered_image_to_env_response:\n self.info['image'] = current_rendered_image\n\n self.last_env_response = \\\n EnvResponse(\n reward=self.reward,\n next_state=self.state,\n goal=self.goal,\n game_over=self.done,\n info=self.info\n )\n\n # store observations for video / gif dumping\n if self.should_dump_video_of_the_current_episode(episode_terminated=False) and \\\n (self.visualization_parameters.dump_mp4 or self.visualization_parameters.dump_gifs):\n self.last_episode_images.append(self.get_rendered_image())\n\n return self.last_env_response", "def step(self, action):\n if self._backend_agent:\n self._backend_agent._on_gym_step_begin(self, action)\n\n result = self.env.step(action)\n (state, reward, done, info) = result\n self.steps_done_in_episode += 1\n self.steps_done_in_instance += 1\n self.total.steps_done_inc()\n if self.max_steps_per_episode and self.steps_done_in_episode >= self.max_steps_per_episode:\n done = True\n result = (state, reward, done, info)\n if not self.is_episode_done and done:\n self.is_episode_done = True\n self.episodes_done += 1\n self.total.episodes_done_inc()\n\n if self._backend_agent:\n self._backend_agent._on_gym_step_end(self, action, result)\n return result", "def step(self, action: ActionType) -> None:\n raise NotImplementedError", "def step(self, state, action, reward, done):\n\n self.memory.add(state, action, reward, done)\n if done and self.n_tau % self.update_freq == 0:\n self.n_tau += 1\n return self.update()\n return None", "def step(self, action: int):\n assert self.action_space.contains(action)\n loc = action\n if self.done:\n return self._get_obs(), 0, True, None\n\n reward = NO_REWARD\n # update bord\n self.board[loc] = to_code(self.mark)\n\n # check if game has ended\n status = check_game_status(self.board)\n if status >= 0:\n self.done = True\n if status in [1, 2]:\n reward = O_REWARD if self.mark == 'O' else X_REWARD\n\n # update mark\n self.mark = next_mark(self.mark)\n\n return self._get_obs(), reward, self.done, None", "def step(self, action):\n action = self.randomization.action_randomizer.randomize(\n action, self._random_state\n )\n\n robot_exception = None\n try:\n self._act(action)\n except RobotException as re:\n logger.error(\n f\"Robot raised exception: {str(re)}. 
This will finish the current episode.\"\n )\n robot_exception = re\n\n if not self.constants.physical:\n # We don't need to do stepping for physical roll out.\n self.mujoco_simulation.step()\n\n self._synchronize_step_time()\n self.t += 1\n\n obs, reward, done, info = self.get_observation(robot_exception=robot_exception)\n obs, reward, done, info = self.step_finalize(obs, reward, done, info)\n return obs, reward, done, info", "def step(self, action):\n assert self.action_space.contains(\n action), \"%r (%s) invalid\" % (action, type(action))\n self.time_step += 1\n reward = float(0)\n self.is_episode_done = False\n\n # For testing code\n current_edge_agg_num = self.time_step\n\n # Rescale the action from [-1, 1] to [1, 2, ... , 9]\n # The action is the number of aggregations on edge servers\n # current_edge_agg_num = int((action + 2) * (action + 2))\n\n logging.info(\"RL Agent: Start time step #%s...\", self.time_step)\n logging.info(\n \"Each edge server will run %s rounds of local aggregation.\",\n current_edge_agg_num)\n\n # Pass the tuned parameter to RL agent\n self.rl_agent.get_tuned_para(current_edge_agg_num, self.time_step)\n\n # Wait for state\n current_loop = asyncio.get_event_loop()\n get_state_task = current_loop.create_task(self.wait_for_state())\n current_loop.run_until_complete(get_state_task)\n #print('State:', self.state)\n\n self.normalize_state()\n #print('Normalized state:', self.state)\n\n reward = self.get_reward()\n info = {}\n\n self.rl_agent.cumulative_reward += reward\n\n # Signal the RL agent to start next time step (next round of FL)\n self.step_done.set()\n\n return np.array([self.state]), reward, self.is_episode_done, info", "def step(self, action):\n obs = self.gym.get_observations()\n all_actions = self.gym.act(obs)\n all_actions.insert(self.gym.training_agent, action)\n state, reward, terminal, info = self.gym.step(all_actions)\n agent_state = self.featurize(state[self.gym.training_agent])\n\n # agent_state_history = self.make_observation(agent_state, self.step)\n agent_reward = reward[self.gym.training_agent]\n\n # self.step += 1\n return agent_state, agent_reward, terminal, info", "def step(self, action):\n\n if not self._is_action_legal(action):\n return self.current_state, self.reward_illegal_action, self._is_terminal_state(), None\n else:\n # Change action passed if environment should behave random\n if self.stochastic:\n if not np.random.choice([True, False], 1, p=[self.p, 1 - self.p]):\n action = np.random.choice(self.possible_actions)\n\n # Needed for reward calculation (must be done before updating data structures)\n number_of_shifts = self._get_number_of_shifts(action)\n is_cargo_mandatory = int(self.vehicle_data[2][action] == 1)\n\n slot = self.end_of_lanes[self.current_Lane]\n self.loading_sequence += \"{}. 
Load Vehicle Type \\t {} \\t in Lane: \\t {} \\t Row: \\t {} \\n\" \\\n .format(self.sequence_no, action, self.current_Lane, slot)\n\n self.end_of_lanes[self.current_Lane] += self.vehicle_data[4][action]\n\n if self.vehicle_data[1][action] == -1 or \\\n self.number_of_vehicles_loaded[action] < self.vehicle_data[1][action]:\n self.number_of_vehicles_loaded[action] += 1\n\n self.loaded_vehicles[self.current_Lane][self.vehicle_Counter[self.current_Lane]] = action\n self.vehicle_Counter[self.current_Lane] += 1\n\n # Update grids\n for i in range(self.vehicle_data[4][action]):\n self.grid.T[self.current_Lane][slot + i] = self.sequence_no\n self.grid_destination.T[self.current_Lane][slot + i] = self.vehicle_data[3][action]\n self.grid_vehicle_type.T[self.current_Lane][slot + i] = self.vehicle_data[0][action]\n\n # Update lowest destination data structure\n if self.vehicle_data[3][action] < self.lowest_destination[self.current_Lane]:\n self.lowest_destination[self.current_Lane] = self.vehicle_data[3][action]\n\n self.sequence_no += 1\n # Update according to lane selection heuristic\n self.current_Lane = self._get_minimal_lanes()[0]\n\n self.possible_actions = self.get_possible_actions_of_state()\n self.current_state = self._get_current_state()\n\n if self._is_terminal_state():\n # Calculate reward for terminal state\n free_spaces = np.sum(self._get_free_capacity()) / np.sum(self.total_capacity)\n mandatory_vehicles_left_to_load = np.sum(self.vehicle_data[1][self.mandatory_cargo_mask]\n - self.number_of_vehicles_loaded[self.mandatory_cargo_mask])\n reward_features = np.array(\n [is_cargo_mandatory, number_of_shifts, free_spaces, mandatory_vehicles_left_to_load])\n reward = np.dot(self.reward_system, reward_features) + self.zeta\n\n return self.current_state, reward, True, {}\n else:\n # Calculate reward\n reward_features = np.array([is_cargo_mandatory, number_of_shifts, 0, 0])\n reward = np.dot(self.reward_system, reward_features) + self.zeta\n\n return self.current_state, reward, False, {}", "def step(\n self, action: Union[numpy.ndarray, int], state: numpy.ndarray = None, dt: int = None\n ) -> tuple:\n data = super(AtariEnvironment, self).step(action=action, state=state, dt=dt)\n if state is None:\n observ, reward, terminal, info = data\n observ = self.gym_env.unwrapped.ale.getRAM() if self.obs_ram else observ\n return observ, reward, terminal, info\n else:\n state, observ, reward, terminal, info = data\n observ = ale_to_ram(self.gym_env.unwrapped.ale) if self.obs_ram else observ\n return state, observ, reward, terminal, info", "def step(self, action):\n new_speed = self._state + action\n new_speed[np.where(new_speed < 0)] = 0\n for car_idx in range(self.num_cars):\n # almost instantaneous\n traci.vehicle.slowDown(self.controllable[car_idx], new_speed[car_idx], 1)\n traci.simulationStep()\n self._state = np.array([traci.vehicle.getSpeed(vID) for vID in self.controllable])\n reward = self.compute_reward(self._state)\n # done = np.all(abs(self._state-self.GOAL_VELOCITY) < self.delta)\n next_observation = np.copy(self._state)\n return Step(observation=next_observation, reward=reward, done=False)", "def step(self, action, agent_index=0):\n return self.env.step(action)", "def step(self):\n updating_env = {} if self.independent_update else self.env\n for a in self.agents:\n if self.i % a.period == 0:\n action = a(self.env)\n if a.name is not None:\n updating_env[a.name] = action\n if self.independent_update:\n self.env.update(updating_env)\n self.i += 1", "def step(self, action):\n force = 
self.force_mag if action else -self.force_mag\n costheta = math.cos(self.theta)\n sintheta = math.sin(self.theta)\n temp = (\n force + self.polemass_length * self.theta_dot ** 2 * sintheta\n ) / self.total_mass\n thetaacc = (self.gravity * sintheta - costheta * temp) / (\n self.length\n * (4.0 / 3.0 - self.masspole * costheta * costheta / self.total_mass)\n )\n xacc = temp - self.polemass_length * thetaacc * costheta / self.total_mass\n self.x += self.tau * self.x_dot\n self.x_dot += self.tau * xacc\n self.theta += self.tau * self.theta_dot\n self.theta_dot += self.tau * thetaacc\n\n return self.state", "def step(self, _action):\n action = np.hstack((np.zeros(6), _action/10.))\n self.ref_skel.set_positions(self.ref_state.angles)\n for i in range(self.step_per_frame):\n # self.skel.set_forces(self.skel.get_spd(self.ref_skel.q + action, self.world.time_step(), self.Kp, self.Kd))\n self.skel.set_forces(self.skel.get_spd(self.ref_state.angles + action, self.world.time_step(), self.Kp, self.Kd))\n self.world.step()\n\n self.ref_state_time += self.step_per_frame * self.world.time_step()\n if self.ref_state_time >= self.ref_state.dt:\n self.ref_state_time -= self.ref_state.dt\n self.ref_state = self.ref_state.get_next()\n\n return tuple([self.state(), self.reward(), self.is_done(), dict()])", "def step(self, action) -> (list, float, bool):\n if len(self.curr_stim) == 0:\n self.curr_stim += [action[0]] * action[1] + [-action[0]] * action[1]\n self.system_step()\n self.frame += 1 / self.config[\"Fs\"]\n self.history.append(self.x2-self.x1)\n return self.get_state(), 0, False", "def step(self, action: Action) -> Tuple[Observation, float, bool, bool, dict]:\n if self.road is None or self.vehicle is None:\n raise NotImplementedError(\"The road and vehicle must be initialized in the environment implementation\")\n\n self.time += 1 / self.config[\"policy_frequency\"]\n self._simulate(action)\n\n obs = self.observation_type.observe()\n reward = self._reward(action)\n terminated = self._is_terminated()\n truncated = self._is_truncated()\n info = self._info(obs, action)\n if self.render_mode == 'human':\n self.render()\n\n return obs, reward, terminated, truncated, info", "def step(self, action):\n # if self.current_turn<self.MAX_TURNS-1:\n # self.current_turn += 1\n \n\n self.current_turn += 1\n system_action = self.parseAction(action)\n \n # Used for logging and evaluation\n self.updateMetaState(system_action)\n\n self.processSystemAction(system_action)\n\n reward = self.calculateReward()\n\n user_action = self.user.respond(system_action)\n self.processUserAction(user_action)\n observation = self.generateObservation()\n done = self.isDone()\n if done:\n info = { \"successful\": self.user.goals[\"satisfied\"], \n \"first-appearance\": self.first_appearance, \n \"turn-penalty\": self.current_turn,\n \"sugg-all-penalty\":self.sugg_penalty,\n \"info-all-penalty\": self.info_penalty,\n \"eli-kw-used\": self.eli_kw_observed,\n \"eli-query-used\": self.eli_query_observed,\n }\n else:\n info = {}\n if self.training:\n if done and self.user.goals[\"satisfied\"]: reward+=30\n return observation, reward, done, info", "def step(self, state, action, reward, next_state, done):\n # Save experience in replay memory\n self.memory.add(state, action, reward, next_state, done)\n\n # Learn every UPDATE_EVERY time steps.\n self.t_step = (self.t_step + 1) % PARAM.UPDATE_EVERY\n if self.t_step == 0:\n if len(self.memory) > PARAM.BATCH_SIZE:\n experiences = self.memory.sample()\n self.learn(experiences, PARAM.GAMMA)", "def 
step(self, action):\n raise NotImplementedError", "def step(self, action):\n self.timestep += 1\n self.actions = action.ravel()\n\n # Figure out which action was taken\n self.acted = False\n self.eat = False\n self.discard = False\n if action[0] > .5:\n self.eat = True\n self.acted = True\n elif action[1] > .5:\n self.discard = True\n self.acted = True\n\n # Check whether the appropriate action was taken, and assign reward.\n # There is a small punishment for doing nothing.\n self.reward = -.1\n if ((self.eat and self.edible) or\n (self.discard and not self.edible)):\n self.reward = 1.\n elif ((self.eat and not self.edible) or\n (self.discard and self.edible)):\n self.reward = -.9\n\n if self.acted:\n self.grab_fruit()\n\n return self.sensors, self.reward", "def env_step(self, action):\n state, reward, done, info = self.env.step(action)\n state = self.feature_extractor.extract_features(state)\n\n return state, reward, done, info", "def step(self, action: Action) -> Feedback: # type: ignore\n self._action_counter += 1\n step_id = self._action_counter\n\n self._encode_and_send_action(action, step_id)\n\n # Wait (blocking!) for the response envelope from the environment\n in_envelope = self._queue.get(block=True, timeout=None) # type: Envelope\n\n msg = self._decode_percept(in_envelope, step_id)\n\n observation, reward, done, info = self._message_to_percept(msg)\n\n return observation, reward, done, info", "def step(self, action):\n\n \"\"\"\n Here we should convert the action num to movement action, execute the action in the\n simulation and get the observations result of performing that action.\n \"\"\"\n #if self.step_number > 200:\n #self.reset()\n rospy.logdebug(\"START STEP OpenAIROS\")\n\n self.gazebo.unpauseSim()\n self._set_action(action)\n #self._prey_step()\n self.gazebo.pauseSim()\n obs = self._get_obs()\n done = self._is_done(obs)\n info = {}\n reward = self._compute_reward(obs, done)\n \n self.cumulated_episode_reward = self.cumulated_episode_reward+ reward\n self.step_number += 1\n rospy.logdebug(\"END STEP OpenAIROS\")\n\n return obs, reward, done, info", "def step(self, action: nx.Graph):\n # Get the SMILES string associated with this action\n self._state = action\n if self.record_path:\n self._path.append(self._state)\n\n # Update the action space\n self.action_space.update_actions(self._state, self.observation_space)\n self._counter += 1\n\n # Check if we have finished\n # Out of steps or no more moves\n done = len(self.action_space.get_possible_actions()) == 0\n\n # Compute the fingerprints for the state\n return self._state, self.reward(), done, {}", "def step(self, action):\n raise NotImplementedError()", "def step(self, action):\r\n new_img, reward, done, info = self.env.step(action)\r\n self.update_buffer(new_img)\r\n return self.framebuffer, reward, done, info", "def step(self, action):\n assert self.action_space.contains(action)\n\n if self.done:\n return self._get_obs(), 0, True, None\n\n reward = NO_REWARD\n\n # place\n col = action\n row = get_row(self.board, col)\n\n self.board[row, col] = tocode(self.mark)\n self.turn += 1\n self.status = check_game_status(self.board, row, col)\n\n if self.status >= 0:\n self.done = True\n if self.status in [1, 2]:\n # always called by self\n reward = O_REWARD if self.mark == 'O' else X_REWARD\n\n # switch turn\n self.mark = next_mark(self.mark)\n return self._get_obs(), reward, self.done, None", "def step(self, state, action, reward, next_state, done):\n \n # Save experience in replay memory\n self.memory.add(state, action, 
reward, next_state, done)\n\n # Learn every UPDATE_EVERY time steps.\n self.t_step = (self.t_step + 1) % self.params.update_every\n if self.t_step == 0:\n # If enough samples are available in memory, get random subset and learn\n if len(self.memory) > self.params.batch_size:\n experiences = self.memory.sample()\n self.learn(experiences, self.params.gamma)", "def step(self, action):\n if self._reset_next_step:\n self._reset_next_step = False\n return self.reset()\n\n self._hooks.before_step(self._physics_proxy, action, self._random_state)\n self._observation_updater.prepare_for_next_control_step()\n\n try:\n for i in range(self._n_sub_steps):\n self._hooks.before_substep(self._physics_proxy, action,\n self._random_state)\n self._physics.step()\n self._hooks.after_substep(self._physics_proxy, self._random_state)\n # The final observation update must happen after all the hooks in\n # `self._hooks.after_step` is called. Otherwise, if any of these hooks\n # modify the physics state then we might capture an observation that is\n # inconsistent with the final physics state.\n if i < self._n_sub_steps - 1:\n self._observation_updater.update()\n physics_is_divergent = False\n except control.PhysicsError as e:\n if not self._raise_exception_on_physics_error:\n logging.warning(e)\n physics_is_divergent = True\n else:\n raise\n\n self._hooks.after_step(self._physics_proxy, self._random_state)\n self._observation_updater.update()\n\n if not physics_is_divergent:\n reward = self._task.get_reward(self._physics_proxy)\n discount = self._task.get_discount(self._physics_proxy)\n terminating = (\n self._task.should_terminate_episode(self._physics_proxy)\n or self._physics.time() >= self._time_limit\n )\n else:\n reward = 0.0\n discount = 0.0\n terminating = True\n\n obs = self._observation_updater.get_observation()\n\n if not terminating:\n return dm_env.TimeStep(dm_env.StepType.MID, reward, discount, obs)\n else:\n self._reset_next_step = True\n return dm_env.TimeStep(dm_env.StepType.LAST, reward, discount, obs)", "def step(self, action):\r\n reward = self.__execute(action)\r\n self.__totalReward += reward\r\n status = self.__status()\r\n state = self.__observe()\r\n logging.debug(\"action: {:10s} | reward: {: .2f} | status: {}\".format(Action(action).name, reward, status))\r\n return state, reward, status", "def step(self, action):\n assert action in self.infoset.legal_actions\n self.players[self._acting_player_position].set_action(action)\n self._env.step()\n self.infoset = self._game_infoset\n done = False\n reward = 0.0\n if self._game_over:\n done = True\n reward = {\n \"play\": {\n \"landlord\": self._get_reward(\"landlord\"),\n \"landlord_up\": self._get_reward(\"landlord_up\"),\n \"landlord_down\": self._get_reward(\"landlord_down\")\n },\n \"bid\": {\n \"landlord\": self._get_reward_bidding(\"landlord\")*2,\n \"landlord_up\": self._get_reward_bidding(\"landlord_up\"),\n \"landlord_down\": self._get_reward_bidding(\"landlord_down\")\n }\n }\n obs = None\n else:\n obs = get_obs(self.infoset)\n return obs, reward, done, {}", "def update(self, action):\n self._update_noise()\n self._update_state(action)\n self._update_status()\n self.steps += 1\n # Return current state and error\n return self.observation, self.error # returns two unctions when called", "def step(self, state, action, reward, next_state, done):\n # Save experience in replay memory\n self.memory.add(state, action, reward, next_state, done)\n\n # Learn every UPDATE_EVERY time steps.\n self.t_step = (self.t_step + 1) % UPDATE_EVERY\n if 
self.t_step == 0:\n # If enough samples are available in memory, get random subset and learn\n if len(self.memory) > BATCH_SIZE:\n experiences = self.memory.sample()\n self.learn(experiences, GAMMA)", "def _step(self, action: np.ndarray):\n self.robot.step({\n 'dkitty': action,\n })", "def step(self, action):\n # get the instruction indicated by the action\n instr = self.instrs[action]\n # extend the program\n self.program.inst(instr)\n # run and get some measured bitstrings\n self.bitstrings, info = self._run_program(self.program)\n # compute the avg score of the bitstrings\n reward = self._prob_score(self.bitstrings)\n self.running_episode_reward += reward\n\n info[\"instr\"] = instr\n info[\"reward-nb\"] = reward\n self.current_step += 1\n\n # are we done yet?\n done = False\n if self.current_step >= MAX_PROGRAM_LENGTH:\n done = True\n if reward >= self.reward_threshold:\n reward += MAX_PROGRAM_LENGTH - self.current_step\n done = True\n\n return self.observation, reward, done, info", "def step(self):\n old_state = self.state\n action = self.get_next_action()\n new_state, reward, failed = self.env.step(action)\n if self.training_mode:\n if self.violated_soft_constraint or failed:\n self.Q_model.update(old_state, action, new_state, reward,\n failed)\n self.safety_model.update(old_state, action, new_state, reward,\n failed)\n self.updated_safety = True\n else:\n self.Q_model.update(old_state, action, new_state, reward,\n failed)\n self.updated_safety = False\n else:\n self.updated_safety = False\n self.state = new_state\n self.last_action = action\n return new_state, reward, failed", "def step(self, action):\n self._action = action\n if action[0] < 0: # Only allow forward direction\n action[0] = 0\n\n # Publish action via ROS\n msg = ackermann_msgs.msg.AckermannDriveStamped()\n msg.drive.speed = action[0]\n msg.drive.steering_angle = action[1]\n msg.header.stamp = self._sensor_stamp\n self.publisher.publish(msg)\n\n # Wait for next state readings\n self._num_states_received = self._num_states_needed\n while self._num_states_received > 0:\n time.sleep(0.00001)\n nextstate = self._current_state\n next_observation = self._state_to_observation(nextstate)\n self._state = nextstate\n\n reward, info = self.get_reward(nextstate, action)\n return Step(observation=next_observation, reward=reward,\n done=False, dist=info['dist'], vel=info['vel'],\n kappa=self._model.kappa)", "def step(self):\n self.step_n += 1\n self.step_t += 1\n # TODO: directly calling agent.act will by-pass BaseDeepAgent, which\n # checks and assigns 'sess' arugment. So we manually set sess here. 
But\n # is there a better way to do this?\n self.action = self.agent.act(\n state=self.state, sess=self.agent.sess\n )\n next_state, vec_reward, done, _ = self.env.step(self.action)\n reward, done = func_compile_exp_agent(self.action, vec_reward, done)\n self.total_reward = reward + self.reward_decay * self.total_reward\n info = self.agent.step(\n state=self.state, action=self.action, reward=reward,\n next_state=next_state, episode_done=done\n )\n self.record(info)\n flag_success = True if done and reward > 0.0 else False\n if self.savedir is not None:\n self.steps_saver.save(self.episode_n, self.step_t, self.state, self.action,\n vec_reward, reward, done, self.total_reward, flag_success)\n self.state = next_state\n if done:\n self.step_t = 0\n return done", "def _step(self, action):\n\n # action is generated from the action_policy (external to the environment)\n if len(action) == 4:\n object_index, new_location, action_means, action_stds = action\n if len(action) == 2:\n \"\"\"\n Action is not generated from a Gaussian distribution\n \"\"\"\n object_index, new_location = action\n action_means = action_stds = None\n \n position = new_location[:2]\n rotation = new_location[2]\n\n prev_transform = self.e.objects[object_index].transform\n\n if len(self.action_storage) > 0:\n last_progress = self.action_storage[-1][4]\n else:\n last_progress = 0\n\n info = {}\n if self.e.act(object_index, Command(position, rotation)):\n # print ('Action accepted')\n cur_transform = self.e.objects[object_index].transform\n # I need to call self.action_storage.append before get_observation_and_progress\n self.action_storage.append( [object_index, prev_transform, cur_transform, None, None, True, action_means, action_stds] )\n observation, progress = self.get_observation_and_progress()\n self.action_storage[-1][3:5] = [observation, progress]\n\n info['action_accepted'] = True\n else:\n \"\"\"\n Action failed\n We can reduce the progress to avoid falling out of the table\n \"\"\"\n if len(self.action_storage) > 0:\n # Just return observation and progress of last action\n _, _, _, observation, progress, _, _, _ = self.action_storage[-1]\n progress -= self.config.failed_action_penalty\n else:\n # First action failed\n observation, _ = self.get_observation_and_progress()\n progress = -self.config.failed_action_penalty\n \n self.action_storage.append( [object_index, prev_transform, prev_transform, observation, progress, False, action_means, action_stds] )\n\n \n info['action_accepted'] = False\n\n # Typical threshold approach\n if progress > self.progress_threshold:\n # Finish action\n done = True\n else:\n done = False\n \n reward = progress - last_progress\n #print ('Progress = %.2f ; reward = %.2f' % (progress, reward))\n\n return (observation, reward, done, info)", "def step(self, action):\n reward = 0\n pose_all = []\n self.rotor_speeds = np.array([action]*4)\n for _ in range(self.action_repeat):\n done = self.sim.next_timestep(self.rotor_speeds) # update the sim pose and velocities\n reward += self.get_reward()\n pose_all += [self.sim.pose]\n if self.sim.crashed:\n reward = -5\n done = True\n #if (np.square(self.sim.pose[:3] - self.target_pos)).sum() < 1: # Close enough!\n #done = True\n next_state = np.concatenate(pose_all)\n return next_state, reward, done", "def step(self, action):\n x, y = self._move(action, *self._currentPos)\n\n if chr(self._grid[x, y]) == CASE_TYPES.Wall:\n # error - previous state was already a wall\n self._done = True\n self._trajectory.append(self._currentPos)\n return self._currentPos, 
-1, self._done, {}\n\n reward = {\n CASE_TYPES.Water: self.waterReward,\n CASE_TYPES.Sand: self.sandReward,\n CASE_TYPES.Open: self.stepReward,\n CASE_TYPES.Termination: self.successReward,\n CASE_TYPES.Trap: (\n -(self.maxSteps - len(self._trajectory)) + self.failureReward +\n self.trapReward)\n }[chr(self._grid[x, y])]\n\n # termination state\n if chr(self._grid[x, y]) in [CASE_TYPES.Termination, CASE_TYPES.Trap]:\n self._done = True\n\n self._currentPos = (x, y)\n\n self._trajectory.append(self._currentPos)\n self._nbSteps += 1\n\n if self._nbSteps >= self.maxSteps and not self._done:\n reward += self.failureReward\n\n return self._currentPos, reward, self._done, {}", "def step(self, state, action, reward, next_state, done):\n\n # Save experience in replay memory\n self.memory.add(state, action, reward, next_state, done)\n \n # Learn every self.update_every time steps\n self.t_step = (self.t_step + 1) % self.update_every\n\n # Get random subset & learn if enough samples available in memory\n if self.t_step == 0:\n if len(self.memory) > self.batch_size:\n experiences = self.memory.sample()\n self.learn(experiences, self.gamma)\n \n return", "def step(self, action):\r\n s = self.get_state()\r\n\r\n elements = np.arange(self.S)\r\n # weights = np.squeeze(self.nextStateProbability[s,action])\r\n weights = self.nextStateProbability[s, action]\r\n nexts = choices(elements, weights, k=1)[0]\r\n\r\n # p = self.nextStateProbability[s,action]\r\n # reward = self.rewardsTable[s,action, nexts][0]\r\n reward = self.rewardsTable[s, action, nexts]\r\n\r\n # fully observable MDP: observation is the actual state\r\n self.currentObservation = nexts\r\n\r\n gameOver = False\r\n if self.currentIteration > np.Inf:\r\n ob = self.reset()\r\n gameOver = True # game ends\r\n else:\r\n ob = self.get_state()\r\n\r\n history = {\"time\": self.currentIteration, \"state_t\": s, \"action_t\": action,\r\n \"reward_tp1\": reward, \"state_tp1\": nexts}\r\n # history version with actions and states, not their indices\r\n # history = {\"time\": self.currentIteration, \"action_t\": self.actionListGivenIndex[action],\r\n # \"reward_tp1\": reward, \"observation_tp1\": self.stateListGivenIndex[self.get_state()]}\r\n self.currentIteration += 1\r\n return ob, reward, gameOver, history", "def _step(self, action: np.ndarray):\n # TODO: How do deal with goal changing?\n denormalize = False if self.use_raw_actions else True\n current_pos = self.sim.data.mocap_pos.copy()\n meanval = (self.mocap_pos_clip_upper + self.mocap_pos_clip_lower)/2.0\n rng = (self.mocap_pos_clip_upper - self.mocap_pos_clip_lower)/2.0\n new_pos = action[:3]*rng + meanval #current_pos + action[:3]*self.range\n # new_pos = current_pos + action[:3]*self.range\n new_pos = np.clip(new_pos, self.mocap_pos_clip_lower, self.mocap_pos_clip_upper)\n self.sim.data.mocap_pos[:] = new_pos.copy()\n self.robot.step({\n 'gripper': action[-2:]\n }, denormalize)", "def step(self, action_index):\n\n x_t1, r_t, terminal, info = self.env.step(self.gym_actions[action_index])\n s_t1 = self.get_preprocessed_RAM(x_t1)\n\n \n return s_t1, r_t, terminal, info", "def step(self, action):\n observation, reward, done, info = self.env.step(action)\n observation = cv2.resize(observation, (self.size, self.size))\n observation = np.array(observation, dtype=np.uint8)\n observation = observation.transpose(2, 0, 1)\n info = \"\"\n return observation, reward, done, info", "def step(self, action):\n \n # Check if the given position is empty\n if self.mat[action[0], action[1]] != 0:\n return 
(self.mat, -0.9, False)\n \n # Update\n self.mat[action[0], action[1]] = 1\n\n # Check if User won\n if self._check_win(1):\n return (self.mat, 1.0, True)\n\n # Check for game end\n acts = self.action_space\n if len(acts) == 0:\n return (self.mat, 0, True)\n\n # If not done, then randomly spawn an 'O' on the board and recalculate the reward\n spawn_point = acts[np.random.choice(acts.shape[0])]\n self.mat[spawn_point[0], spawn_point[1]] = 2\n\n # Check if User lost\n if self._check_win(2):\n return (self.mat, -1.0, True)\n \n # If nothing wrong happens\n else:\n return (self.mat, -0.1, False)", "def step(self, action):\n state, reward, done, debug_info = self.sample_transition(action)\n self.set_state(state)\n if \"next_state_heuristic\" in debug_info:\n self._current_heuristic = debug_info[\"next_state_heuristic\"]\n return state, reward, done, debug_info", "def step(self, observation, **extra_feed):\n\n action = self._evaluate(self.action, observation, **extra_feed)\n return action", "def step(self, observation, **extra_feed):\n\n action = self._evaluate(self.action, observation, **extra_feed)\n return action", "def step(self, action):\n reward_all = 0\n pose_all = []\n raw_states = []\n for _ in range(self.action_repeat):\n state, reward, done, _ = self.env.step(action) # run up the mountain\n\n processed_state = self.preprocess_state(state)\n raw_states.append(state)\n\n if done and self.i < 200:\n self.success = True\n\n reward_all += reward\n pose_all.append(processed_state)\n\n self.i += 1\n\n if done:\n missing = self.action_repeat - len(pose_all)\n pose_all.extend([pose_all[-1]] * missing)\n break\n\n next_state = np.concatenate(pose_all)\n return next_state, reward_all, done, raw_states", "def step(self, action, render=False):\n if self.scale == 1:\n # Scale the action\n action = np.multiply(action, self.scale_mult) + self.scale_add\n elif self.scale == 0:\n action = np.minimum(np.maximum(action, self.min_action), self.max_action)\n else:\n raise NotImplementedError\n\n # Publish action\n vel_cmd = 1.0 # action[0]\n steer_cmd = action # [1]\n self.__publish_cmd(vel_cmd, steer_cmd)\n\n # Wait specified time\n self.rate.sleep()\n\n # Collect new state\n next_state = self.get_state()\n reward, min_dist = self.__calculate_reward()\n # print(reward)\n done, exit_cond = self.__is_terminal(reward)\n\n # if not render:\n # # Reward for following the waypoints closely\n # reward = max((1. 
- min_dist) / 10., 0.0) + self.prev_reward\n # self.prev_reward = reward\n # if exit_cond:\n # reward = 0.0 # -10.0\n\n # else:\n # if exit_cond:\n # reward = -1.0\n \n if exit_cond:\n reward = 0.0\n\n # reward *= 10\n # print(reward)\n return next_state, reward, done, exit_cond", "def env_step(action: np.ndarray) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n state, reward, done, _ = env.step(action)\n return (\n state.astype(np.float32),\n np.array(reward, np.float32),\n np.array(done, np.int32),\n )", "def step(self, action):\n self._last_base_position = self.rex.GetBasePosition()\n self._last_base_orientation = self.rex.GetBaseOrientation()\n if self._is_render:\n # Sleep, otherwise the computation takes less time than real time,\n # which will make the visualization like a fast-forward video.\n time_spent = time.time() - self._last_frame_time\n self._last_frame_time = time.time()\n time_to_sleep = self.control_time_step - time_spent\n if time_to_sleep > 0:\n time.sleep(time_to_sleep)\n base_pos = self.rex.GetBasePosition()\n # Keep the previous orientation of the camera set by the user.\n [yaw, pitch, dist] = self._pybullet_client.getDebugVisualizerCamera()[8:11]\n self._pybullet_client.resetDebugVisualizerCamera(dist, yaw, pitch, base_pos)\n\n for env_randomizer in self._env_randomizers:\n env_randomizer.randomize_step(self)\n\n # change up swing and stance ratio and desired speeds randomly for robustness\n if np.random.randint(300) == 0:\n self.ratio = np.random.uniform(self.min_swing_ratio, self.max_swing_ratio)\n\n if np.random.randint(300) == 0:\n self.speed = np.random.uniform(self.min_speed, self.max_speed)\n self.speed_des[0] = self.speed\n\n if np.random.randint(300) == 0:\n self.side_speed = np.random.uniform(self.min_side_speed, self.max_side_speed)\n self.speed_des[1] = self.side_speed\n\n self.base_vel_curr_trans, self.base_vel_curr_rot = self.get_base_velocity()\n action = self._transform_action_to_motor_command(action)\n self.rex.Step(action)\n self.base_vel_next_trans, self.base_vel_next_rot = self.get_base_velocity()\n \n self._env_step_counter += 1\n self.phase += self._action_repeat # the cycle length is CYCLE_TIME/time_step so can add \n # how many times an action was repeated\n\n if self.phase > self.cycle_len:\n self.phase = self.phase % self.cycle_len \n self.cycle_complete += 1\n\n reward = self._reward()\n done = self._termination()\n\n if done:\n self.rex.Terminate()\n\n return np.array(self._get_observation_np()), reward, done, {'action': action}", "def step(self, action):\n \n success = False\n self.curr_step += 1\n self._take_action(action)\n self._take_action(action)\n self._take_action(action)\n\n # initialize reward and get state \n reward = 0.0\n ob = self._get_state()\n\n # give dense rewards \n if not self.sparse_reward:\n reward = self._get_reward()\n\n # bad terminal conditions\n if self.curr_step >= self.max_steps \\\n or self.target_distance >= self.max_distance \\\n or self.mean_radius_sheep >= self.max_radius:\n self.finish = True\n if self.sparse_reward:\n reward = -1.0\n\n # good terminal conditions\n if self.target_distance <= 1.0:\n success = True\n self.finish = True\n if self.sparse_reward:\n reward = 1.0\n\n # update rl parameters\n self.episode_length += 1\n self.episode_reward += reward\n\n # generate info return parameter\n if self.info_mode == 1 and self.finish:\n info = {'r':self.episode_reward, 'l':self.episode_length, \n 's': success}\n else:\n info = {'n':self.num_sheep, 's': success}\n\n return ob, reward, self.finish, info", 
"def run_one_step(self):\n # Get the current state, action and initialise the reward\n state = copy.copy(self.env.get_state())\n action = self.agent.get_action(state)\n reward = 0.0\n # Check if the environment has reached a terminal state\n if self.env.check_terminal() is False:\n # Save the initial state and action to an 'experience'\n latest_experience = Experience(copy.copy(state), copy.copy(action))\n # Update the environment using the chosne action\n self.env.update(action)\n # Get the reward to attribute to the agent and save to the experience to save\n reward = self.env.get_reward()\n latest_experience.reward = copy.copy(reward)\n # Get the updated state\n state = self.env.get_state()\n if self.env.check_terminal() is False:\n # If the new state isn't terminal, save the next action and the 'done' flag to the experience\n action = self.agent.get_action(state)\n latest_experience.done = False\n else:\n # If the new state is terminal, save a dummy action and the 'done' flag to the experience\n action = 0.0\n latest_experience.done = True\n latest_experience.next_state = copy.copy(state)\n latest_experience.next_action = copy.copy(action)\n # Update the history with the latest experience\n self.agent.update_history(copy.copy(latest_experience))\n # Update the agents policy using a batch of experiences chosen from the history\n self.agent.update_policy_batch(max(1, self.batch_size))\n self.count += 1\n # Update the target network if appropriate\n if self.update_target_rate is not None:\n if self.count % self.update_target_rate == 0:\n self.agent.policy.learner.update_target_network()\n else:\n # If the environment is in a terminal state, record this and perform a policy update\n latest_experience = Experience(copy.copy(state), copy.copy(action))\n latest_experience.reward = 0.0\n latest_experience.next_state = copy.copy(state)\n latest_experience.next_action = 0.0\n latest_experience.done = True\n self.agent.update_history(copy.copy(latest_experience))\n self.agent.update_policy_batch(max(1, self.batch_size))\n self.count = 0\n return reward", "def step(self,action):\n observation, reward, done, info = self.env.step(action)\n if info[\"health\"] <= 0 or info[\"enemy_health\"] <= 0:\n self.player_hp = 120\n self.enemy_hp = 120\n reward = 0\n else:\n self.player_hp = info['health']\n self.enemy_hp = info[\"enemy_health\"]\n reward = self.player_hp - self.enemy_hp\n\n\n if info[\"enemy_rounds_won\"] == 2 or info[\"rounds_won\"] == 2:\n self.player_hp = 120\n self.enemy_hp = 120\n reward = 0\n done = True\n\n obs = self.observation(observation)\n if self.current_frame_number == self.frame_skipping:\n self.q.append(obs)\n self.current_frame_number = 0 \n self.current_frame_number += 1\n reward = reward / 120 +1\n return np.array(list(self.q)), reward, done, info", "def step(self, action):\n res = self.reward_table.get(self.curr_state, action)\n\n self.curr_state = res['result']\n\n return res", "def step(self, action):", "def step(self, action):\n # TODO: code here\n y, x = self.state\n dy, dx = self.moves[action]\n next_x, next_y = x+dx, y+dy\n\n next_x = np.clip(next_x, 0, self.width-1) # clip the values to the world\n next_y = np.clip(next_y, 0, self.height-1) # clip the values to the world\n\n if next_y == 1:\n rand = np.random.uniform()\n if rand < 0.2:\n next_x += 1\n elif rand < 0.7:\n next_x += 2\n else:\n next_x += 3\n\n next_x = np.clip(next_x, 0, self.width - 1)\n\n if next_x == 4 and next_y == 1:\n reward = -1\n done = True\n elif next_x == 4 and next_y == 2:\n reward = 1\n done 
= True\n else:\n reward = 0\n done = False\n\n next_state = (next_y, next_x)\n self.state = next_state\n\n return next_state, reward, done, {}", "def step(self, action_history, observations):\n return self.call(action_history, observations)", "def run_one_step(self):\n state = copy.copy(self.env.get_state())\n action = self.agent.get_action(state)\n reward = 0.0\n if self.env.check_terminal() is False:\n latest_experience = Experience(copy.copy(state), copy.copy(action))\n self.env.update(action)\n reward = self.env.get_reward()\n latest_experience.reward = copy.copy(reward)\n state = self.env.get_state()\n if self.env.check_terminal() is False:\n action = self.agent.get_action(state)\n latest_experience.done = False\n else:\n action = 0.0\n latest_experience.done = True\n latest_experience.next_state = copy.copy(state)\n latest_experience.next_action = copy.copy(action)\n self.agent.update_history(copy.copy(latest_experience))\n self.count += 1\n # If the latest history has a large enough batch, perform an update\n # CHECK IF THIS IS THE RIGHT METHOD\n if self.count % self.batch_size == 0:\n self.agent.update_policy_ordered(max(1, self.batch_size))\n if self.update_target_rate is not None:\n if self.count % self.update_target_rate == 0:\n self.count = 0\n self.agent.policy.learner.update_target_network()\n else:\n latest_experience = Experience(copy.copy(state), copy.copy(action))\n latest_experience.reward = 0.0\n latest_experience.next_state = copy.copy(state)\n latest_experience.next_action = 0.0\n latest_experience.done = True\n self.agent.update_history(copy.copy(latest_experience))\n # Perform an update on all of the previous experiences that haven't been updated\n if self.count % self.batch_size > 0:\n self.agent.update_policy_ordered((self.count % self.batch_size) + 1)\n self.count = 0\n return reward", "def step(self, reward, observation):\n self._last_observation = self._observation\n self._record_observation(observation)\n\n if not self.eval_mode:\n self._store_transition(self._last_observation, self.action, reward, False)\n self._train_step()\n\n self._rng, self.action = select_action(self.network_def,\n self.online_params,\n self.state,\n self._rng,\n self.num_actions,\n self.eval_mode,\n self.epsilon_eval,\n self.epsilon_train,\n self.epsilon_decay_period,\n self.training_steps,\n self.min_replay_history,\n self.epsilon_fn)\n self.action = onp.asarray(self.action)\n return self.action", "def step(self, action):\n \"\"\" Action is a motion command \"\"\"\n rich_obs, reward, done, info = super(ColoredEgoCostmapRandomAisleTurnEnv, self).step(action)\n obs = self._extract_egocentric_observation(rich_obs)\n return obs, reward, done, info", "def doAction(self, gameState, action):\n self.lastState = gameState\n self.lastAction = action", "def make_step(self, action_index):\n # Randomly sample action_index if world is stochastic\n if np.random.uniform(0, 1) < self.random_move_probability:\n action_indices = np.arange(self.num_actions, dtype=int)\n action_indices = np.delete(action_indices, action_index)\n action_index = np.random.choice(action_indices, 1)[0]\n\n action = self.actions[action_index]\n\n # Determine new position and check whether the agent hits a wall.\n old_position = self.agent_position\n new_position = self.agent_position\n if action == \"UP\":\n candidate_position = old_position + self.num_cols\n if candidate_position < self.num_fields:\n new_position = candidate_position\n elif action == \"RIGHT\":\n candidate_position = old_position + 1\n if candidate_position % 
self.num_cols > 0: # The %-operator denotes \"modulo\"-division.\n new_position = candidate_position\n elif action == \"DOWN\":\n candidate_position = old_position - self.num_cols\n if candidate_position >= 0:\n new_position = candidate_position\n elif action == \"LEFT\": # \"LEFT\"\n candidate_position = old_position - 1\n if candidate_position % self.num_cols < self.num_cols - 1:\n new_position = candidate_position\n else:\n raise ValueError('Action was mis-specified!')\n\n # Update the environment state\n self.agent_position = new_position\n \n # Calculate reward\n reward = self.rewards[self.agent_position]\n reward -= 1\n return reward, new_position", "def perform_step(self, action):\n pass" ]
[ "0.78822833", "0.7701546", "0.74653417", "0.735205", "0.73112786", "0.7296633", "0.72683334", "0.72642654", "0.72118324", "0.71811354", "0.7162281", "0.71551687", "0.71234053", "0.7118485", "0.70683175", "0.6997027", "0.6996414", "0.699088", "0.6952764", "0.6945095", "0.6939528", "0.6913101", "0.69109845", "0.68902975", "0.6889493", "0.6849144", "0.6844795", "0.6794719", "0.67794347", "0.67557055", "0.6746276", "0.6744473", "0.67318475", "0.67211956", "0.67033035", "0.67009604", "0.6668397", "0.6667357", "0.6614929", "0.6614293", "0.6611076", "0.6589347", "0.658554", "0.6584596", "0.65840155", "0.6571545", "0.6560315", "0.65325266", "0.6499401", "0.648296", "0.64809316", "0.6478797", "0.64636064", "0.64452255", "0.6443007", "0.6429109", "0.64073884", "0.6387944", "0.6346282", "0.63398856", "0.63303745", "0.63221216", "0.6320005", "0.628055", "0.62735206", "0.6271264", "0.62662584", "0.6263516", "0.6263119", "0.62606966", "0.62561554", "0.62558955", "0.6253868", "0.6247598", "0.6224966", "0.62091815", "0.6203728", "0.62018174", "0.6192084", "0.6185942", "0.61664414", "0.61564064", "0.61564064", "0.6154217", "0.615042", "0.61267275", "0.6117984", "0.61089015", "0.6099026", "0.6096507", "0.60923266", "0.60876334", "0.6087335", "0.6086161", "0.6076458", "0.6075029", "0.6073301", "0.60476965", "0.60413754", "0.6027932" ]
0.70150334
15
Starts a new sequence, returns the first `TimeStep` of this sequence. See `reset(self)` docstring for more details
def _reset(self) -> ts.TimeStep:
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset(self) -> ts.TimeStep:\n self._current_time_step = self._reset()\n return self._current_time_step", "def start(self):\n\t\tself._start = time.clock()\n\t\tif self._initial is None:\n\t\t\tself._initial = self._start\n\t\treturn self", "def get_first_step(self):\n return self.get_step_by_index(0)", "def step(self, action: types.NestedArray) -> ts.TimeStep:\n if self._current_time_step is None or self.should_reset(\n self._current_time_step\n ):\n return self.reset()\n\n self._current_time_step = self._step(action)\n return self._current_time_step", "def start(self, step=None):\n\n if step is None:\n while True:\n next_event = self._pop_next_event()\n if next_event:\n self.current_time = next_event.datetime\n next_event.call()\n else:\n break\n else:\n # TODO: this is not right...\n while True:\n run_to = self.current_time + step\n while True:\n next_event = self._pop_next_event(run_to)\n if next_event:\n next_event.call()\n else:\n break\n print \"{time} Simulation Finished\".format(time=self.current_time)", "def start(self):\n return self.reset(\n starting=1,\n stopped=0,\n )", "def reset(self):\n # Initialize the timestep\n self.timestep = 0\n self.state = self.starting_state\n\n if self.from_data:\n self.episode_num += 1\n\n\n return self.starting_state", "def set_first_machine_time_step(self, first_machine_time_step):", "def new_task(self):\n self.true_trajectory = self.simulate()\n self.x0 = self.true_trajectory[0]\n self.xT = self.true_trajectory[-1]\n return self.reset()", "def _reset(self):\n self.spot_supervisor.reset()\n return ts.TimeStep(ts.StepType.FIRST, np.float32(0.0), DISCOUNT,\n np.zeros(23, dtype=np.float32))", "def start(self):\n return self.trial.start + timedelta(seconds=self.start_checkpoint)", "def before(self, time: float) -> 'Trajectory':\n return self.split(time)[0]", "def next(self):\n steps = self.context.get('process.steps', [])\n\n if len(steps) < 1:\n return None\n\n if self._index is None:\n self._index = 0\n elif self._index < len(steps)-1:\n self._index += 1\n\n return Step(attributes=steps[self._index], index=self._index)", "def current_time_step(self) -> ts.TimeStep:\n return self._current_time_step", "def StartTimer(self):\n self._start_time = time.time()", "def first_loop_start(self) -> int:\n return self.__first_loop_start", "def setStartTime(self, *args):\n return _osgAnimation.Animation_setStartTime(self, *args)", "def seek_to_start_time(self):\n return 0", "def go_to_start(self):\n self.go_to(0)", "def getStartTime(self):\n return _osgAnimation.MatrixLinearSampler_getStartTime(self)", "def reset(self):\n self._timestep = np.array([0])", "def next(self):\n last_time = self.next_time\n current_time = time.time()\n delta = last_time + self.interval - current_time\n\n if last_time > current_time + self.interval:\n # Clock appears to have moved backwards. Reset\n # the timer to avoid waiting for the clock to\n # catch up to whatever time it was previously.\n self.next_time = current_time + self.interval\n elif delta < 0 and abs(delta) > self.interval * self.max_catchup:\n # Execution of jobs is too far behind. 
Give up on\n # trying to catch up and reset the time, so that\n # will only be repeated a maximum of\n # self.max_catchup times.\n self.next_time = current_time - \\\n self.interval * self.max_catchup\n else:\n self.next_time = last_time + self.interval\n\n return self", "def startNextAnim(self):\n self.notify.debug(\"startNextAnim self.okToStartNextAnim=%s\" % self.okToStartNextAnim)\n #import pdb; pdb.set_trace()\n self.curIval = None\n if self.okToStartNextAnim:\n self.notify.debug(\"got pass okToStartNextAnim\")\n whichAnim = self.chooseAnimToRun()\n self.notify.debug(\"whichAnim=%s\" % whichAnim)\n self.lastPlayingAnimPhase = whichAnim # merely for debugging\n self.curIval = self.createAnimSequence(whichAnim)\n self.notify.debug(\"starting curIval of length %s\" % self.curIval.getDuration())\n self.curIval.start()\n else:\n self.notify.debug(\"false self.okToStartNextAnim=%s\" %self.okToStartNextAnim)", "def startTime(self) -> float:\n try: return self.times[0]\n except IndexError: return 0.0", "def start(self):\n\t\tif self._start is not None:\n\t\t\traise RuntimeError('Animations can only be run once')\n\t\tself._start = 1\t\n\t\t# start time\n\t\tstartticks = self._startticks if self.startticks else _pg.time.get_ticks()\n\t\tfor anim in self.animations:\n\t\t\tanim._startticks = startticks\n\t\t\tanim.start()\n\t\t\tstartticks += anim.duration\n\t\t# get updated\n\t\t_running.append(self)\n\t\t_anim_started(self)", "def start(self):\n try:\n return self.index[0]\n except:\n pass", "def start(self):\n\t\tif self._start is not None:\n\t\t\traise RuntimeError('Animations can only be run once')\n\t\tself._start = 1\t\n\t\t# start time\n\t\tif not self._startticks:\n\t\t\tself._startticks = _pg.time.get_ticks()\n\t\tfor anim in self.animations:\n\t\t\tanim._startticks = self._startticks\n\t\t\tanim.start()\n\t\t# get updated\n\t\t_running.append(self)\n\t\t_anim_started(self)", "def __next__(self):\n temp = timescale()\n try:\n temp.MJD = np.atleast_1d(self.MJD)[self.__index__].copy()\n except IndexError as exc:\n raise StopIteration from exc\n # add to index\n self.__index__ += 1\n return temp", "def start_record_trajectory(self):\r\n return self._arm.start_record_trajectory()", "def step(self):\n try:\n return next(self.generator)\n except StopIteration:\n return None", "def start(self):\r\n return self.schedule()", "def reset(self):\n self.state = self.start\n return self.start", "def RespStart(builder):\n return Start(builder)", "def begin(cls, timer=None):\r\n t = Timer(timer)\r\n try:\r\n yield t\r\n finally:\r\n t.finish()", "def getStartTime(self):\n return _osgAnimation.Vec3LinearSampler_getStartTime(self)", "def getStartTime(self):\n return _osgAnimation.Vec2LinearSampler_getStartTime(self)", "def reset(self, setup=False):\n self._done = False\n self._nbSteps = 0\n\n x = None\n if (self.startPosX == 'random' and setup) or (\n self.startPosX == 'episodeRandom'):\n x = random.randint(0, self._width - 1)\n elif (self.startPosX == 'random' and not setup):\n x = self._initState[0]\n elif self.startPosX == 'center':\n x = self._width - 1\n else:\n x = int(self.startPosX)\n\n y = None\n if (self.startPosX == 'random' and setup) or (\n self.startPosX == 'episodeRandom'):\n y = random.randint(0, self._height - 1)\n elif (self.startPosY == 'random' and not setup):\n y = self._initState[1]\n elif self.startPosX == 'center':\n y = self._height - 1\n else:\n y = int(self.startPosX)\n\n self._currentPos = (x, y)\n self._trajectory = [(x, y)]\n\n return (x, y)", "def start_timer(self):\n 
self.start_time = time.time()", "def getStartTime(self):\n return _osgAnimation.Vec4LinearSampler_getStartTime(self)", "def next_step(self):\n logging.debug(u\"Moving to next step\")\n\n if not self.steps or len(self.steps) < 1:\n logging.debug(u\"- no steps have ben set\")\n return None\n\n index = self.get('_index')\n\n if index is None:\n index = 0\n elif index < len(self.steps)-1:\n index += 1\n else:\n logging.debug(u\"- all steps have ben consumed\")\n return None\n\n current = self.current_step\n if current:\n current.stop()\n\n logging.debug(u\"- triggering step #{}\".format(index+1))\n self.set('_index', index)\n step = self.steps[index]\n step.trigger(bot=self.bot)\n return step", "def getStartTime(self):\n return _osgAnimation.Animation_getStartTime(self)", "def get_first_task(self):\n return self.get_first_step().get_last_task()", "def take_step(self):\n choices_of_steps = [(0,1), (1,0), (0,-1), (-1,0)]\n return random.choices(choices_of_steps)[0]", "def first(self) -> Task:\n return self._tasks[0]", "def step(self, time):\n raise \"use method step of class ReactorNet\" \n #return _cantera.reactor_step(self.__reactor_id, time) ", "def reset_next_step(self):\n return self._reset_next_step", "def _setup_next_sequence(cls):\n return 0", "def step(self):\n\n e = self.event_queue.get()\n self.current_time = e.time\n component = e.component\n component.output(self.current_time)\n component.input(self.current_time)\n component.fire()\n\n self.event_queue.put(VirtualTimeScheduler.Event(self.current_time + component.interval, component))\n\n return self.current_time", "def __call__ (self, t):\n #if t <= self.last_t:\n #raise SpaceTimeContinuumError(\n #\"We're moving back in time! Last t = {}, now = {}\".format(\n #self.last_t, t))\n\n #samp = self._sample(t)\n #self.last_t = t\n #self.last_samp = samp\n #return samp\n pass", "def start(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"start\")", "def setStartTime(self, t0):\n self._simulator_.update(t0=t0)\n return", "def start(self) -> pos.Pos:\n return self.__start", "def first(seq):\n try: # try iterator interface\n return seq.next()\n except AttributeError:\n pass\n try: # seq is no iterator, try indexed lookup\n return seq[0]\n except IndexError:\n pass\n raise TypeError(\n \"Argument to `first()` method needs to be iterator or sequence.\")", "def start_pose():\n global start_pose\n while start_pose is None:\n pass\n return start_pose", "def first(seq):\n return next(iter(seq))", "def __pos__(self):\n ts = self._fsm.get(self._id)\n return SMTimeSeries(ts._time, ts._value, self._fsm)", "def start(self):\r\n self.start_time = time.time()", "def first_order_posint(self, timestep):\n self.prev_pos = self.position\n self.position = self.position + (self.velocity * timestep)", "def start_transition(self, next=None):\n if next is not None:\n self._playlist.set_next_preset_by_name(next)\n\n self._in_transition = True\n self._start_transition = True\n self._elapsed = 0.0\n self.transition_starting.emit()", "def resume(self) -> None:\n if not self.started:\n TimerError(\"A timer should be started before to be resumed\")\n super().start()", "def start(self):\n moment = self.tz_match(self.moment)\n\n delta_to_start = timedelta(minutes=moment.minute % self.freq_minutes,\n seconds=moment.second,\n microseconds=moment.microsecond)\n\n start = moment - delta_to_start\n return start", "def get_time_step(self):\n return self._time_step", "def getStartTime(self):\n return _osgAnimation.QuatSphericalLinearSampler_getStartTime(self)", "def 
start(self):\n\t\tif self.__start_time is not None:\n\t\t\traise TimerError(f\"Timer is running. Use .stop() to stop the timer.\")\n\n\t\tself.__start_time = time.perf_counter()", "def t0(self):\n return self._time_axis.start", "def step(self, time):\n return _cantera.reactornet_step(self.__reactornet_id, time)", "def onTimeStepStart(self, timeStep):\n self.queuedInTimeStep = 0\n self.dequeuedInTimeStep = 0\n \n pass", "def min_time(self):\n #{{{ function to return time of first sample\n\n return self.mintime", "def first_tick(self, time):\n pass", "def load_first_ts_after(self, time):\n\n # get time step list\n df_ts = self.doc.c.sim.df.time_steps()\n \n if type(time) in [float, int]:\n if len(df_ts[df_ts.simulation_time > time]) == 0:\n raise RuntimeError(\"{} contains no timestep after {} d\".format(self.doc.c.original_filename, time))\n else:\n ts_no = int(df_ts[df_ts.simulation_time > time].reset_index().iloc[0].file_index)\n self.doc.loadTimeStep(ts_no)\n return df_ts[df_ts.simulation_time > time].reset_index().iloc[0]\n elif type(time) == datetime:\n if len(df_ts[df_ts.simulation_date>time])==0:\n raise RuntimeError(\"{} contains no timestep after {}\".format(self.doc.c.original_filename, time))\n else:\n ts_no = int(df_ts[df_ts.simulation_date > time].reset_index().iloc[0].file_index)\n self.doc.loadTimeStep(ts_no)\n return df_ts[df_ts.simulation_date > time].reset_index().iloc[0]\n else:\n raise ValueError(\"parameter 'time' must be of type float (simulation time in days) \")", "def start_timer(self):\n self.start_time = datetime.now()", "def getStartTime(self):\n return _osgAnimation.Channel_getStartTime(self)", "def initial_step(self, state, action):\n next_state = self.state_transition(state, action)\n env_action = random.choice(list(self.action_space(next_state)[1]))\n next_state = self.state_transition(next_state, env_action)\n return next_state", "def start(self):\n return self.__start", "def start(self):\n if self._start_time is not None:\n raise TimerError(\"Timer is running. Use stop() to stop it\")\n\n self._start_time = time.perf_counter()", "def time_step_spec(self) -> ts.TimeStep:\n return ts.time_step_spec(self.observation_spec(), self.reward_spec())", "def step ( self ) :\n return self.__step", "def start(self):\n self.start_time = time.time()", "def start_time(self):\n # TODO: use pd.Timestamp instead\n return self.time[0].to_pydatetime()", "def step(self):\n return self._step", "def get_next_batch_start(self):\n return None", "def set_start(self, ts):\n base_key = self.floor_time(ts)\n if self.first_timestamp is None or base_key < self.first_timestamp:\n self.first_timestamp = base_key", "def start(self):\n # type: () -> datetime\n return self._start", "def start(self) -> int:\n return self._start", "def start(self) -> int:\n return self._start", "def start(self) -> pdarray:\n return self._starts", "def start(self):\n# if self._start_time is not None:\n self._start_time = time.perf_counter()", "def initialize_simulator(self, startTime=None):\n \n # Load the inputs and check if any problem. If any exits.\n # Align inputs while loading.\n if not self.load_input(align = True):\n return False\n \n # Load the outputs and check if any problems. 
If any exits.\n if not self.load_outputs():\n return False\n \n # Take the time series: the first because now they are all the same (thanks to alignment)\n time = self.inputs[0].get_data_series().index\n \n # Define the initial time for the initialization\n if startTime == None:\n # Start time not specified, start from the beginning\n index = 0\n else:\n \n # Check that the type of start time is of type datetime\n if not isinstance(startTime, datetime.datetime):\n raise TypeError(\"The parameter startTime has to be of datetime.datetime type\")\n \n # Start time specified, start from the closest point\n if (startTime >= time[0]) and (startTime <= time[-1]):\n index = 0\n for t in time:\n if t < startTime:\n index += 1\n else:\n break\n else:\n index = 0\n raise IndexError(\"The value selected as initialization start time is outside the time frame\")\n \n # Once the index is know it can be used to define the start_time\n # If the offset is specified then use it as start time\n start_time = time[index]\n \n # Take all the data series\n Ninputs = len(self.inputs)\n start_input = numpy.zeros((1, Ninputs))\n start_input_1 = numpy.zeros((1, Ninputs))\n start_input_2 = numpy.zeros((1, Ninputs))\n i = 0\n if index == 0:\n for inp in self.inputs:\n dataInput = numpy.matrix(inp.get_data_series().values).reshape(-1,1)\n start_input[0, i] = dataInput[index,0]\n i += 1\n else:\n for inp in self.inputs:\n dataInput = numpy.matrix(inp.get_data_series().values).reshape(-1,1)\n start_input_1[0, i] = dataInput[index-1,0]\n start_input_2[0, i] = dataInput[index,0]\n \n # Linear interpolation between the two values\n dt0 = (time[index] - start_time).total_seconds()\n dT1 = (start_time - time[index-1]).total_seconds()\n DT = (time[index] - time[index-1]).total_seconds()\n \n # Perform the interpolation\n start_input[0, i] = (dt0*start_input_1[0, i] + dT1*start_input_2[0, i])/DT\n \n i += 1\n \n # Initialize the model for the simulation\n self.opts[\"initialize\"] = True\n \n try:\n # Simulate from the initial time to initial time + epsilon\n # thus we have 2 points\n \n # Create the input objects for the simulation that initializes\n input_u = numpy.hstack((start_input, start_input))\n input_u = input_u.reshape(2, -1)\n \n time = pd.DatetimeIndex([start_time, start_time])\n\n # Run the simulation, remember that\n # time has to be a dateteTimeIndex and Input has to be a numpy.matrix\n self.simulate(time=time, input=input_u)\n self.opts[\"initialize\"] = False\n \n # Initialize the selected variables and parameters to the values indicated \n # Done after very small simulation because there can be some internal parameters that defines\n # the initial value and may override the initialization with the indicated values\n # THIS DOESN'T WORK WITH MODELICA CONSTANTS!\n for v in self.variables:\n v.modify_initial_value_in_fmu(self.fmu)\n for p in self.parameters:\n p.modify_initial_value_in_fmu(self.fmu)\n \n return True\n \n except ValueError:\n logger.error(\"First simulation for initialize the model failed\")\n return False", "def start_run(self):\n return mlflow.start_run(\n run_id=self.run_id,\n experiment_id=self.experiment_id,\n run_name=self.run_name,\n nested=self.nested)", "def start(self):\n return self._start", "def start(self):\n return self._start", "def start(self):\n return self._start", "def start(self):\n return self._start", "def start(self):\n return self._start", "def __init__(self, start_time=None):\n if start_time is None:\n self.started = time.time()\n else:\n self.started = start_time", "def 
start(self):\n\t\treturn self._start", "def __nextRun(self, t1, t2):\n if self.t1==t1:\n # rerun from t1\n if self.t2!=t2:\n raise Exception(\"bad t2 (%f!=%f)\" % (t2, self.t2)) \n \n loader = fac.FacManager(self.metafor)\n nt = loader.lookForFile(self.nbFacs) #(0)\n loader.eraseAllFrom(nt)\n self.runOK = self.metafor.getTimeIntegration().restart(nt)\n else:\n # new time step\n tsm = self.metafor.getTimeStepManager()\n dt=t2-t1\n dtmax=dt\n tsm.setNextTime(t2, 1, dtmax) \n \n loader = fac.FacManager(self.metafor)\n nt1 = loader.lookForFile(self.nbFacs) #(0)\n nt2 = loader.lookForFile(self.nbFacs+1) #(1)\n if not self.saveAllFacs:\n loader.erase(nt1) # delete first fac\n self.runOK = self.metafor.getTimeIntegration().restart(nt2)\n if self.saveAllFacs:\n self.nbFacs+=1", "def time_step(self):\n return self._time_step", "def start(self) -> Vertex:\n return self._start", "def start_epoch(self):\n raise NotImplementedError" ]
[ "0.6659816", "0.64686", "0.61162376", "0.6072993", "0.60596496", "0.58604884", "0.56744826", "0.5647901", "0.5628664", "0.5623361", "0.56153256", "0.55805075", "0.5575825", "0.5544547", "0.54956526", "0.5452446", "0.5385656", "0.53372717", "0.53334737", "0.5326875", "0.5326443", "0.53258085", "0.5321996", "0.53214514", "0.5320048", "0.5317264", "0.53057706", "0.5291941", "0.5262118", "0.5258211", "0.52464443", "0.52440506", "0.5242724", "0.52323174", "0.521823", "0.52111644", "0.52083904", "0.5208232", "0.5194536", "0.5186112", "0.5183841", "0.5175628", "0.5163699", "0.51602775", "0.5157639", "0.5156871", "0.5153506", "0.51434803", "0.51422906", "0.5132119", "0.5131187", "0.51303834", "0.512412", "0.51196176", "0.5118226", "0.51146865", "0.51048005", "0.50964034", "0.5095496", "0.50954235", "0.50854105", "0.5077474", "0.50708294", "0.5062518", "0.50498277", "0.5040777", "0.50402147", "0.50400084", "0.5036319", "0.50323796", "0.5031477", "0.50241804", "0.5020389", "0.5018687", "0.50172174", "0.5015429", "0.5010081", "0.50090694", "0.50014466", "0.4999466", "0.4999081", "0.49967983", "0.4995986", "0.49893212", "0.49893212", "0.49831688", "0.49723807", "0.49721608", "0.4967185", "0.49595496", "0.49595496", "0.49595496", "0.49595496", "0.49595496", "0.4955533", "0.49447453", "0.49414524", "0.4927057", "0.49229914", "0.49175102" ]
0.5530532
14
This endpoint is used by the CLI to determine if the API is available or not.
def root(): return {}, 200
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def apicheck():\n\n async def predicate(ctx: commands.Context):\n travitia_keys = await ctx.bot.get_shared_api_tokens(\"travitia\")\n key = travitia_keys.get(\"api_key\") is None\n if ctx.invoked_with == \"help\" and key:\n return False\n if key:\n await ctx.send(\"The API key is not registered, the command is unavailable.\")\n return False\n return True\n\n return commands.check(predicate)", "def test_api() -> bool:\r\n weather = False\r\n news = False\r\n covid = False\r\n if check_weather_version():\r\n logging.info(\"Weather API version is up to date (check_weather_version())\")\r\n weather = True\r\n else:\r\n logging.info(\"Weather API version is not up to date (check_weather_version()) - ACTION REQUIRED\")\r\n if check_news_version():\r\n logging.info(\"News API version is up to date (check_news_version())\")\r\n news = True\r\n else:\r\n logging.info(\"News API version is not up to date (check_news_version()) - ACTION REQUIRED\")\r\n if check_covid_version():\r\n logging.info(\"Covid-19 API version is up to date (check_covid_version())\")\r\n covid = True\r\n else:\r\n logging.info(\"Covid-19 API version is not up to date (check_covid_version()) - ACTION REQUIRED\")\r\n return bool(weather and news and covid)", "async def _check_api(self) -> None:\n await self._api_request(\"devices\")", "def have_api(self, *apis):\n if not all(apis):\n logger.log('DEBUG', f'{self.source} module is not configured')\n return False\n return True", "def ping_missing_api(request):\r\n return _api_response(request, {\r\n 'success': False,\r\n 'message': 'The API url should be /api/v1'\r\n })", "def api(self) -> Optional[pulumi.Input['ApplicationApiArgs']]:\n return pulumi.get(self, \"api\")", "def api(self) -> Optional[pulumi.Input['ApplicationApiArgs']]:\n return pulumi.get(self, \"api\")", "def __nonzero__(self):\n return self.has_apicalls", "def api_id():\n if 'port' in request.args:\n port = int(request.args['port'])\n else:\n return \"Error: No port field provided. Please specify a port.\"\n\n if 'name' in request.args:\n name = request.args['name']\n else:\n return \"Error: No name field provided. 
Please specify a name.\"\n\n result = is_pvserver_available(name, port)\n return jsonify(result)", "def available(self) -> bool:\n return self._api.available", "def probe_api():\n\n info = loads(get(url).text)\n return info", "def apiName(self, name):\n return self.genOpts.conventions.is_api_name(name)", "def check_api(self):\n catalog = self.service_catalog\n for service in catalog:\n if service['name'] not in self.RESOURCE_MAP:\n self.logger.notice(\"Don't know how to check service '%s'\" %\n service['name'])\n status = self.UNKNOWN\n else:\n r = self.get(service['name'],\n self.RESOURCE_MAP[service['name']])\n if not r or r.status_code < 200 or r.status_code > 299:\n status = self.FAIL\n else:\n status = self.OK\n\n yield {\n 'service': service['name'],\n 'status': status,\n 'region': service['region']\n }", "def test_api_ping_failed_missing_api(self):\r\n res = self.testapp.get('/ping?api_key=' + API_KEY,\r\n status=200)\r\n ping = json.loads(res.body)\r\n\r\n self.assertTrue(not ping['success'])\r\n self.assertEqual(ping['message'], \"The API url should be /api/v1\")\r\n self._check_cors_headers(res)", "def api(self) -> pulumi.Output[Optional['outputs.ApplicationApi']]:\n return pulumi.get(self, \"api\")", "def use_in_api_documentation(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"use_in_api_documentation\")", "def test_basic_api_with_mode(self):\n # Create an API with get and put\n self.create_and_verify_stack(\"single/basic_api_with_mode\")\n\n stack_output = self.get_stack_outputs()\n api_endpoint = stack_output.get(\"ApiEndpoint\")\n\n self.verify_get_request_response(f\"{api_endpoint}/get\", 200)\n\n # Removes get from the API\n self.update_and_verify_stack(file_path=\"single/basic_api_with_mode_update\")\n\n # API Gateway by default returns 403 if a path do not exist\n self.verify_get_request_response.retry_with(\n stop=stop_after_attempt(20),\n wait=wait_exponential(multiplier=1, min=4, max=10) + wait_random(0, 1),\n retry=retry_if_exception_type(StatusCodeError),\n after=after_log(LOG, logging.WARNING),\n reraise=True,\n )(self, f\"{api_endpoint}/get\", 403)\n\n LOG.log(msg=f\"retry times {self.verify_get_request_response.retry.statistics}\", level=logging.WARNING)", "async def test_api_status(hass: HomeAssistant, mock_api_client: TestClient) -> None:\n resp = await mock_api_client.get(\"/api/\")\n assert resp.status == HTTPStatus.OK\n json = await resp.json()\n assert json[\"message\"] == \"API running.\"", "def __get_status_api(self):\r\n try:\r\n return Call_shelly_api(url=self.__api_address + \"/status\")\r\n except ShellyException as err:\r\n _LOGGER.warning(err)", "def test_all_endpoint_status():\n r = client.get('/openapi.json')\n assert r.status_code == 200\n for e in r.json()['paths'].keys():\n r = client.get(e)\n assert r.status_code == 200\n\n for e in ['plot']:\n r = client.get(e)\n assert r.status_code == 200", "def endpoint_checker(url):\r\n if \"/arcgis/rest/services/\" and \"http\" in url:\r\n return True\r\n return False", "def is_valid_api(self, url):\n output = self.api.download_is_valid_api_url(url, non_blocking=False)\n error = ''\n if not output:\n error = 'Invalid Anaconda API url.'\n return output, error", "def test_open_api(self):\n response = self.client.get(self.initiatives_url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def api_get(self, name):\n try:\n r = self._get(['apis', name])\n except requests.HTTPError:\n return None\n else:\n return r", "def check_eapi(self, eapi):\n\t\treturn True", "def 
_get_api():\n return os.environ.get(\"MAPBOX_API\", \"https://api.mapbox.com\")", "def test_api_lookup(self):\n\n # Set up the url for the api call\n\n expected_url = 'https://www.gov.uk/api/content{}'.format(self.urlsclass.dedupurls[0])\n\n # Make request and extract json.\n\n expected = requests.get(expected_url).json()\n\n assert api_lookup(self.urlsclass.dedupurls[0], 'https://www.gov.uk/api/content') == expected", "def use_in_api_documentation(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"use_in_api_documentation\")", "async def api_healthcheck(self) -> Optional[Exception]:\n try:\n await self._client.get(\"/health\")\n return None\n except Exception as exc:\n return exc", "def test_get_api_resources(self):\n pass", "def test_service_api_get(service_app):\n response = service_app.get('/')\n assert response.headers['Content-Type'] == 'application/json'\n assert response.status_code == 200\n assert json.loads(response.data) == {'description': 'service is up', 'status': 200}", "def check_schema_existence_api_call(context, schema, version):\n check_schema_existence(context, schema, version, \"api\")", "def running_gemini_api(context):\n return context.is_gemini_api_running", "def test_format_price_api_url_exists(self):\n self.assertIsNotNone(format_price_api_url)", "def test_get_api(self):\n # Get metadata list\n _logger.info('Get sequencerun API')\n response = self.client.get('/sequencerun/')\n self.assertEqual(response.status_code, 200, 'Ok status response is expected')\n\n _logger.info('Check if API return result')\n result_response = response.data['results']\n self.assertGreater(len(result_response), 0, 'A result is expected')\n\n _logger.info('Check if unique data has a single entry')\n response = self.client.get('/sequencerun/?msg_attr_action=statuschanged')\n results_response = response.data['results']\n self.assertEqual(len(results_response), 1, 'Single result is expected for unique data')\n\n _logger.info('Check Invalid keyword')\n response = self.client.get('/sequencerun/?foo=bar')\n results_response = response.data['results']\n self.assertEqual(len(results_response), 0, 'No results are expected for unrecognized query parameter')", "def test_api_base(self):\n # load api base\n r = requests.get('{server}/api/0.1/'.format(\n server=self.get_server_url()))\n j = r.json()\n self.assertIn('gages', j)\n self.assertIn('sections', j)\n self.assertIn('regions', j)\n self.assertIn('rivers', j)\n self.assertIn('sensors', j)\n self.assertIn('samples', j)", "def api_check_status():\n (success, status) = AppStatus.check_status()\n if success: return jsonify({\"success\": True})\n abort(503, status)", "def is_available(self):\n return bool(FileUtil(\"curl\").find_exec())", "def status_api(request):\n if request.method == 'GET':\n return JsonResponse({\n 'status': 'OK',\n 'version': __version__\n }, status=200)", "def test_00_api_get(self):\r\n # GET as Anonymous\r\n url = '/api/'\r\n action = 'get'\r\n self.check_limit(url, action, 'app')", "def _query_api(\n master_url=settings.OPENSHIFT_API['NP']['OPENSHIFT_MASTER'],\n api_token=settings.OPENSHIFT_API['NP']['API_TOKEN'],\n endpoint='/oapi/v1/buildconfigs'):\n\n openshift_api_url = 'https://' + master_url\n openshift_api_get_endpoint = openshift_api_url + endpoint\n bearer_token_header = {'Authorization': 'Bearer ' + api_token }\n\n try:\n response = requests.get(openshift_api_get_endpoint,headers=bearer_token_header, timeout=2.0)\n except requests.ConnectTimeout as e:\n logger.error(e)\n return None\n except 
requests.ConnectionError as e:\n logger.error(e)\n return None\n\n if not response.ok:\n logger.error(response.status_code)\n return None\n else:\n return response", "def is_api_method(obj, name):\n try:\n method = getattr(obj, name)\n except AttributeError:\n return False\n return (ismethod(method) and hasattr(method, \"__api_call\"))", "def test_service_status(self, api_instance):\n params = api_instance.get_service_status()\n # Only key we care about here is GetServiceStatus\n assert params[\"Action\"] == \"GetServiceStatus\"", "def test_GET_call_api_and_return_200Ok(client):\n\n url = '/api/v1/calls/'\n\n response = client.get(url)\n\n assert response.status_code == status.HTTP_200_OK", "def is_available():", "def test_endpoint_status(self) -> None:\n status = self.client.endpoint_status\n self.assertIsInstance(status, dict)", "def test_view_url_exists_api_alerts(self):\n response = self.client.get('/api/alerts/')\n self.assertEqual(response.status_code, 200)", "def test_GET(self):\n if not self.url:\n return\n response = self.client.get(self.url, {}, format='json')\n self.assertIn(response.status_code, [status.HTTP_405_METHOD_NOT_ALLOWED,\n status.HTTP_401_UNAUTHORIZED])", "def api_health(self):\n return messages.SUCCESS_JSON, 200", "def get_api(self):\n return self.api", "def _running_locally(coreapi_url, jobs_api_url):\n return not (coreapi_url and jobs_api_url)", "def check_status():\n try:\n return HTTPClient().fetch(\"https://api.random.org/\").code == 200\n except Exception: # pylint: disable=broad-except\n return False", "def api():\n\treturn \"The API call\"", "def health_check():\n return dict(api_status='OK')", "def test_health_endpoint(self):\n url = f\"{BASE_URL}/health\"\n response = requests.get(url)\n response_json = response.json()\n assert response.status_code == 200\n assert response_json['status'] == 200", "def _get_installed_api() -> Optional[str]:\n # Fix [AttributeError: module 'importlib' has no attribute 'util']\n # See https://stackoverflow.com/a/39661116/13452582\n from importlib import util\n\n for api in _API_LIST:\n if util.find_spec(api) is not None:\n return api\n return None", "def test_root_public_api(self):\n\n # GIVEN public API\n # WHEN fetching available applications and models\n response = self.api.root_api(public=True)\n\n # THEN it should succeed\n self.assertTrue(response.success)\n\n # AND it should have valid data\n self.assertEqual(len(response.data), 2)\n self.assertEqual(response.data[0]['app_label'], 'auth')\n self.assertEqual(response.data[1]['app_label'], 'admin')", "def fusion_api_get_appliance_status(self, api=None, headers=None):\n return self.info.get_status(api=api, headers=headers)", "def check_configuration_server(self) -> bool:\n return (\n self.container is not None\n and self.container.exec_run(\n \"bash -c 'curl -s --head http://localhost:19071/ApplicationStatus'\"\n )\n .output.decode(\"utf-8\")\n .split(\"\\r\\n\")[0]\n == \"HTTP/1.1 200 OK\"\n )", "def api_endpoint():\n return 'localhost'", "def test_status_ok(api_client):\n response = api_client.get()\n assert response.ok", "def available(self) -> bool:\n return self._tm_client.api.available", "def api_call():\n\tresponse = requests.get(URL_API)\n\treturn response", "def test_root_api(self):\n\n # GIVEN API\n\n # WHEN fetching available applications and models\n response = self.api.root_api()\n\n # THEN it should succeed\n self.assertTrue(response.success)\n\n # AND it should have valid data\n for item in response.data:\n self.assertEqual(len(item.keys()), 3)\n 
self.assertEqual(set(item.keys()), set(['model', 'actions', 'app_label']))\n\n # AND it contains also UI application models\n self.assertTrue(any('test' in d['app_label'] for d in response.data))\n\n # AND public applications are also available\n data = [item for item in response.data if item['app_label'] == 'admin']\n self.assertEqual(len(data), 1)\n self.assertEqual(data[0]['model'], None)\n self.assertEqual(len(data[0]['actions'].keys()), 2)", "def url_was_found(url=\"localhost:5000/health\"):\n res = requests.get(url).json()\n\n if res['status_code'] == 200:\n return True\n elif res['status_code'] == 404:\n return False\n else:\n raise UnexpectedResponseError(\"Expected 200 OK or 404, got {}.\\n\".format(res['status']), \"Full response : {}\".format(res))", "async def check_config(self) -> None:\n try:\n await self._check_api()\n except aiohttp.ClientError as e:\n raise ConnectionError(str(e))", "def api(self) -> str:", "def check_api_keys(self, request):\n app_id, api_obj = request.META.get(\"HTTP_APP_ID\"), None\n api_secret_key = request.META.get(\"HTTP_API_SECRET_KEY\")\n if app_id and api_secret_key:\n # validate app_id and api_secret_key\n app_id_bool = self._validate_app_id(app_id)\n if not app_id_bool:\n return False, self.app_id_message\n api_secret_key_bool = self._validate_api_secret_key(api_secret_key)\n if not api_secret_key:\n return False, self.api_secret_key_message\n try:\n api_obj = ApiApp.objects.get(app_id=app_id, api_secret_key=api_secret_key, active=True)\n if api_obj:\n self.app(request, api_obj)\n return True, ''\n except ApiApp.DoesNotExist:\n self.app(request, api_obj)\n return False, self.message\n else:\n self.app(request, api_obj)\n return False, self.message", "def test_anything_else_is_accessible(api_client):\n\n assert api_client().get(\"/anything/else\").status_code == 200", "def test_api_versioning(self):\n response = self.request_knox(\n self.url,\n media_type=views_api.CORE_API_MEDIA_TYPE,\n version=views_api.CORE_API_DEFAULT_VERSION,\n )\n self.assertEqual(response.status_code, 200)", "def test_api_key_not_found(self):\n runner = CliRunner()\n\n with patch(\"greynoise.cli.decorator.load_config\") as load_config:\n load_config.return_value = {\"api_key\": \"\"}\n result = runner.invoke(\n subcommand.stats, [\"query\"], parent=Context(main, info_name=\"greynoise\")\n )\n assert result.exit_code == -1\n assert \"Error: API key not found\" in result.output", "def __virtual__():\n if _apikey():\n return True\n return (\n False,\n 'The API key was not specified. Please specify it using the \"apikey\" config.',\n )", "def ping():\n \"\"\"Get the estimator object for this instance, loading it if it's not already loaded.\"\"\"\n checker = os.listdir('/opt/ml')\n health = checker is not None # health check here\n status = 200 if health else 404\n return flask.Response(response='\\n', status=status, mimetype='application/json')", "def available(self) -> bool:\n if self._coordinator and not self._coordinator.last_update_success:\n return False\n return self.rest.data is not None", "def meraki_api_enable():\n token = str(os.environ[\"MERAKI_API_KEY\"])\n if token in [\"\"]:\n logger.warning('API Key for Meraki is missing. 
check ENV')\n return False\n else:\n return True", "def test_get_status(self):\n resp = self.build_api.getStatus().json()\n assert 'status' in resp\n assert 'message' in resp", "def test_api_version(self):\n from supvisors.rpcinterface import API_VERSION, RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n self.assertEqual(API_VERSION, rpc.get_api_version())", "def running(self):\n\n return can_connect_to(APIConsumer.host, APIConsumer.port)", "def test_api(test_name, endpoint, method, body, expected_response, expected_status_code, validation, params):\n response = None\n with allure.step(' '.join(['getting API response on endpoint:', str(endpoint)])):\n response = APIRequestor().request(method=method, url_path=endpoint, body=body, params=params)\n with allure.step(' '.join(['Asserting API status code expected:', str(expected_status_code), ', with response:', str(response.status_code)])):\n Compare.equal.__call__(a=expected_status_code, b=response.status_code, free_text=f\"Status code is not as expected: {response.status_code} instead of expected: {expected_status_code}\")\n with allure.step('starting API validation'):\n validation = 'equal' if not validation else validation\n with allure.step(' '.join(['Validation with method:', str(validation)])):\n Compare.__dict__[validation](a=str(response), b=str(expected_response),\n free_text=f\"Failed to compare, Response is not as expected: {response} instead of {expected_response}\")", "def test_get_requests(self):\n response = self.client.open('/api/provisioning/port',\n method='GET')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def check_availability(self):\n pass", "def check_api_access(info):\n\n try:\n file = open(PATH + '/../DB/access.json', 'r')\n accessData = json.load(file)\n except:\n return False\n\n try:\n application = info['application_name']\n applicationData = accessData.get(application)\n\n if applicationData is None:\n return False\n\n timestamp = applicationData[\"timestamp\"]\n if info['timestamp'] == timestamp:\n return True \n return False\n except:\n return False", "def test_59_help_api(self):\r\n Fixtures.create()\r\n url = \"/help/api\"\r\n res = self.app.get(url, follow_redirects=True)\r\n err_msg = \"There should be a help api page\"\r\n assert \"API Help\" in res.data, err_msg", "def CheckRapi(options, args):\n if args: # rapi doesn't take any arguments\n print(\"Usage: %s [-f] [-d] [-p port] [-b ADDRESS]\" %\n sys.argv[0], file=sys.stderr)\n sys.exit(constants.EXIT_FAILURE)\n\n if options.max_clients < 1:\n print(\"%s --max-clients argument must be >= 1\" %\n sys.argv[0], file=sys.stderr)\n sys.exit(constants.EXIT_FAILURE)\n\n ssconf.CheckMaster(options.debug)\n\n # Read SSL certificate (this is a little hackish to read the cert as root)\n if options.ssl:\n options.ssl_params = http.HttpSslParams(ssl_key_path=options.ssl_key,\n ssl_cert_path=options.ssl_cert,\n ssl_chain_path=options.ssl_chain)\n else:\n options.ssl_params = None", "def fusion_api_check_authorization(self, body=None, api=None, headers=None, sessionID=None):\n return self.auth.check(body=body, api=api, headers=headers, sessionID=sessionID)", "def test_basic_api(self):\n self.create_and_verify_stack(\"single/basic_api\")\n\n first_dep_ids = self.get_stack_deployment_ids()\n self.assertEqual(len(first_dep_ids), 1)\n\n self.set_template_resource_property(\"MyApi\", \"DefinitionUri\", self.get_s3_uri(\"swagger2.json\"))\n self.update_stack()\n\n second_dep_ids = 
self.get_stack_deployment_ids()\n self.assertEqual(len(second_dep_ids), 1)\n\n self.assertEqual(len(set(first_dep_ids).intersection(second_dep_ids)), 0)", "def noauth(self):\n try:\n # some endpoints dont return json\n return self.json['response'].get('error_id') == 'NOAUTH'\n except:\n return False", "def available(self):\n raise ClixxException(\"Not implemented.\")", "def is_gappa_installed():\n dev_null = open(\"/dev/null\", \"w\")\n gappa_test = subprocess.call(\"gappa --help 2> /dev/null\", shell=True)\n return (gappa_test == 0)", "def test_is_system(self):\n\n url = '/%s/job-types/?is_system=false' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 5)\n\n url = '/%s/job-types/?is_system=true' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 1)", "def check(self):\n try:\n response = requests.head(self.url)\n except requests.exceptions.RequestException:\n return False, \"darkRed\", \"🛑 Connection Error\"\n return self._status_is_okay(response.status_code)", "def check_restconf(address):\n\n # RESTCONF enabled device's address and default entry level\n restconf_api = 'http://' + address + '/api'\n\n # Parameter passed during the call\n params = dict(verbose='')\n\n # Necessary headers to make an API call\n headers = dict(accept='application/vnd.yang.api+json')\n\n # Actual REST call\n restconf_response = requests.get(restconf_api, headers=headers,\n auth=(RC_USER, RC_PASS),\n params=params, timeout=5.0)\n return restconf_response.ok", "def test_request(self):\n self.assertIn('list', self.api.request('sys.settings.get').data,\n msg=\"request() doesn't work properly. 
'list' is not found in the response\")", "def ping():\n health = ScoringService.get_model() is not None # You can insert a health check here\n\n status = 200 if health else 404\n return flask.Response(response='\\n', status=status, mimetype='application/json')", "def api_endpoint(self) -> str:\n return pulumi.get(self, \"api_endpoint\")", "def test_api_404(self):\n r = requests.get('{server}/api/0.1/sam'.format(\n server=self.get_server_url()),\n headers={'accept': 'application/json'})\n self.assertEquals(404, r.status_code)\n self.assertIn('error', r.json())", "def api(self):\r\n return self._api", "def test_is_active(self):\n\n url = '/%s/job-types/?is_active=false' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 2)", "def get(self, version):\n version_found = False\n api_spec = self._create_api_spec(version)\n for base_api in current_app.appbuilder.baseviews:\n if isinstance(base_api, BaseApi) and base_api.version == version:\n base_api.add_api_spec(api_spec)\n version_found = True\n if version_found:\n return self.response(200, **api_spec.to_dict())\n else:\n return self.response_404()", "def test_simple_request(self):\n urls = [\"https://api.omniture.com/admin/1.4/rest/\",\n \"https://api2.omniture.com/admin/1.4/rest/\",\n \"https://api3.omniture.com/admin/1.4/rest/\",\n \"https://api4.omniture.com/admin/1.4/rest/\",\n \"https://api5.omniture.com/admin/1.4/rest/\"]\n self.assertIn(self.analytics.request('Company', 'GetEndpoint'),urls, \"Company.GetEndpoint failed\" )", "def test_ping(self):\n response = self.client.get(reverse(\"api_hello:ping\"))\n self.assertTrue(response.json()[\"status\"])" ]
[ "0.7066539", "0.6811271", "0.6799337", "0.6644127", "0.65873927", "0.6556331", "0.6556331", "0.6400296", "0.6316991", "0.62912995", "0.6280422", "0.6267303", "0.62298983", "0.6227067", "0.61904055", "0.61815757", "0.6173037", "0.6171916", "0.61550844", "0.6096815", "0.60944444", "0.6089251", "0.60829616", "0.60556805", "0.60388774", "0.6016431", "0.5992117", "0.59852874", "0.5972008", "0.59557337", "0.59555626", "0.59509546", "0.59467345", "0.59316367", "0.5921116", "0.5916567", "0.59109735", "0.58961093", "0.58913714", "0.5890519", "0.58897614", "0.5883771", "0.58836704", "0.587526", "0.5864794", "0.5852881", "0.58489186", "0.5836844", "0.58330697", "0.5821847", "0.5802467", "0.5800348", "0.5771192", "0.5767018", "0.5748971", "0.5744335", "0.5742079", "0.5732026", "0.57034", "0.57026774", "0.5695279", "0.5693809", "0.5685275", "0.5676303", "0.5670562", "0.5658785", "0.5649533", "0.5649323", "0.56471264", "0.5641847", "0.56141895", "0.56132954", "0.56079197", "0.55904764", "0.55857253", "0.5580589", "0.55682284", "0.55634725", "0.5560062", "0.55563134", "0.5546911", "0.5545295", "0.55445415", "0.55364376", "0.5535787", "0.55356026", "0.55327165", "0.55269945", "0.5525607", "0.55195826", "0.55170393", "0.55170006", "0.55087984", "0.5505156", "0.55033517", "0.550324", "0.5502138", "0.5501551", "0.550075", "0.5499853", "0.5494534" ]
0.0
-1
Check whether an access token is blacklisted or not.
def check_token_in_blacklist(decrypted_token): from .models import BlacklistToken jti = decrypted_token['jti'] if BlacklistToken.check_blacklist(jti): raise InvalidToken("Token is blacklisted. Please log in again.") return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_blacklisted(token):\n if Revoked.query.filter_by(token=token).first():\n return True\n return False", "def check_if_token_in_blacklist(decrypted_token):\n return (\n decrypted_token[\"jti\"] in BLACKLIST\n ) # if True, go to revoked_token_callback", "def check_if_token_in_blacklist(decrypted_token):\n jti = decrypted_token['jti']\n return model.revoked_token.RevokedToken.is_blacklisted(jti)", "def check_blacklisted_token(token):\n token = models.TokenBlackList.query.filter_by(token=token).first()\n if token:\n return True\n return False", "def check_if_token_in_blacklist(decrypted_token):\n jti = decrypted_token['jti']\n return RevokedTokenModel.is_jti_blacklisted(jti)", "def check_blacklist(auth_token):\n token = BlacklistToken.query.filter_by(token=str(auth_token)).first()\n if token:\n return True\n\n return False", "def is_blacklisted(self):\r\n \r\n in_blacklist = False \r\n if self.chrompos in parser.blacklist:\r\n in_blacklist = True\r\n \r\n return in_blacklist", "def test_token_was_blacklisted(self):\n\n revoked_token = RevokedToken('secret_token_blacklisted')\n revoked_token.save()\n\n self.assertTrue(\n RevokedToken.is_jti_blacklisted('secret_token_blacklisted'))", "async def check_for_blacklist(ctx):\n if ctx.guild is None:\n # raise commands.NoPrivateMessage\n return True\n return db.is_blacklisted(ctx)", "def blacklist_token(token):\n\n refresh_token = RefreshToken(token)\n refresh_token.blacklist()", "def is_blacklisted(self, url):\n return urlparse.urlsplit(url).netloc in URLFinder.BLACKLIST_DOMAINS", "def verify_token(auth_token):\n blacklisted_token = TokenBlacklisting.query.filter_by(\n token=str(auth_token)).first()\n if blacklisted_token:\n return True\n return False", "def is_blacklisted(self, fkey):\n return fkey in self.blacklist", "def available(self):\n return self.access_token is not None", "def is_blacklisted(\n self, request, credentials: dict = None\n ) -> bool: # pylint: disable=unused-argument\n\n if is_client_ip_address_blacklisted(request):\n return True\n\n return False", "def is_blacklisted(cls, msg):\n return is_blacklisted(msg.fields.get('from_addr'))", "def is_whitelisted(self, instance_id):\n return self.is_tagged(instance_id, 'Whitelisted')", "async def check_access_token(self, token):\n async with self._session.get(\n 'https://eu.battle.net/oauth/check_token',\n params={'token': token}) as resp:\n self.request_count += 1\n valid = resp.status == 200\n if valid:\n json = await resp.json()\n exp = datetime.fromtimestamp(json['exp'])\n valid = valid and exp - datetime.now() >= timedelta(hours=1)\n self._access_token_checked = valid\n return self._access_token_checked", "def is_whitelisted(self, instance_id):\n item = self.get_whitelist_instance(instance_id)\n if item is None:\n return False\n else:\n return True", "def is_forbidden(self, request):\n return common.get_extension(str(request.url().toString())) in self.banned_extensions", "def is_token_revoked(decoded_token):\n jti = decoded_token['jti']\n token = BlacklistedToken.query.filter_by(jti=jti).first()\n return token is not None", "def is_whitelisted(self, fkey):\n return fkey in self.whitelist", "def _is_blacklisted_user(email):\n blacklisted_user_emails = (db_config.get_value('blacklisted_users') or\n '').splitlines()\n return any(\n utils.emails_equal(email, blacklisted_user_email)\n for blacklisted_user_email in blacklisted_user_emails)", "def auth_allowed(self, response, details):\n emails = self.setting('WHITELISTED_EMAILS', [])\n domains = self.setting('WHITELISTED_DOMAINS', 
[])\n teams = self.setting('WHITELISTED_TEAM_NAMES', [])\n team = details.get('team_name')\n email = details.get('email')\n allowed = True\n if email and (emails or domains):\n domain = email.split('@', 1)[1]\n allowed = email in emails or domain in domains\n if allowed and team and teams:\n allowed = team in teams\n return allowed", "def test_token(self):\n api_response = requests.get(self.api_config.get_api_url() + \"greetings/isloggedin\",\n headers={\"Authorization\": \"Bearer \" + self.API_TOKEN})\n\n if api_response.status_code == 401 or 403:\n return False\n else:\n return True", "def check_unauthorized_response(response: HTTPResponse) -> bool:\n return response.status_code == 403", "def is_blacklisted_username(username):\n settings = api.config.get_settings()\n return username in settings.get(\n \"username_blacklist\", api.config.default_settings[\"username_blacklist\"]\n )", "def check_token_invalidate(self, token):\n payload = {'key': self._lr_object._get_api_key(), 'secret': self._lr_object._get_api_secret(), 'access_token': token}\n url = SECURE_API_URL + \"api/v2/access_token/invalidate/\"\n return self._lr_object._get_json(url, payload)", "def checkLogin():\n if 'access_token' in login_session:\n return True\n else:\n return False", "def check_token_validate(self, token):\n payload = {'key': self._lr_object._get_api_key(), 'secret': self._lr_object._get_api_secret(), 'access_token': token}\n url = SECURE_API_URL + \"api/v2/access_token/Validate/\"\n return self._lr_object._get_json(url, payload)", "def test_cannot_view_all_users_with_blacklisted_token(self):\n resp = self.admin_create_user()\n reply = self.admin_create_user2()\n resp = self.admin_login()\n token = resp['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/users',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def IsWhitelistedClientId(client_id):\n return client_id in constants.WHITELISTED_CLIENT_IDS", "def is_whitelisted(\n self, request, credentials: dict = None\n ) -> bool: # pylint: disable=unused-argument\n\n if is_user_attempt_whitelisted(request, credentials):\n return True\n\n if is_client_ip_address_whitelisted(request):\n return True\n\n if is_client_method_whitelisted(request):\n return True\n\n return False", "def _is_oauth_token_valid(token: dict, time_key=\"expires_on\") -> bool:\n if \"access_token\" not in token or token.get(\"token_type\", \"\") != \"Bearer\" or time_key not in token:\n raise AirflowException(f\"Can't get necessary data from OAuth token: {token}\")\n\n return int(token[time_key]) > (int(time.time()) + TOKEN_REFRESH_LEAD_TIME)", "def check_auth_token_validity(self):\n endpoint = self.url + 'api/v1/readiness'\n response = self.perform_get_request(endpoint)\n\n if response.status_code != 200:\n self.print_error_response(response, \"error\")\n return response.status_code == 200", "def is_valid(self):\n return self.access_token is not None \\\n and time.time() < self._expiration_timestamp", "def not_blacklisted_channel(blacklist):\n async def predicate(ctx):\n channel = ctx.message.channel\n server = bot.get_guild(SERVER_ID)\n for c in blacklist:\n if 
channel == discord.utils.get(server.text_channels, name=c):\n raise CommandNotAllowedInChannel(channel, \"Command was invoked in a blacklisted channel.\")\n return True\n \n return commands.check(predicate)", "def isTestCfgBlacklisted(self, asTestCfg):\n fBlacklisted = False;\n\n for asTestBlacklist in self.aasTestsBlacklist:\n iLvl = 0;\n fBlacklisted = True;\n while iLvl < len(asTestBlacklist) and iLvl < len(asTestCfg):\n if asTestBlacklist[iLvl] != asTestCfg[iLvl] and asTestBlacklist[iLvl] != '*':\n fBlacklisted = False;\n break;\n\n iLvl += 1;\n\n if not fBlacklisted and self.fnIsCfgSupported is not None:\n fBlacklisted = not self.fnIsCfgSupported(asTestCfg);\n\n return fBlacklisted;", "def is_blacklisted(self, user_id, blacklist_user_id):\n try:\n result = self.table.select(and_(\n self.table.c.user_id == user_id,\n self.table.c.blacklisted_id == blacklist_user_id)).execute()\n if result.rowcount >= 1:\n return True\n elif result.rowcount == 0:\n return False\n except Exception as e:\n self.log(e, self.identifier)\n raise egg_errors.QueryNotPossible", "def is_token_revoked(decoded_token):\r\n jti = decoded_token['jti']\r\n try:\r\n token = TokenBlacklist.query.filter_by(jti=jti).one()\r\n return token.revoked\r\n except NoResultFound:\r\n return True", "async def bot_check(self, ctx):\n blocked = await self.db.fetchrow(\n \"\"\"\n SELECT *\n FROM blocks\n WHERE user_id=$1\n \"\"\",\n ctx.author.id,\n )\n if blocked is None:\n return True\n raise BlackListed", "def _has_ingress_white_list(ingress):\n return bool(ingress.metadata.annotations.get(\"ingress.kubernetes.io/whitelist-source-range\", False))", "def is_allowed(self, request, credentials: dict = None) -> bool:\n\n if self.is_admin_site(request):\n return True\n\n if self.is_blacklisted(request, credentials):\n return False\n\n if self.is_whitelisted(request, credentials):\n return True\n\n if self.is_locked(request, credentials):\n return False\n\n return True", "async def is_blacklisted(user_id: int) -> bool:\n async with aiosqlite.connect(DATABASE_PATH) as db:\n async with db.execute(\n \"SELECT * FROM blacklist WHERE user_id=?\", (user_id,)\n ) as cursor:\n result = await cursor.fetchone()\n return result is not None", "def check_token(self):\n return config.outlook_token is not None", "def allow_unresolved_secret_tokens(self):\n return self._allow_unresolved_secret_tokens", "def test_http_issuer_ban(self):\n self.assertEqual(\n self._token_checker._check_token_not_revoked(None,\n 'http://idc.org'),\n None\n )\n\n self.assertFalse(\n self._token_checker._verify_token(None,\n 'http://idc.org')\n )", "def is_whitelisted(cls, msg):\n return is_whitelisted(msg.fields.get('from_addr'))", "def is_blacklisted(fname):\n return is_dot(fname) or is_excluded_filetype(fname)", "def isSourceBlacklisted(self, source):\n components = source.split(\".\")\n i = 0\n for component in components:\n i += 1\n testing = components[:i]\n if \".\".join(testing) in GameConsole.blacklistedSources:\n return True\n return False", "async def check_token_works(self) -> bool:\n async with self.web_session.get(url=self._user_endpoint, headers=self._headers) as resp:\n self._expired_token = not resp.status == 200\n return not self._expired_token", "def is_blacklisted(self, string='') -> int:\n try:\n for word in string.split(' '):\n if word in self.blacklist:\n return(1)\n else:\n return(0)\n except Exception as error:\n print(f\"Error: self.is_blacklisted({string}) -> {error}\")", "def verify_token(token):\n if config.API_TOKEN is None:\n logger.error(\n 
'API token is not configured, auth will fail!')\n return token == config.API_TOKEN", "def is_public_token(token):\n return (\n token.payload\n and token.payload.get(\"roles\") == [NONE]\n and token.payload.get(\"context_id\") is None\n and token.payload.get(\"consumer_site\") is None\n )", "def check_permission(self, token):\n decoded_token = jwt.decode(token, os.getenv('SECRET_KEY'))\n if decoded_token['roles'] != ['Admin']:\n return True\n return False", "def check_restricted_allowed_in_deck(deck_format, current_deck, card_name):\n # TODO: Do this\n return False", "def disallow_tokens(self, user_id):\n\n url = self.api_base_url + \"user/\" + str(user_id) + \"/tokenAllowed\"\n\n body = {\n \"allowed\": False\n }\n\n try:\n self.request_handler.make_request(ApiRequestHandler.POST, url, body=body)\n success = True\n except RequestFailed:\n raise\n\n return success", "def verify_access_token(self, token: str) -> bool:\n try:\n data = crypt.verify_token(token)\n except crypt.jwt_exceptions.PyJWTError as e:\n raise FileAccessError() from e\n if data['uuid'] != str(self.pk) or data['space_id'] != str(self.space_id):\n raise FileAccessError()\n\n return True", "def is_missing_token_service(request):\n if request.json == {}:\n return True\n schema = schema_utils.get_auth_schema()\n validator = Validator(schema, require_all=True)\n result = validator.validate(request.json)\n if validator.errors:\n logging.error(str(validator.errors))\n return not result", "def check_blacklist(repo):\n blacklisted = [ # NOTE: keep this list up to date!\n 'builder', 'cache', 'controller', 'database', 'logger', 'logspout',\n 'publisher', 'registry', 'router', 'store-admin', 'store-daemon',\n 'store-gateway', 'store-metadata', 'store-monitor', 'swarm', 'mesos-master',\n 'mesos-marathon', 'mesos-slave', 'zookeeper',\n ]\n if any(\"deis/{}\".format(c) in repo for c in blacklisted):\n raise PermissionDenied(\"Repository name {} is not allowed\".format(repo))", "def test_check_org_on_whitelist_false(self):\n\n org_name = 'AS15169 Google LLC'\n\n result = check_org_on_whitelist(org_name, self.pattern_org)\n\n self.assertFalse(result)", "def check_if_token_is_valid(token):\n if token is None:\n return\n try:\n jwt.decode(\n token,\n key=current_app.config['JWT_KEY'],\n audience=current_app.config['AUTH0_BASE_URL'] + '/api/v2/',\n issuer=current_app.config['AUTH0_BASE_URL'] + '/')\n except (jwt.JWTError,\n jwk.JWKError,\n jwt.ExpiredSignatureError,\n jwt.JWTClaimsError,\n AttributeError,\n AssertionError,\n IndexError):\n return False\n else:\n return True", "def verify_token(self, token):\n return False", "def accessCheck(self) -> None:\n\n if self.access_token:\n return\n self.access_token = self.login()", "def is_anonymous_access_allowed(self):\n return self._is_anonymous_access_allowed", "def is_allowed(self) -> bool:\n return self.effect == ALLOW_ACCESS", "def test_unused_token_is_valid(self):\n assert self.token.is_valid()", "def verifyAccessToken(state, access_token, user_id):\n # Check if the provided state token is valid\n if state != user_session['state']:\n return False\n # Check if the provided access token is valid\n url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'\n % access_token)\n http = httplib2.Http()\n result = json.loads(http.request(url, 'GET')[1])\n if result.get('error') is not None:\n return False\n # Check if the provided user_id is valid\n elif user_id != result['user_id']:\n return False\n # Check if the access token corresponds to correct client id\n elif 
result['issued_to'] != CLIENT_ID:\n return False\n else:\n return True", "def check_auth_token_validity(self):\n endpoint = self.url + 'api/v1/jobs'\n response = requests.get(endpoint, headers=self.authorization())\n if response.status_code != 200:\n self.print_error_response(response, \"detail\")\n return response.status_code == 200", "def check_auth_token_validity(self):\n endpoint = self.url + 'api/v1/jobs'\n response = requests.get(endpoint, headers=self.authorization())\n if response.status_code != 200:\n self.print_error_response(response, \"detail\")\n return response.status_code == 200", "def check_banned(deck_format, card_name):\n if card_name in consts.BANNINGS[deck_format]:\n return True\n return False", "def is_authorized(self) -> bool:\n\t\tif \"access_token\" in session:\n\t\t\tif session.get(\"access_token\") is not None:\n\t\t\t\tif \"user\" in session:\n\t\t\t\t\treturn True\n\t\treturn False", "def token_valid_check(start_time):\n #calculate the time elapsed since token was last refreshed\n elapsed_time = time.time() - start_time\n #take action if token is expired\n if elapsed_time > 3540:\n return False\n return True", "def test_cannot_logout_with_blacklisted_token(self):\n reply = self.admin_register()\n user = dict(\n username='jonnie',\n password='Andela8'\n )\n resp = self.client.post(\n '/api/v1/login',\n content_type='application/json',\n data=json.dumps(user)\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Login sucessful!')\n self.assertTrue(reply['token'])\n self.assertEqual(resp.status_code, 200)\n\n token = reply['token']\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are already logged out!')\n self.assertEqual(resp.status_code, 404)", "def is_expired(self):\n if self.access_token is None:\n logging.debug('Access token not found')\n return True\n else:\n return (self.expiration <= datetime.now())", "def _is_ticket_blocked(self, registration, **kwargs):\n if not self._is_ticketing_handled(registration.registration_form):\n return False\n req = registration.cern_access_request\n return not req or not req.is_active or not req.has_identity_info", "def blacklist_token(token):\n curr_token = BlacklistToken(token=token)\n try:\n db.session.add(curr_token)\n db.session.commit()\n except:\n return False\n return True", "def _is_link_allowed(self, link):\n denied = [re.match(r, link) for r in self.crawl_rules_deny]\n denied = [x for x in denied if x is not None]\n\n crawl_rules_allow = self.crawl_rules_allow\n if not self.crawl_rules_allow:\n crawl_rules_allow = (\".*\",)\n \n allowed = [re.match(r, link) for r in crawl_rules_allow]\n allowed = [x for x in allowed if x is not None]\n\n return not bool(denied) and bool(allowed)", "def blacklist_token(token, user):\r\n user = User.query.filter_by(username=user).first()\r\n user.login_status = False\r\n token = Token.query.filter_by(token=token).first()\r\n token.blacklist = True\r\n db.session.commit()\r\n return {'Message': 'You have successfully logged out', \"Status\": \"Success\"}, 201", "def _validate_token(self):\n if not self.token:\n self.login()\n if not self.token:\n # TODO: 
create exception for this\n # Access is denied!!\n raise Exception(\"AccessDenied\")", "def test_tenant_secret_page_on_marketing_domain_not_be_accessible(self):\n response = self.client.get(\n self.secret_url, HTTP_HOST=\"landingpage.com\")\n self.assertEqual(response.status_code, 403)", "def test_if_not_available_for_unauthorized(self):\r\n res = self.not_authenticated.get(reverse(LIST_USER_URL),data={})\r\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def ignore_listings(name_key):\n # for blacklist_str in models_blacklist:\n # if blacklist_str in name_key:\n # return True\n return False", "def test_verifies_bearer_token(self):\n\n badgr = self.get_badgr_setup()\n\n # _token_data isn't meant to be exposed; pylint: disable=W0212\n self.assertEqual(badgr._token_data['token_type'], \"Bearer\")\n self.assertEqual(badgr._token_data['access_token'],\n self._sample_token)", "def test_get_all_tokens_anonymous_user(self):\r\n\r\n # Anonymoues users should be unauthorized, no matter which kind of token are requesting\r\n res = self.app.get('/api/token')\r\n err = json.loads(res.data)\r\n\r\n assert res.status_code == 401, err\r\n assert err['status'] == 'failed', err\r\n assert err['status_code'] == 401, err\r\n assert err['exception_cls'] == 'Unauthorized', err\r\n assert err['target'] == 'token', err", "def test_visible_blacklisted(self):\n\n self.feature_test.set_percentage(100)\n self.feature_test.add_to_blacklist(3)\n self.assertFalse(self.feature_test.is_visible(3))", "def is_accessible(url: str) -> bool:\n try:\n return requests.get(url).status_code == requests.codes.ok\n except Exception:\n return False", "def is_authorized():\n return CentralStorageClient.token is not None", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def check_allowed(self):\n if self.state_model.op_state in [\n DevState.FAULT,\n DevState.UNKNOWN,\n DevState.DISABLE,\n ]:\n return False\n\n return True", "def verify_token(token):\n try:\n idinfo = client.verify_id_token(token, app.config['GOOGLE_CLIENT_ID'])\n if idinfo['iss'] not in [\n 'accounts.google.com',\n 'https://accounts.google.com'\n ]:\n raise crypt.AppIdentityError(\"Wrong issuer.\")\n except crypt.AppIdentityError:\n return False\n return True", "def heartbeat(self) -> bool:\n access_token = self.get_access_token()\n\n if not access_token:\n return False\n\n response = get(\n f\"{self.endpoint}/hb/\",\n headers={\"Authorization\": f\"Bearer {access_token}\"},\n )\n\n if response.status_code == 401:\n return False\n\n is_valid = response.json().get(\"valid\")\n\n if not is_valid:\n return False\n\n return True", "def is_telescope_off_allowed(self):\n handler = self.get_command_object(\"TelescopeOff\")\n return handler.check_allowed()", "def test_check_org_short_on_whitelist_false(self):\n\n org_name = 'AS10429'\n\n result = check_org_on_whitelist(org_name, self.pattern_org)\n\n self.assertFalse(result)", "def is_valid(url):\n\n HAVERFORD_TOKEN = 'Haverford users only'\n INVALID_TOKENS = [HAVERFORD_TOKEN, \"Site Intel\", \"SITE 
Institute\"]\n content = urlopen(url).read()\n\n for token in INVALID_TOKENS:\n if token in content:\n return False\n return True", "def has_user(self):\n\t\treturn len( self.a_token ) > 0 and len( self.a_secret ) > 0", "def test_need_login_to_see_usagelist(self):\n response = self.client.get(reverse('api_v1:usage-list'), follow=True)\n self.assertEqual(response.status_code, 403)", "def test_if_forbiden_for_authenticated_permissions(self):\r\n res = self.client_authenticated.get(reverse(LIST_USER_URL),data={})\r\n self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)", "def disable_access_token(self):\n url, params, headers = self.request(\"/disable_access_token\", method='POST')\n\n return self.rest_client.POST(url, params, headers)" ]
[ "0.7900954", "0.74636424", "0.74054307", "0.73200846", "0.7160083", "0.71336967", "0.6999018", "0.697642", "0.67577994", "0.6653733", "0.6649965", "0.66363907", "0.66145855", "0.64979804", "0.6386341", "0.636993", "0.63569194", "0.6339204", "0.63004833", "0.62949896", "0.62450945", "0.6209427", "0.6158428", "0.6086632", "0.6080808", "0.60647404", "0.6051357", "0.6022116", "0.60079026", "0.59871256", "0.59343904", "0.59270805", "0.5900796", "0.58851445", "0.5862862", "0.586079", "0.58302563", "0.5823252", "0.5816491", "0.58098143", "0.5782112", "0.57656705", "0.57574594", "0.57276595", "0.57208824", "0.5719351", "0.5715683", "0.57111454", "0.5705007", "0.5671394", "0.56618476", "0.5657375", "0.5653217", "0.56444085", "0.5634838", "0.56178117", "0.56134564", "0.5579422", "0.5549546", "0.55326277", "0.5532017", "0.549032", "0.5490047", "0.547196", "0.5468187", "0.5465556", "0.5450472", "0.5443549", "0.5439098", "0.5439098", "0.5431678", "0.54177773", "0.5411162", "0.5406254", "0.53875786", "0.53808004", "0.5360327", "0.5358049", "0.53543866", "0.53505695", "0.5348177", "0.5347439", "0.5342594", "0.5341971", "0.53243273", "0.53198624", "0.5315739", "0.5306164", "0.5302101", "0.5302101", "0.52980196", "0.52978766", "0.5296097", "0.5290443", "0.5290193", "0.5289387", "0.5270906", "0.52690166", "0.52520394", "0.52417713" ]
0.69158584
8
{get} / Easy check Easy for health check. Healthy 1.0.0
def index():
    logging.debug('Healthy check.')
    pass  # healthy check
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def health_check():\n app.logger.info(\"Health Check!\")\n return Response(\"All Good!\", status=200)", "def health_check(request):\n return Response(\"OK\",\n status=status.HTTP_200_OK)", "def health_check():\n # TODO: implement any other checking logic.\n return '', 200", "def healthcheck():\n return make_response(jsonify(status=200, message='Healthy'), status.HTTP_200_OK)", "def _healthcheck():\n return '', 200", "def health_check():\n return \"Comet-API\"", "def url_health():\n return \"OK\"", "def test_health_get(self):\n pass", "def get_health_check(self):\n return util.create_response(output=\"OK\")", "async def health(request):\n return web.Response(text=\"ok\")", "def health_check():\n return dict(api_status='OK')", "def health_check():\n now = datetime.datetime.now()\n return make_response(jsonify({'Alive': f'{now.strftime(\"%Y-%m-%d %H:%M\")}'}), 200)", "def test_simple_health_check(self):\n response = self.client.open(\n '/awadallah/VaultsManager/1.0.0/health',\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_health(self):\n self.assert_request('get', '/_health')", "def test_health_check(self):\n result = self.app.get('/v1/health')\n\n # assert the status code of the response 200 (OK)\n self.assertEqual(result.status_code, 200)\n self.assertEqual(result.data, b'UP')", "def health_check(request):\n response = {\"Status\": True}\n return JsonResponse(response, safe=False)", "async def check_health():\n return {\"healthy\": True}", "def health_check():\n ret = {\"Status\": 200, \"Msg\": \"Service is Up\"}\n return jsonify(ret)", "def health():\n global _is_healthy\n template = render_template('health.html', healthy=_is_healthy)\n return make_response(template, 200 if _is_healthy else 500)", "def healthcheck(self):\n url = urljoin(self.url, \"/.well-known/healthcheck.json\")\n r = requests.get(url)\n return r.json()", "def test_health(self) -> None:\n self._response = self._app.get('/health')\n\n self.assertEqual(self._response.status, '200 OK')", "def healthcheck(parameters): \n\n print(\"In healthcheck module\")", "def test_fake_health_get(self):\n pass", "def test_health_endpoint(client):\n\n result = client.get('/health')\n\n assert result.status_code == 200\n assert result.json == {'status': 'Ok'}", "def get_health(self):\n return {'status': 'ok'}", "def test_health_endpoint(self):\n url = f\"{BASE_URL}/health\"\n response = requests.get(url)\n response_json = response.json()\n assert response.status_code == 200\n assert response_json['status'] == 200", "def healthcheck(url):\n try:\n r = requests.get('http://localhost:5000/healthcheck')\n output = r.json()\n _ = output['Success']\n return True\n except:\n return False", "def health(self) -> Dict[str, str]:\n return self.http.get(self.config.paths.health)", "def test_healthz(client):\n response = client.get(\"/healthz\")\n assert response.status_code == 200", "def test_get_healthz(self):\n response = self.client.open(\n '/v1/healthz',\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def handle_health():\n return flask.jsonify(status=\"up\")", "def health_check(self) -> Optional[str]:\n return pulumi.get(self, \"health_check\")", "def health_check(self) -> Optional[str]:\n return pulumi.get(self, \"health_check\")", "def health_check(self) -> Optional[str]:\n return pulumi.get(self, \"health_check\")", "def health_check(self) -> Optional[str]:\n return pulumi.get(self, \"health_check\")", "def 
health_check(self) -> Optional[str]:\n return pulumi.get(self, \"health_check\")", "def health_check(self) -> Optional[str]:\n return pulumi.get(self, \"health_check\")", "def test_health_check(self):\n self.url = reverse(\"health-check\")\n response = self.client.get(self.url, **self.auth_headers)\n self.assertEqual(200, response.status_code)", "async def health(request):\n\n r = dict(\n health='green'\n )\n\n return json(r)", "async def api_healthcheck(self) -> Optional[Exception]:\n try:\n await self._client.get(\"/health\")\n return None\n except Exception as exc:\n return exc", "def healthcare():", "def health_check(cls):\n cb = cls.CACHE_BACKEND()\n return cb.health_check()", "def health():\n content = Markup(markdown.markdown(\"The server is healthy!\"))\n return content", "def test_health(self):\n res = self.client().get('/')\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertIn('health', data)\n self.assertEqual(data['health'], 'Running!!')", "def health():\n return jsonify({\n 'status': 'UP',\n 'dependencies': {\n 'predixpy': predix.version,\n 'python': sys.version,\n }\n })", "def health_check(self, *, scope: Scope) -> HealthCheckStatus:", "def ping():\r\n health1 = ScoringService.get_model1() is not None # You can insert a health check here\r\n ping_response = \"Docker for Discover non prime users\"\r\n status = 200 if (health1) else 404\r\n return flask.Response(response=ping_response, status=status, mimetype='application/json')", "def health(self):\n return \"I'm Alive\"", "async def test_health():\n response = health()\n assert response\n assert {'status': 'ok'} == response", "def test_check_health(self):\n cache = DummyCache()\n ok, msg = cache.check_health()\n self.assertTrue(ok)", "def test_get_hyperflex_health_list(self):\n pass", "def check_health(self):\n return defer.succeed(True)", "def ping():\n requestor = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)\n logger.info(f\"Health check requested by ip='{requestor}'\")\n return make_response(\n jsonify(status=\"Serving\",\n body=\"pong\"), 200)", "def api_health(self):\n return messages.SUCCESS_JSON, 200", "def on_get(self, req, resp):\n hc = HealthCheckCombined(state_manager=self.state_manager,\n orchestrator=self.orchestrator,\n extended=True)\n return hc.get(req, resp)", "def on_get(self, req, resp):\n hc = HealthCheckCombined(state_manager=self.state_manager,\n orchestrator=self.orchestrator,\n extended=False)\n return hc.get(req, resp)", "def ping():\n health = ScoringService.get_model() is not None # You can insert a health check here\n\n status = 200 if health else 404\n return flask.Response(response='\\n', status=status, mimetype='application/json')", "def test_dbhealth_check(client):\n res = client.get(\"/v0/dbcheck\")\n assert res.data == b\"Comet-API-v0\"", "def ping():\n \"\"\"Get the estimator object for this instance, loading it if it's not already loaded.\"\"\"\n checker = os.listdir('/opt/ml')\n health = checker is not None # health check here\n status = 200 if health else 404\n return flask.Response(response='\\n', status=status, mimetype='application/json')", "def check():\n hokusai.check()", "def health():\n return jsonify(hostname=hostname, uptime=uptime(), \\\n cpu_percent=int(cpu_percent(interval=None, percpu=False)))", "def get_healthcheck() -> Response:\n\n try:\n with get_cursor(db_creds, commit=False) as cur:\n cur.execute(\"SELECT * FROM events.healthchecks\")\n data = cur.fetchall()\n return jsonify(status_code=200, data=data)\n except 
psycopg2.Error as e:\n return jsonify(\n message=f\"Psycopg2 driver error: {type(e)}\",\n args=e.args,\n status_code=500,\n error_type=\"Internal Server Error\",\n )\n except Exception as e:\n return jsonify(\n message=f\"Internal Server Error: {type(e)}\",\n args=e.args,\n status_code=500,\n error_type=\"Internal Server Error\",\n )", "def get_health(self):\n return {\n 'api_name': 'BrightHive Master Client Index API',\n 'current_time': str(datetime.utcnow()),\n 'current_api_version': '1.0.0',\n 'api_status': 'OK'\n }, 200", "async def test_health_check(client: AsyncClient):\n\n response = await client.get(f\"/health-check\")\n assert response.status_code == 200\n\n data = response.json()\n assert data[\"service\"][\"status\"] == \"healthy\"\n assert data[\"service\"][\"error\"] is None\n assert data[\"database\"][\"status\"] == \"healthy\"\n assert data[\"database\"][\"error\"] is None", "def health_checks(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"health_checks\")", "def test_healthcheck(self):\n self.assertEqual(\"OK\", \"OK\")", "def health_check(name, target='TCP:22', healthy_threashold=2, unhealthy_threashold=3, interval=30, timeout=3):\n hc = HealthCheck(title=name + 'healthcheck')\n hc.HealthyThreshold = healthy_threashold\n hc.UnhealthyThreshold = unhealthy_threashold\n hc.Interval = interval\n hc.Target = target\n hc.Timeout = timeout\n return hc", "async def health(self) -> Health:\n response = await self._http_requests.get(build_url(Paths.HEALTH))\n return Health(**response.json())", "async def healthcheck(self):\n for service in self.services:\n await service.healthcheck()", "def StreamHealth(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def health_checks(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"health_checks\")", "def health_checks(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"health_checks\")", "def ping():\n\treturn HTTPResponse(status=200)", "def health_checks(self):\n return [self.check_device_connected, self.check_clear_flags]", "def get_health(self):\n return self.__healthy", "def get_health(self):\n return self.bot_client.send_command(_Command.GetHealth)", "def poll_health():\n global timesCalled\n\n # Poll /health\n session = requests.Session()\n retry = Retry(connect=3, backoff_factor=0.5)\n adapter = HTTPAdapter(max_retries=retry)\n session.mount('http://', adapter)\n response = session.get(health_url)\n\n # Check HTTP status code\n status_code = response.status_code\n if status_code != status_ok:\n exit(1)\n\n # Get metrics values\n metrics = response.json()['metrics']\n requestLatencyValues.append(metrics['requestLatency'])\n dbLatencyValues.append(metrics['dbLatency'])\n cacheLatencyValues.append(metrics['cacheLatency'])\n\n # If 60 seconds has passed, send data to STDOUT\n timesCalled += 1\n if timesCalled == 6:\n output_data()\n\n timesCalled = 0\n requestLatencyValues.clear()\n dbLatencyValues.clear()\n cacheLatencyValues.clear()", "def make_healthy():\n global _is_healthy\n _is_healthy = True\n\n template = render_template('index.html',\n hostname=gethostname(),\n zone=_get_zone(),\n template=_get_template(),\n healthy=True,\n working=_is_working())\n response = make_response(template, 302)\n response.headers['Location'] = '/'\n return response", "def status_check():\n return {\"status\": \"OK\"}", "def __check(self):\n status = '200 OK'\n try:\n response = get(self.__url)\n status = '{} {}'.format(\n response.status_code,\n 
http.client.responses[response.status_code]\n )\n except Exception as e:\n status = e.__class__.__name__\n \n if status[:3] == '200':\n self.__notify_up()\n else:\n if not self.downtime_info:\n self.downtime_info = DowntimeInfo(status)\n self.__notify_down()", "def test_get_hyperflex_health_by_moid(self):\n pass", "def get(self, req, resp):\n health_check = HealthCheck()\n # Test database connection\n try:\n now = self.state_manager.get_now()\n if now is None:\n raise Exception('None received from database for now()')\n except Exception:\n hcm = HealthCheckMessage(msg='Unable to connect to database',\n error=True)\n health_check.add_detail_msg(msg=hcm)\n\n # Test MaaS connection\n try:\n task = self.orchestrator.create_task(\n action=hd_fields.OrchestratorAction.Noop)\n maas_validation = ValidateNodeServices(task, self.orchestrator,\n self.state_manager)\n maas_validation.start()\n if maas_validation.task.get_status() == ActionResult.Failure:\n raise Exception('MaaS task failure')\n except Exception:\n hcm = HealthCheckMessage(msg='Unable to connect to MaaS',\n error=True)\n health_check.add_detail_msg(msg=hcm)\n\n if self.extended:\n resp.text = json.dumps(health_check.to_dict())\n\n if health_check.is_healthy() and self.extended:\n resp.status = falcon.HTTP_200\n elif health_check.is_healthy():\n resp.status = falcon.HTTP_204\n else:\n resp.status = falcon.HTTP_503", "def ping():\n health = AutoGluonClassifierService.load_model() is not None # You can insert a health check here\n\n status = 200 if health else 404\n return flask.Response(response='\\n', status=status, mimetype='application/json')", "def health_check():\n printed_something = False\n\n job_checks = {}\n job_names = []\n for job in config.enabled_jobs:\n spec = nomad.parse(get_job(job.template))\n printed_something |= bool(nomad.check_events_and_logs(job.name))\n for service, checks in nomad.get_health_checks_from_spec(spec):\n if not checks:\n log.warn(f'service {service} has no health checks')\n continue\n job_checks[service] = checks\n job_names.append(job.name)\n printed_something |= nomad.wait_for_service_health_checks(consul, job_names, job_checks, nowait=True)\n\n if printed_something:\n log.error('Problems detected; see logs above.')\n sys.exit(1)\n else:\n log.info('No problems detected.')", "def GetHealthStatus(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def get(self):\n log.debug('/x-tree/FSMonitor.html: invoked')\n try:\n log.info('application health check...')\n host_name = socket.gethostname()\n url = \"http://localhost:8086/query\"\n querystring = {\"pretty\": \"true\", \"db\": \"IOT\",\n \"q\": \"SELECT count(*) FROM \\\"ttd_devices\\\" \"}\n response = requests.request(\"GET\", url, params=querystring)\n D=json.loads(response.text)\n total_recs=str(max(D['results'][0]['series'][0]['values'][0][1:]))\n except:\n result = {}\n log.exception('Exception while doing HealthCheck')\n return Response ('<html><body>THE SERVER IS DOWN</body></html>', mimetype=\"text/html\", status=500)\n return Response('<html><body>INFLUX DB <p/> Count:' + total_recs + '</body></html>', mimetype=\"text/html\")", "def report_health(self):\n return True", "def GetHealthz(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def health_status(self) -> str:\n return 
pulumi.get(self, \"health_status\")", "def health_status(self) -> str:\n return pulumi.get(self, \"health_status\")", "def get_health(self):\n return self.health", "def is_healthy():\n status = HEALTH_AGGREGATOR.is_healthy()\n\n if status is True:\n status_code = 200\n else:\n status_code = 503\n\n return Response({}, status_code, mimetype='application/json')", "def health_check_host(self) -> str:\n return pulumi.get(self, \"health_check_host\")", "def is_alive():\n return jsonify({'message': 'Service is alive'}), 200", "def test_dashboard_is_up(dashboard_address):\n response = requests.get(f\"{dashboard_address}/health\")\n assert response.status_code == 200\n assert response.text == \"ok\"", "def check_status(self):", "def test_get_readiness(self):\n response = self.client.open('/api/v1//readiness',\n method='GET',\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def liveness():\n return '', 200", "def test_check_health_success(self):\n ok, msg = self.db.check_health()\n self.assertTrue(ok)" ]
[ "0.80769634", "0.78025705", "0.7788893", "0.7768372", "0.7759578", "0.769682", "0.76605624", "0.75939524", "0.75455374", "0.7503516", "0.7423095", "0.7414183", "0.7406174", "0.73959845", "0.7341213", "0.73356587", "0.73113114", "0.72746754", "0.72720677", "0.7227638", "0.7220962", "0.71524346", "0.7123918", "0.70922554", "0.7074463", "0.7058329", "0.6993246", "0.69549745", "0.6945253", "0.691792", "0.6898133", "0.6852287", "0.6852287", "0.6852287", "0.6852287", "0.6852287", "0.6852287", "0.68514115", "0.68372446", "0.6812295", "0.68119067", "0.68062484", "0.67969", "0.67825896", "0.6751185", "0.67237455", "0.670644", "0.67007804", "0.66985536", "0.6685693", "0.66813254", "0.6665845", "0.66509795", "0.6629719", "0.65989715", "0.6592443", "0.6589616", "0.65766907", "0.655154", "0.65492237", "0.65483534", "0.6490019", "0.6486222", "0.6444027", "0.64416", "0.643345", "0.64257604", "0.6420918", "0.6352851", "0.62809527", "0.6278815", "0.6278815", "0.62583596", "0.6250711", "0.62369925", "0.62353075", "0.6234593", "0.6224623", "0.62117827", "0.62028813", "0.619176", "0.61906916", "0.6152536", "0.6150153", "0.61452097", "0.61108786", "0.60934055", "0.607822", "0.6077672", "0.6077672", "0.60752517", "0.60666424", "0.6048611", "0.6043346", "0.6029362", "0.60276026", "0.5991903", "0.59915674", "0.59595466" ]
0.76294434
7
{get} /healthy Another easy check A path for another health check. Healthy 1.0.0
def index():
    logging.debug('Healthy check.')
    pass  # healthy check
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def healthcheck():\n return make_response(jsonify(status=200, message='Healthy'), status.HTTP_200_OK)", "def health_check():\n app.logger.info(\"Health Check!\")\n return Response(\"All Good!\", status=200)", "def health():\n global _is_healthy\n template = render_template('health.html', healthy=_is_healthy)\n return make_response(template, 200 if _is_healthy else 500)", "def health_check():\n # TODO: implement any other checking logic.\n return '', 200", "def health_check(request):\n return Response(\"OK\",\n status=status.HTTP_200_OK)", "def _healthcheck():\n return '', 200", "def url_health():\n return \"OK\"", "async def check_health():\n return {\"healthy\": True}", "def make_healthy():\n global _is_healthy\n _is_healthy = True\n\n template = render_template('index.html',\n hostname=gethostname(),\n zone=_get_zone(),\n template=_get_template(),\n healthy=True,\n working=_is_working())\n response = make_response(template, 302)\n response.headers['Location'] = '/'\n return response", "def get_health_check(self):\n return util.create_response(output=\"OK\")", "def health_check():\n now = datetime.datetime.now()\n return make_response(jsonify({'Alive': f'{now.strftime(\"%Y-%m-%d %H:%M\")}'}), 200)", "def is_healthy():\n status = HEALTH_AGGREGATOR.is_healthy()\n\n if status is True:\n status_code = 200\n else:\n status_code = 503\n\n return Response({}, status_code, mimetype='application/json')", "def test_health(self):\n self.assert_request('get', '/_health')", "def test_health(self) -> None:\n self._response = self._app.get('/health')\n\n self.assertEqual(self._response.status, '200 OK')", "def health(self) -> Dict[str, str]:\n return self.http.get(self.config.paths.health)", "def test_health_endpoint(client):\n\n result = client.get('/health')\n\n assert result.status_code == 200\n assert result.json == {'status': 'Ok'}", "async def health(request):\n return web.Response(text=\"ok\")", "def test_healthz(client):\n response = client.get(\"/healthz\")\n assert response.status_code == 200", "def test_simple_health_check(self):\n response = self.client.open(\n '/awadallah/VaultsManager/1.0.0/health',\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_health_get(self):\n pass", "def health_check(request):\n response = {\"Status\": True}\n return JsonResponse(response, safe=False)", "def health_check():\n ret = {\"Status\": 200, \"Msg\": \"Service is Up\"}\n return jsonify(ret)", "def health_check():\n return dict(api_status='OK')", "def test_health_check(self):\n result = self.app.get('/v1/health')\n\n # assert the status code of the response 200 (OK)\n self.assertEqual(result.status_code, 200)\n self.assertEqual(result.data, b'UP')", "def test_fake_health_get(self):\n pass", "def on_get(self, req, resp):\n hc = HealthCheckCombined(state_manager=self.state_manager,\n orchestrator=self.orchestrator,\n extended=True)\n return hc.get(req, resp)", "def on_get(self, req, resp):\n hc = HealthCheckCombined(state_manager=self.state_manager,\n orchestrator=self.orchestrator,\n extended=False)\n return hc.get(req, resp)", "def test_health_endpoint(self):\n url = f\"{BASE_URL}/health\"\n response = requests.get(url)\n response_json = response.json()\n assert response.status_code == 200\n assert response_json['status'] == 200", "def health_check():\n return \"Comet-API\"", "def handle_health():\n return flask.jsonify(status=\"up\")", "def get_health(self):\n return self.__healthy", "def get_health(self):\n return {'status': 'ok'}", "def 
test_health_check(self):\n self.url = reverse(\"health-check\")\n response = self.client.get(self.url, **self.auth_headers)\n self.assertEqual(200, response.status_code)", "async def api_healthcheck(self) -> Optional[Exception]:\n try:\n await self._client.get(\"/health\")\n return None\n except Exception as exc:\n return exc", "def ping():\n requestor = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)\n logger.info(f\"Health check requested by ip='{requestor}'\")\n return make_response(\n jsonify(status=\"Serving\",\n body=\"pong\"), 200)", "def healthcheck(self):\n url = urljoin(self.url, \"/.well-known/healthcheck.json\")\n r = requests.get(url)\n return r.json()", "def healthcheck(url):\n try:\n r = requests.get('http://localhost:5000/healthcheck')\n output = r.json()\n _ = output['Success']\n return True\n except:\n return False", "def test_get_healthz(self):\n response = self.client.open(\n '/v1/healthz',\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def health_check(name, target='TCP:22', healthy_threashold=2, unhealthy_threashold=3, interval=30, timeout=3):\n hc = HealthCheck(title=name + 'healthcheck')\n hc.HealthyThreshold = healthy_threashold\n hc.UnhealthyThreshold = unhealthy_threashold\n hc.Interval = interval\n hc.Target = target\n hc.Timeout = timeout\n return hc", "def healthcheck(parameters): \n\n print(\"In healthcheck module\")", "async def health(request):\n\n r = dict(\n health='green'\n )\n\n return json(r)", "def make_unhealthy():\n global _is_healthy\n _is_healthy = False\n\n template = render_template('index.html',\n hostname=gethostname(),\n zone=_get_zone(),\n template=_get_template(),\n healthy=False,\n working=_is_working())\n response = make_response(template, 302)\n response.headers['Location'] = '/'\n return response", "def ping():\n \"\"\"Get the estimator object for this instance, loading it if it's not already loaded.\"\"\"\n checker = os.listdir('/opt/ml')\n health = checker is not None # health check here\n status = 200 if health else 404\n return flask.Response(response='\\n', status=status, mimetype='application/json')", "async def test_health():\n response = health()\n assert response\n assert {'status': 'ok'} == response", "def health():\n content = Markup(markdown.markdown(\"The server is healthy!\"))\n return content", "def health():\n return jsonify({\n 'status': 'UP',\n 'dependencies': {\n 'predixpy': predix.version,\n 'python': sys.version,\n }\n })", "def health_check(self) -> Optional[str]:\n return pulumi.get(self, \"health_check\")", "def health_check(self) -> Optional[str]:\n return pulumi.get(self, \"health_check\")", "def health_check(self) -> Optional[str]:\n return pulumi.get(self, \"health_check\")", "def health_check(self) -> Optional[str]:\n return pulumi.get(self, \"health_check\")", "def health_check(self) -> Optional[str]:\n return pulumi.get(self, \"health_check\")", "def health_check(self) -> Optional[str]:\n return pulumi.get(self, \"health_check\")", "def desired_healthy(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"desired_healthy\")", "def ping():\r\n health1 = ScoringService.get_model1() is not None # You can insert a health check here\r\n ping_response = \"Docker for Discover non prime users\"\r\n status = 200 if (health1) else 404\r\n return flask.Response(response=ping_response, status=status, mimetype='application/json')", "def check_health(self):\n return defer.succeed(True)", "def health(self):\n return \"I'm Alive\"", "def 
api_health(self):\n return messages.SUCCESS_JSON, 200", "def health_check(cls):\n cb = cls.CACHE_BACKEND()\n return cb.health_check()", "def health_checks(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"health_checks\")", "def test_health(self):\n res = self.client().get('/')\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertIn('health', data)\n self.assertEqual(data['health'], 'Running!!')", "def healthcheck(self, request):\n statuscode = OK\n msg = self.make_response('OK')\n if self.redirector.active_node_ip_port is None:\n statuscode = SERVICE_UNAVAILABLE\n msg = self.make_response('No Active Vault')\n request.setResponseCode(statuscode, message=msg)\n request.setHeader(\"Content-Type\", 'application/json')\n # log if logging is enabled\n if self.redirector.log_enabled:\n queued = ''\n if request.queued:\n queued = 'QUEUED '\n logger.info('RESPOND %d for %s%s request for '\n '/vault-redirector-health from %s:%s',\n statuscode, queued, str(request.method),\n request.client.host, request.client.port)\n return self.make_response(self.status_response())", "def ping():\n health = ScoringService.get_model() is not None # You can insert a health check here\n\n status = 200 if health else 404\n return flask.Response(response='\\n', status=status, mimetype='application/json')", "def health():\n return jsonify(hostname=hostname, uptime=uptime(), \\\n cpu_percent=int(cpu_percent(interval=None, percpu=False)))", "async def health(self) -> Health:\n response = await self._http_requests.get(build_url(Paths.HEALTH))\n return Health(**response.json())", "def healthcare():", "def health_check(self, *, scope: Scope) -> HealthCheckStatus:", "async def test_health_check(client: AsyncClient):\n\n response = await client.get(f\"/health-check\")\n assert response.status_code == 200\n\n data = response.json()\n assert data[\"service\"][\"status\"] == \"healthy\"\n assert data[\"service\"][\"error\"] is None\n assert data[\"database\"][\"status\"] == \"healthy\"\n assert data[\"database\"][\"error\"] is None", "def test_dbhealth_check(client):\n res = client.get(\"/v0/dbcheck\")\n assert res.data == b\"Comet-API-v0\"", "def get(self, req, resp):\n health_check = HealthCheck()\n # Test database connection\n try:\n now = self.state_manager.get_now()\n if now is None:\n raise Exception('None received from database for now()')\n except Exception:\n hcm = HealthCheckMessage(msg='Unable to connect to database',\n error=True)\n health_check.add_detail_msg(msg=hcm)\n\n # Test MaaS connection\n try:\n task = self.orchestrator.create_task(\n action=hd_fields.OrchestratorAction.Noop)\n maas_validation = ValidateNodeServices(task, self.orchestrator,\n self.state_manager)\n maas_validation.start()\n if maas_validation.task.get_status() == ActionResult.Failure:\n raise Exception('MaaS task failure')\n except Exception:\n hcm = HealthCheckMessage(msg='Unable to connect to MaaS',\n error=True)\n health_check.add_detail_msg(msg=hcm)\n\n if self.extended:\n resp.text = json.dumps(health_check.to_dict())\n\n if health_check.is_healthy() and self.extended:\n resp.status = falcon.HTTP_200\n elif health_check.is_healthy():\n resp.status = falcon.HTTP_204\n else:\n resp.status = falcon.HTTP_503", "def is_healthy(self) -> bool:\n return False", "def get_health(self):\n return self.bot_client.send_command(_Command.GetHealth)", "async def healthcheck(self):\n for service in self.services:\n await service.healthcheck()", "def get_healthcheck() -> Response:\n\n try:\n with 
get_cursor(db_creds, commit=False) as cur:\n cur.execute(\"SELECT * FROM events.healthchecks\")\n data = cur.fetchall()\n return jsonify(status_code=200, data=data)\n except psycopg2.Error as e:\n return jsonify(\n message=f\"Psycopg2 driver error: {type(e)}\",\n args=e.args,\n status_code=500,\n error_type=\"Internal Server Error\",\n )\n except Exception as e:\n return jsonify(\n message=f\"Internal Server Error: {type(e)}\",\n args=e.args,\n status_code=500,\n error_type=\"Internal Server Error\",\n )", "def ping():\n\treturn HTTPResponse(status=200)", "def ping():\n health = AutoGluonClassifierService.load_model() is not None # You can insert a health check here\n\n status = 200 if health else 404\n return flask.Response(response='\\n', status=status, mimetype='application/json')", "def poll_health():\n global timesCalled\n\n # Poll /health\n session = requests.Session()\n retry = Retry(connect=3, backoff_factor=0.5)\n adapter = HTTPAdapter(max_retries=retry)\n session.mount('http://', adapter)\n response = session.get(health_url)\n\n # Check HTTP status code\n status_code = response.status_code\n if status_code != status_ok:\n exit(1)\n\n # Get metrics values\n metrics = response.json()['metrics']\n requestLatencyValues.append(metrics['requestLatency'])\n dbLatencyValues.append(metrics['dbLatency'])\n cacheLatencyValues.append(metrics['cacheLatency'])\n\n # If 60 seconds has passed, send data to STDOUT\n timesCalled += 1\n if timesCalled == 6:\n output_data()\n\n timesCalled = 0\n requestLatencyValues.clear()\n dbLatencyValues.clear()\n cacheLatencyValues.clear()", "def __check(self):\n status = '200 OK'\n try:\n response = get(self.__url)\n status = '{} {}'.format(\n response.status_code,\n http.client.responses[response.status_code]\n )\n except Exception as e:\n status = e.__class__.__name__\n \n if status[:3] == '200':\n self.__notify_up()\n else:\n if not self.downtime_info:\n self.downtime_info = DowntimeInfo(status)\n self.__notify_down()", "def test_get_hyperflex_health_list(self):\n pass", "def health_checks(self) -> Dict[str, str]:\n try:\n self.get_object_information('/')['ResponseMetadata']['HTTPStatusCode']\n except Exception:\n return dict(clouddirectory_health_status='unhealthy')\n else:\n return dict(clouddirectory_health_status='ok')", "def health_checks(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"health_checks\")", "def health_checks(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"health_checks\")", "def is_alive():\n return jsonify({'message': 'Service is alive'}), 200", "def index():\n global _is_healthy\n return render_template('index.html',\n hostname=gethostname(),\n zone=_get_zone(),\n template=_get_template(),\n healthy=_is_healthy,\n working=_is_working())", "def GetHealthStatus(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def get(self):\n log.debug('/x-tree/FSMonitor.html: invoked')\n try:\n log.info('application health check...')\n host_name = socket.gethostname()\n url = \"http://localhost:8086/query\"\n querystring = {\"pretty\": \"true\", \"db\": \"IOT\",\n \"q\": \"SELECT count(*) FROM \\\"ttd_devices\\\" \"}\n response = requests.request(\"GET\", url, params=querystring)\n D=json.loads(response.text)\n total_recs=str(max(D['results'][0]['series'][0]['values'][0][1:]))\n except:\n result = {}\n log.exception('Exception while doing HealthCheck')\n return Response ('<html><body>THE 
SERVER IS DOWN</body></html>', mimetype=\"text/html\", status=500)\n return Response('<html><body>INFLUX DB <p/> Count:' + total_recs + '</body></html>', mimetype=\"text/html\")", "def test_get_hyperflex_health_by_moid(self):\n pass", "def get_health(self):\n return {\n 'api_name': 'BrightHive Master Client Index API',\n 'current_time': str(datetime.utcnow()),\n 'current_api_version': '1.0.0',\n 'api_status': 'OK'\n }, 200", "def test_readiness_endpoint(self):\n url = f'{BASE_URL}/ready'\n response = requests.get(url)\n response_json = response.json()\n assert response.status_code == 503\n assert response_json['status'] == 503", "def current_healthy(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"current_healthy\")", "def is_healthy(self) -> bool:\n try:\n self.health()\n except MeiliSearchError:\n return False\n return True", "def test_check_health(self):\n cache = DummyCache()\n ok, msg = cache.check_health()\n self.assertTrue(ok)", "def GetHealthz(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def test_heartbeat( self ):\n with self.app.app_context():\n url = '/donation/heartbeat'\n\n # Ensure a GET with no saved caged_donors returns 0.\n response = self.test_client.get( url, headers=self.headers )\n self.assertEqual( response.status_code, status.HTTP_200_OK )", "def is_healthy(self, metric_results):\n raise NotImplementedError()", "def test_healthcheck(self):\n self.assertEqual(\"OK\", \"OK\")", "def test_dashboard_is_up(dashboard_address):\n response = requests.get(f\"{dashboard_address}/health\")\n assert response.status_code == 200\n assert response.text == \"ok\"", "def _route_get(self):\n if self.path == '/status':\n self._create_status()\n else:\n self._create_method_not_allowed()", "def ping_response():\n\n return Response(\"ok\", status=200)", "def health_http_uri(self) -> Optional[str]:\n return pulumi.get(self, \"health_http_uri\")" ]
[ "0.7397413", "0.73224634", "0.73210156", "0.7185449", "0.70526415", "0.70526195", "0.70261866", "0.68668026", "0.67948645", "0.6786084", "0.6785235", "0.6748028", "0.667565", "0.6667806", "0.6618256", "0.6600627", "0.658808", "0.65732", "0.6528593", "0.652601", "0.65006965", "0.64928883", "0.64801455", "0.6476352", "0.63759875", "0.6343168", "0.633779", "0.6301957", "0.62945306", "0.62870985", "0.6276816", "0.6202588", "0.6199872", "0.6181164", "0.61757594", "0.6172827", "0.61440796", "0.6141847", "0.6137912", "0.6115325", "0.61103183", "0.6057641", "0.6050275", "0.60296094", "0.6014095", "0.60111254", "0.59958583", "0.59958583", "0.59958583", "0.59958583", "0.59958583", "0.59958583", "0.5941254", "0.5898708", "0.589373", "0.58919173", "0.58912057", "0.58870465", "0.5886789", "0.58401716", "0.5832451", "0.5829143", "0.58287185", "0.58065575", "0.5797169", "0.5781834", "0.5778894", "0.5769488", "0.5749306", "0.57260036", "0.5666639", "0.5661083", "0.5652648", "0.56416315", "0.5633212", "0.56312394", "0.56229985", "0.5614953", "0.56073666", "0.5599194", "0.5599194", "0.5577636", "0.555926", "0.552855", "0.5520156", "0.5507846", "0.55062884", "0.54884416", "0.54862845", "0.5483366", "0.5471988", "0.54689854", "0.545958", "0.54571843", "0.544833", "0.5447783", "0.54110014", "0.5406859", "0.54060465" ]
0.672783
13
It propagates a given input through the network; all intermediate data are stored in each layer's "y" and "v" entries.
def flow(input_):
    global number_of_neurons_by_layer
    if len(input_) != number_of_neurons_by_layer[0]:
        raise IndexError(
            f"\033[91mInput length is incorrect. It must be {number_of_neurons_by_layer[0]}.\033[m")
    layers[0]["y"][1:] = np.array(input_).flatten().reshape(len(input_), 1)
    for i_lay in range(1, len(layers)):
        layers[i_lay]["v"][:] = logistic(
            layers[i_lay]["weigths"] @ layers[i_lay-1]["y"]
        )
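A minimal sketch of the module-level state the snippet above relies on but does not define. The names, array shapes, the bias row at index 0 of each "y", and the "weigths" key spelling are assumptions inferred from how flow() indexes those globals, not part of the original document.

# Minimal sketch (assumptions labeled above): globals that flow() expects.
import numpy as np

def logistic(v):
    # standard logistic activation, applied elementwise
    return 1.0 / (1.0 + np.exp(-v))

number_of_neurons_by_layer = [2, 3, 1]  # hypothetical topology

# layer 0 only holds the input vector; y[0] acts as the bias entry
layers = [{"y": np.ones((number_of_neurons_by_layer[0] + 1, 1))}]
for n_prev, n_cur in zip(number_of_neurons_by_layer[:-1], number_of_neurons_by_layer[1:]):
    layers.append({
        # one weight column per previous-layer y entry, including the bias row;
        # key spelled "weigths" to match the lookup inside flow()
        "weigths": np.random.randn(n_cur, n_prev + 1) * 0.1,
        "v": np.zeros((n_cur, 1)),
        "y": np.ones((n_cur + 1, 1)),
    })

flow([0.5, -0.3])  # fills layers[i]["v"] for every layer past the input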
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def trainNet():", "def forward(self, x):\n # sources保存特征图,loc与conf保存所有PriorBox的位置与类别预测特征\n sources = list()\n loc = list()\n conf = list()\n\n # 对输入图像卷积到conv4_3,将特征添加到sources中\n for k in range(23):\n x = self.vgg[k](x)\n\n s = self.L2Norm(x)\n sources.append(s)\n\n # 继续卷积到conv7,将特征添加到sources中\n for k in range(23, len(self.vgg)):\n x = self.vgg[k](x)\n sources.append(x)\n\n # 继续利用额外的卷积层计算,并将特征添加到sources中\n for k, v in enumerate(self.extras):\n x = F.relu(v(x), inplace=True)\n if k % 2 == 1: # 间隔一层\n sources.append(x)\n\n # 对sources中的特征图利用类别与位置网络进行卷积计算,并保存到loc与conf中\n for (x, l, c) in zip(sources, self.loc, self.conf):\n loc.append(l(x).permute(0, 2, 3, 1).contiguous())\n conf.append(c(x).permute(0, 2, 3, 1).contiguous())\n\n loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)\n conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)\n\n if self.phase == \"test\":\n output = self.detect(\n loc.view(loc.size(0), -1, 4), # loc preds\n self.softmax(conf.view(conf.size(0), -1, self.num_classes)), # conf preds\n self.priors.type(type(x.data)) # default boxes\n )\n else:\n # 对于训练来说,output包括了loc与conf的预测值以及PriorBox的信息\n output = (\n loc.view(loc.size(0), -1, 4),\n conf.view(conf.size(0), -1, self.num_classes),\n self.priors\n )\n return output", "def epoch(self, v, expected):\n self.V = []\n self.O_hidden = []\n self.O_output = []\n self.D_1 = []\n\n self.error = []\n\n\n self.forward(np.transpose([v]), np.transpose([expected]))\n self.backward()", "def forward(self, x): \n pal1_sources = list()\n pal2_sources = list()\n loc_pal1 = list()\n conf_pal1 = list()\n loc_pal2 = list()\n conf_pal2 = list()\n\n # apply vgg up to conv3_3 relu\n for k in range(16):\n x = self.vgg[k](x)\n\n of1 = x\n s = self.L2Normof1(of1)\n pal1_sources.append(s)\n \n # apply vgg up to conv4_3 relu\n for k in range(16, 23):\n x = self.vgg[k](x)\n\n of2 = x\n s = self.L2Normof2(of2)\n pal1_sources.append(s)\n\n # apply vgg up to conv5_3 relu\n for k in range(23, 30):\n x = self.vgg[k](x)\n of3 = x\n s = self.L2Normof3(of3)\n pal1_sources.append(s)\n\n # apply vgg up to fc7\n for k in range(30, len(self.vgg)):\n x = self.vgg[k](x)\n of4 = x\n pal1_sources.append(of4)\n \n # apply extra layers and cache source layer outputs\n for k in range(2):\n x = F.relu(self.extras[k](x), inplace=True)\n of5 = x\n pal1_sources.append(of5)\n for k in range(2, 4):\n x = F.relu(self.extras[k](x), inplace=True)\n of6 = x\n pal1_sources.append(of6)\n\n ## fpn module\n \"\"\"\n lfpn6 = self.fpn_topdown6(of6)\n lfpn5 = self._upsample_product(self.fpn_topdown5(of6), self.fpn_latlayer5(of5))\n lfpn4 = self._upsample_product(self.fpn_topdown4(of5), self.fpn_latlayer4(of4))\n lfpn3 = self._upsample_product(self.fpn_topdown3(of4), self.fpn_latlayer3(of3))\n lfpn2 = self._upsample_product(self.fpn_topdown2(of3), self.fpn_latlayer2(of2))\n lfpn1 = self._upsample_product(self.fpn_topdown1(of2), self.fpn_latlayer1(of1))\n\n\n ef1 = self.fpn_fem3_3(lfpn1)\n ef1 = self.L2Normef1(ef1)\n ef2 = self.fpn_fem4_3(lfpn2)\n ef2 = self.L2Normef2(ef2)\n ef3 = self.fpn_fem5_3(lfpn3)\n ef3 = self.L2Normef3(ef3)\n\n ef4 = self.fpn_fem7(lfpn4)\n ef5 = self.fpn_fem6_2(lfpn5)\n ef6 = self.fpn_fem7_2(lfpn6)\n \"\"\"\n\n conv7 = F.relu(self.fpn_topdown[0](of6), inplace=True)\n x = F.relu(self.fpn_topdown[1](conv7), inplace=True)\n conv6 = F.relu(self._upsample_product(x, self.fpn_latlayer[0](of5)), inplace=True)\n\n x = F.relu(self.fpn_topdown[2](conv6), inplace=True)\n convfc7_2 = F.relu(self._upsample_product(x, self.fpn_latlayer[1](of4)), inplace=True)\n\n x = 
F.relu(self.fpn_topdown[3](convfc7_2), inplace=True)\n conv5 = F.relu(self._upsample_product(x, self.fpn_latlayer[2](of3)), inplace=True)\n\n x = F.relu(self.fpn_topdown[4](conv5), inplace=True)\n conv4 = F.relu(self._upsample_product(x, self.fpn_latlayer[3](of2)), inplace=True)\n\n x = F.relu(self.fpn_topdown[5](conv4), inplace=True)\n conv3 = F.relu(self._upsample_product(x, self.fpn_latlayer[4](of1)), inplace=True)\n\n ef1 = self.fpn_fem[0](conv3)\n ef1 = self.L2Normef1(ef1)\n ef2 = self.fpn_fem[1](conv4)\n ef2 = self.L2Normef2(ef2)\n ef3 = self.fpn_fem[2](conv5)\n ef3 = self.L2Normef3(ef3)\n ef4 = self.fpn_fem[3](convfc7_2)\n ef5 = self.fpn_fem[4](conv6)\n ef6 = self.fpn_fem[5](conv7)\n\n pal2_sources = (ef1, ef2, ef3, ef4, ef5, ef6)\n\n ## first shot \n for (x, l, c) in zip(pal1_sources, self.loc_pal1, self.conf_pal1):\n loc_pal1.append(l(x).permute(0, 2, 3, 1).contiguous())\n conf_pal1.append(c(x).permute(0, 2, 3, 1).contiguous())\n \n ## second shot\n for (x, l, c) in zip(pal2_sources, self.loc_pal2, self.conf_pal2):\n loc_pal2.append(l(x).permute(0, 2, 3, 1).contiguous())\n conf_pal2.append(c(x).permute(0, 2, 3, 1).contiguous())\n\n # first shot\n loc_pal1 = torch.cat([o.view(o.size(0), -1) for o in loc_pal1], 1)\n conf_pal1 = torch.cat([o.view(o.size(0), -1) for o in conf_pal1], 1)\n \n # second shot\n loc_pal2 = torch.cat([o.view(o.size(0), -1) for o in loc_pal2], 1)\n conf_pal2 = torch.cat([o.view(o.size(0), -1) for o in conf_pal2], 1)\n\n if self.phase == 'test':\n # 测试时, 仅使用shot2 的输出\n output = self.detect(\n loc_pal2.view(loc_pal2.size(0), -1, 4),\n self.softmax(conf_pal2.view(conf_pal2.size(0), -1,\n self.num_classes)), # conf preds\n )\n else:\n ## 训练时,使用shot1 和 shot2 的输出\n output = (\n loc_pal1.view(loc_pal1.size(0), -1, 4),\n conf_pal1.view(conf_pal1.size(0), -1, self.num_classes),\n loc_pal2.view(loc_pal2.size(0), -1, 4),\n conf_pal2.view(conf_pal2.size(0), -1, self.num_classes))\n return output", "def forward(self, x):\n x=T.div(x,255.0)\n \n #print(state[20:,20:,0])\n #print(state[:,0,:,:])\n conv1 = F.relu(self.conv1(x))\n conv2 = F.relu(self.conv2(conv1))\n conv3 = F.relu(self.conv3(conv2))\n ###\n conv_state = conv3.view(conv3.size()[0], -1)\n flat1 = F.relu(self.fc1(conv_state))\n flat2 = F.relu(self.fc2(flat1))\n\n V = self.V(flat2)\n A = self.A(flat2)\n\n return V, A\n return x", "def arch(self, nn) -> 'final node of the tensor flow graph (y_conv)':\n\n print(self)\n\n # first conv. layer \n # 5x5 filter, 1 input channel, 32 output channels\n W_conv1 = nn.weight_variable([5, 5, 1, 32])\n b_conv1 = nn.bias_variable([32])\n stride1 = 1\n h_conv1 = tf.nn.relu(nn.conv2d(nn.x_image, W_conv1, stride1, 'VALID') + b_conv1) \n # outputs a 24x24x32 image\n \n # first pooling layer (2x2) \n h_pool1 = nn.max_pool_2x2(h_conv1) \n # outputs a 12x12x32 image\n\n # second conv. layer \n # 3x3 filter, 32 input channel, 32 output channels\n W_conv2 = nn.weight_variable([3, 3, 32, 32])\n b_conv2 = nn.bias_variable([32])\n stride2 = 1\n h_conv2 = tf.nn.relu(nn.conv2d(h_pool1, W_conv2, stride2, 'VALID') + b_conv2)\n # outputs a 10x10x32 image\n\n # third conv. 
layer\n # 3x3 filter, 32 input channel, 32 output channels\n W_conv3 = nn.weight_variable([3, 3, 32, 32])\n b_conv3 = nn.bias_variable([32])\n stride3 = 1\n h_conv3 = tf.nn.relu(nn.conv2d(h_conv2, W_conv3, stride3, 'VALID') + b_conv3)\n # outputs a 8x8x32 image\n\n # reshape (flatten) output\n h_conv3_flat = tf.reshape(h_conv3, [-1, 8*8*32])\n\n # first fully connected layer\n W_fc1 = nn.weight_variable([8 * 8 * 32, 1024])\n b_fc1 = nn.bias_variable([1024])\n h_fc1 = tf.nn.relu(tf.matmul(h_conv3_flat, W_fc1) + b_fc1)\n\n # dropout\n h_fc1_drop = tf.nn.dropout(h_fc1, nn.keep_prob)\n\n # second (final) fully connected layer (softmax)\n W_fc2 = nn.weight_variable([1024, 10])\n b_fc2 = nn.bias_variable([10])\n y_conv=tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)\n\n return y_conv", "def forward(self, x):\n sources = list()\n loc = list()\n conf = list()\n\n # apply vgg up to conv4_3 relu\n #print('Reached start of vgg')\n for k in self.vgg._modules.keys():\n if int(k) < 23:\n #print('Reached ' + k + ' ', x.size())\n x = self.vgg._modules[k].cuda()(x)\n #print('Reached L2Norm')\n s = self.L2Norm(x)\n sources.append(s)\n\n #print('Reached after L2Norm')\n # apply vgg up to fc7\n for k in self.vgg._modules.keys():\n if int(k) >= 23:\n #print('Reached ' + k + ' ', x.size())\n x = self.vgg._modules[k].cuda()(x)\n sources.append(x)\n #print('Reached end of VGG')\n\n # apply extra layers and cache source layer outputs\n for k, v in enumerate(self.extras):\n x = F.relu(v(x), inplace=True)\n if k % 2 == 1:\n sources.append(x)\n\n # apply multibox head to source layers\n for (x, l, c) in zip(sources, self.loc, self.conf):\n loc.append(l(x).permute(0, 2, 3, 1).contiguous())\n conf.append(c(x).permute(0, 2, 3, 1).contiguous())\n\n loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)\n conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)\n\n if self.phase == \"test\":\n output = self.detect(\n loc.view(loc.size(0), -1, 4), # loc preds\n self.softmax(conf.view(-1, self.num_classes)), # conf preds\n self.priors # default boxes\n )\n else:\n output = (\n loc.view(loc.size(0), -1, 4),\n conf.view(conf.size(0), -1, self.num_classes),\n self.priors\n )\n return output", "def forward(self, x):\n\n def run0(x, dummy):\n lout1 = self.lconv1(x)\n out1 = self.conv1(lout1)\n lout2 = self.lconv2(out1 + lout1)\n out2 = self.conv2(lout2)\n lout3 = self.lconv3(out2 + lout2)\n out3 = self.conv3(lout3)\n lout4 = self.lconv4(out3 + lout3)\n out4 = self.conv4(lout4)\n lout5 = self.lconv5(out4 + lout4)\n out5 = self.conv5(lout5)\n lout6 = self.lconv6(out5 + lout5)\n out6 = self.conv6(lout6)\n lout7 = self.lconv7(out6 + lout6)\n out7 = self.conv7(lout7)\n mat = out7[:, :, :, None] + out7[:, :, None, :]\n cur = mat\n if self.num_1d:\n output1d = self.final_1d(out7)\n return cur, output1d\n else:\n return cur\n\n dummy = torch.Tensor(1)\n dummy.requires_grad = True\n if self.num_1d:\n cur, output1d = checkpoint(run0, x, dummy)\n else:\n cur = checkpoint(run0, x, dummy)\n\n def run1(cur):\n first = True\n for lm, m in zip(self.lconvtwos[:7], self.convtwos[:7]):\n if first:\n cur = lm(cur)\n\n first = False\n else:\n cur = lm(cur) + cur\n cur = m(cur) + cur\n return cur\n\n def run2(cur):\n for lm, m in zip(self.lconvtwos[7:13], self.convtwos[7:13]):\n cur = lm(cur) + cur\n cur = m(cur) + cur\n return cur\n\n def run3(cur):\n for lm, m in zip(self.lconvtwos[13:], self.convtwos[13:]):\n cur = lm(cur) + cur\n cur = m(cur) + cur\n\n cur = self.final(cur)\n cur = 0.5 * cur + 0.5 * cur.transpose(2, 3)\n return cur\n\n 
cur = checkpoint(run1, cur)\n cur = checkpoint(run2, cur)\n cur = checkpoint(run3, cur)\n\n if self.num_1d:\n return cur, output1d\n else:\n return cur", "def forward(self, inputs):\r\n #print (len(inputs))\r\n out = self.fc1(inputs)\r\n out = self.fc2(out)\r\n self.out = out\r\n return out\r\n #raise NotImplementedError('Implement the forward method of the model')\r", "def forward(self, x):\n for name, module in self.base._modules.items():\n if name == 'avgpool':\n break\n\n if name == 'layer3':\n l2 = Variable(x)\n\n x = Variable(module(x))\n l4 = Variable(x)\n\n \"\"\"for name, param in self.base.named_parameters():\n print(name, param.size())\n\n res50_model = self.base\n res50_conv2 = ResNet50Bottom(res50_model)\n for i,child in enumerate(self.base.children()):\n print(i)\n if i==8:\n l4=x\n break\n if i==6:\n l2=x\n x=res50_conv2(x.detach())\"\"\"\n\n s2 = l2.sum(1) #/ 100\n #\n s4 = l4.sum(1) #/ 1000\n\n\n sw2 = s2 / (s2.view(x.size(0), -1).sum(1)).unsqueeze(1).unsqueeze(2)\n\n sw4 = s4 / (s4.view(x.size(0), -1).sum(1)).unsqueeze(1).unsqueeze(2)\n\n\n l2 = l2 * sw2.unsqueeze(1)\n l4 = l4 * sw4.unsqueeze(1)\n\n \n c2 = self.inconv2(l2)\n c4 = self.inconv4(l4)\n c2 = self.bn2(c2)\n c4 = self.bn4(c4)\n \n n2 = F.softmax(torch.mean(torch.mean(c2, dim=2), dim=2), dim=1)\n n4 = F.softmax(torch.mean(torch.mean(c4, dim=2), dim=2), dim=1)\n nn2 = n2.data.cpu().numpy()\n nn4 = n4.data.cpu().numpy()\n cam2 = np.zeros((x.size(0), 28, 28), dtype=float)\n cam4 = np.zeros((x.size(0), 7, 7), dtype=float)\n\n\n for i in range(0, x.size(0)):\n for j in range(0, 2):\n temp1 = c2[i, j, :, :].data.cpu().numpy()\n temp1 = np.maximum(temp1, 0)\n temp1 = temp1 - np.min(temp1)\n temp1 = temp1 / (np.max(temp1)+1e-8)\n cam2[i] = cam2[i] + nn2[i, j] * temp1\n cam2 = torch.FloatTensor(cam2)\n l2 = l2 * (cam2.unsqueeze(1).cuda())\n l2 = self.stack1(l2)\n l2 = self.stack1_1(l2)\n\n for i in range(0, x.size(0)):\n for j in range(0, 8):\n temp2 = c4[i, j, :, :].data.cpu().numpy()\n temp2 = np.maximum(temp2, 0)\n temp2 = temp2 - np.min(temp2)\n temp2 = temp2 / (np.max(temp2)+1e-8)\n cam4[i] =cam4[i] + nn4[i, j] * temp2\n cam4 = torch.FloatTensor(cam4)\n l4 = l4 * cam4.unsqueeze(1).cuda()\n l4 = self.stack3(l4)\n X = l2.view(x.size(0), 512, 7 ** 2)\n Y = l4.view(x.size(0), 512, 7 ** 2)\n Z = self.cross_bilinear(X, Y)\n return n2, n4, Z", "def forward(self, x):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n x = self.pool1(F.relu(self.batch1(self.conv1(x))))\n x = self.pool2(F.relu(self.batch2(self.conv2(x))))\n x = F.relu(self.batch3a(self.conv3a(x)))\n x = self.pool3(F.relu(self.batch3b(self.conv3b(x))))\n x = F.relu(self.batch4a(self.conv4a(x)))\n x = self.pool4(F.relu(self.batch4b(self.conv4b(x))))\n x = F.relu(self.batch5a(self.conv5a(x)))\n x = self.pool5(F.relu(self.batch5b(self.conv5b(x))))\n x = self.avgpool(x)\n x = x.reshape(x.shape[0], -1)\n out = self.fc1(x)\n\n# raise NotImplementedError\n ########################\n # END OF YOUR CODE #\n #######################\n\n return out", "def forward(self, input_x):\n return self.net(input_x.float())", "def forward(self, x):\n sources = list()\n loc = list()\n conf = list()\n\n # apply vgg up to conv4_3 relu\n for k in range(23):\n x = self.vgg[k](x)\n\n s = self.L2Norm(x)\n sources.append(s)\n\n # apply vgg up to fc7\n for k in range(23, len(self.vgg)):\n x = self.vgg[k](x)\n sources.append(x)\n\n # apply extra layers and cache source layer outputs\n for k, v in enumerate(self.extras):\n x = F.relu(v(x), inplace=True)\n if k % 
2 == 1:\n sources.append(x)\n\n # apply multibox head to source layers\n for (x, l, c) in zip(sources, self.loc, self.conf):\n loc.append(l(x).permute(0, 2, 3, 1).contiguous())\n conf.append(c(x).permute(0, 2, 3, 1).contiguous())\n\n loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)\n conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)\n if self.phase == \"test\":\n output = self.detect(\n loc.view(loc.size(0), -1, 4), # loc preds\n self.softmax(conf.view(conf.size(0), -1,\n self.num_classes)), # conf preds\n self.priors.type(type(x.data)) # default boxes\n )\n else:\n output = (\n loc.view(loc.size(0), -1, 4),\n conf.view(conf.size(0), -1, self.num_classes),\n self.priors\n )\n return output", "def forward(self, x):\n sources = list()\n tcb_source = list()\n odm_loc = list()\n odm_conf = list()\n if self.phase == 'test':\n feat_sizes = list()\n\n # apply vgg up to conv4_3 relu and conv5_3 relu\n for k in range(self.conv5_3_layer):\n x = self.vgg[k](x)\n if self.size != 512 and self.size != 320 and self.conv3_3_layer - 1 == k:\n s = self.conv3_3_L2Norm(x)\n sources.append(s)\n if self.phase == 'test':\n feat_sizes.append(x.shape[2:])\n if self.conv4_3_layer - 1 == k:\n s = self.conv4_3_L2Norm(x)\n sources.append(s)\n if self.phase == 'test':\n feat_sizes.append(x.shape[2:])\n elif self.conv5_3_layer - 1 == k:\n s = self.conv5_3_L2Norm(x)\n sources.append(s)\n if self.phase == 'test':\n feat_sizes.append(x.shape[2:])\n\n # apply vgg up to fc7\n for k in range(self.conv5_3_layer, len(self.vgg)):\n x = self.vgg[k](x)\n sources.append(x)\n if self.phase == 'test':\n feat_sizes.append(x.shape[2:])\n\n # apply extra layers and cache source layer outputs\n for k in range(len(self.extras)):\n x = self.extras[k](x)\n if self.extra_1_layer - 1 == k:\n sources.append(x)\n if self.phase == 'test':\n feat_sizes.append(x.shape[2:])\n if (self.size == 640 or self.size == 5126) and self.extra_2_layer - 1 == k:\n sources.append(x)\n if self.phase == 'test':\n feat_sizes.append(x.shape[2:])\n\n # calculate TCB features\n p = None\n for k, v in enumerate(sources[::-1]):\n s = v\n for i in range(3):\n s = self.tcb0[(self.step-k)*3 + i](s)\n if k != 0:\n u = p\n u = self.tcb1[self.step-k](u)\n s += u\n for i in range(3):\n s = self.tcb2[(self.step-k)*3 + i](s)\n p = s\n tcb_source.append(s)\n tcb_source.reverse()\n\n # apply ODM to source layers\n for (x, l, c) in zip(tcb_source, self.odm_loc, self.odm_conf):\n odm_loc.append(l(x).permute(0, 2, 3, 1).contiguous())\n odm_conf.append(c(x).permute(0, 2, 3, 1).contiguous())\n odm_loc = torch.cat([o.view(o.size(0), -1) for o in odm_loc], 1)\n odm_conf = torch.cat([o.view(o.size(0), -1) for o in odm_conf], 1)\n\n if self.phase == \"test\":\n output = (\n odm_loc.view(odm_loc.size(0), -1, 4), # odm loc preds\n self.softmax(odm_conf.view(odm_conf.size(0), -1,\n self.num_classes)), # odm conf preds\n feat_sizes\n )\n else:\n output = (\n odm_loc.view(odm_loc.size(0), -1, 4),\n odm_conf.view(odm_conf.size(0), -1, self.num_classes),\n )\n return output", "def forward(self, inputs):\r\n\r\n assert len(inputs) == self.depth, \\\r\n \"Mismatch between input and Network scales\"\r\n\r\n y = self.rgb_to_features[self.depth - 2](inputs[self.depth - 1])\r\n y = self.layers[self.depth - 2](y)\r\n for x, block, converter in \\\r\n zip(reversed(inputs[1:-1]),\r\n reversed(self.layers[:-1]),\r\n reversed(self.rgb_to_features[:-1])):\r\n input_part = converter(x) # convert the input:\r\n y = torch.cat((input_part, y), dim=1) # concatenate the inputs:\r\n y = block(y) # 
apply the block\r\n\r\n # calculate the final block:\r\n input_part = self.final_converter(inputs[0])\r\n y = torch.cat((input_part, y), dim=1)\r\n y = self.final_block(y)\r\n\r\n # return calculated y\r\n return y", "def forward(self, x: torch.Tensor) -> torch.Tensor:\n model_output = None\n #######################################################################\n # Student code begins\n #######################################################################\n\n (N,C,H,W) = x.shape\n\n conv_features = self.conv_layers(x)\n \n flat_features = conv_features.reshape(-1, 500)\n model_output = self.fc_layers(flat_features)\n\n\n #######################################################################\n # Student code ends\n #######################################################################\n return model_output", "def arch(self, nn) -> 'final node of the tensor flow graph (y_conv)':\n\n print(self)\n\n # first conv. layer \n # 5x5 filter, 1 input channel, 32 output channels\n W_conv1 = nn.weight_variable([5, 5, 1, 32])\n b_conv1 = nn.bias_variable([32])\n stride1 = 1\n h_conv1 = tf.nn.relu(nn.conv2d(nn.x_image, W_conv1, stride1) + b_conv1)\n \n # first pooling layer (2x2) \n h_pool1 = nn.max_pool_2x2(h_conv1)\n\n # second conv. layer \n # 5x5 filter, 32 input channel, 64 output channels\n W_conv2 = nn.weight_variable([5, 5, 32, 64])\n b_conv2 = nn.bias_variable([64])\n stride2 = 1\n h_conv2 = tf.nn.relu(nn.conv2d(h_pool1, W_conv2, stride2) + b_conv2)\n\n # second pooling layer (2x2) \n h_pool2 = nn.max_pool_2x2(h_conv2)\n\n # reshape (flatten) output\n h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])\n\n # first fully connected layer\n W_fc1 = nn.weight_variable([7 * 7 * 64, 1024])\n b_fc1 = nn.bias_variable([1024])\n h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n\n # dropout\n h_fc1_drop = tf.nn.dropout(h_fc1, nn.keep_prob)\n\n # second (final) fully connected layer (softmax)\n W_fc2 = nn.weight_variable([1024, 10])\n b_fc2 = nn.bias_variable([10])\n y_conv=tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)\n\n return y_conv", "def __call__(self, x_1d, is_training, reuse=False, nfilt=32):\n with tf.variable_scope(self.name):\n x = tf.reshape(x_1d, [-1, self.input_dim, self.input_dim, self.channels])\n\n e1 = unet_conv(x, nfilt*1, 'e1', reuse, is_training)\n e2 = unet_conv(e1, nfilt*2, 'e2', reuse, is_training)\n e3 = unet_conv(e2, nfilt*4, 'e3', reuse, is_training)\n e4 = unet_conv(e3, nfilt*8, 'e4', reuse, is_training)\n e5 = unet_conv(e4, nfilt*8, 'e5', reuse, is_training)\n e6 = unet_conv(e5, nfilt*8, 'e6', reuse, is_training, s=1)\n e7 = unet_conv(e6, nfilt*8, 'e7', reuse, is_training, s=1)\n e8 = unet_conv(e7, nfilt*8, 'e8', reuse, is_training, s=1)\n\n d1 = unet_conv_t(e8, e7, nfilt*8, 'd1', reuse, is_training, s=1)\n d2 = unet_conv_t(d1, e6, nfilt*8, 'd2', reuse, is_training, s=1)\n d3 = unet_conv_t(d2, e5, nfilt*8, 'd3', reuse, is_training, s=1)\n d4 = unet_conv_t(d3, e4, nfilt*8, 'd4', reuse, is_training)\n d5 = unet_conv_t(d4, e3, nfilt*4, 'd5', reuse, is_training)\n d6 = unet_conv_t(d5, e2, nfilt*2, 'd6', reuse, is_training)\n d7 = unet_conv_t(d6, e1, nfilt*1, 'd7', reuse, is_training)\n out = unet_conv_t(\n d7, None, self.channels, 'out', reuse, is_training,\n activation=tf.nn.tanh, use_batch_norm=False, use_dropout=False)\n\n out_1d = tf.reshape(out, (-1, self.output_dim*self.output_dim*self.channels))\n\n tensors = [\n x, e1, e2, e3, e4, e5, e6, e7, e8, d1, d2, d3, d4, d5, d6, d7, out, out_1d]\n\n for tensor in tensors:\n print(tensor)\n\n return out_1d", "def 
Network_model(input_data):\n layer1_param={'weights':tf.Variable(tf.random_normal([784, no_neurons_layer1])), \n 'biases': tf.Variable(tf.random_normal([no_neurons_layer1]))}\n \n layer2_param={'weights':tf.Variable(tf.random_normal([no_neurons_layer1, no_neurons_layer2])), \n 'biases': tf.Variable(tf.random_normal([no_neurons_layer2]))}\n \n layer3_param={'weights':tf.Variable(tf.random_normal([no_neurons_layer2, no_neurons_layer3])), \n 'biases': tf.Variable(tf.random_normal([no_neurons_layer3]))}\n \n layer4_param={'weights':tf.Variable(tf.random_normal([no_neurons_layer3, no_neurons_layer4])), \n 'biases': tf.Variable(tf.random_normal([no_neurons_layer4]))}\n \n output_layer_param={'weights':tf.Variable(tf.random_normal([no_neurons_layer4, no_classes])), \n 'biases': tf.Variable(tf.random_normal([no_classes]))}\n \n #so uptill now the weights for each layer is initialized\n \n \"\"\"\n Now what will happened in each layer, I will define next. basically the weights are multiplied\n in each layer with the corresponding inputs and then it is passed through activation function \n (relu in this case) and the output is given as input to the other layer.\n sign:B-Jan\n \"\"\"\n \n l1_output= tf.add(tf.matmul(input_data,layer1_param['weights']), layer1_param['biases'])\n l1_output=tf.nn.relu(l1_output)\n \n l2_output= tf.add(tf.matmul(l1_output,layer2_param['weights']), layer2_param['biases'])\n l2_output=tf.nn.relu(l2_output)\n \n \n l3_output= tf.add(tf.matmul(l2_output,layer3_param['weights']), layer3_param['biases'])\n l3_output=tf.nn.relu(l3_output)\n \n l4_output= tf.add(tf.matmul(l3_output,layer4_param['weights']), layer4_param['biases'])\n l4_output=tf.nn.relu(l4_output)\n \n #The final output Layer\n output= tf.matmul(l4_output, output_layer_param['weights'])+output_layer_param['biases']\n \n return output # contains the output of the last output layer", "def network_modified(input):\n\n up6 = upsample_and_concat( conv5, conv4, 256, 512 , 'up_conv1' )\n conv6=slim.conv2d(up6, 256,[3,3], rate=1, activation_fn=lrelu,scope='g_conv6_1')\n conv6=slim.conv2d(conv6,256,[3,3], rate=1, activation_fn=lrelu,scope='g_conv6_2')\n\n up7 = upsample_and_concat( conv6, conv3, 128, 256 , 'up_conv2' )\n conv7=slim.conv2d(up7, 128,[3,3], rate=1, activation_fn=lrelu,scope='g_conv7_1')\n conv7=slim.conv2d(conv7,128,[3,3], rate=1, activation_fn=lrelu,scope='g_conv7_2')\n\n up8 = upsample_and_concat( conv7, conv2, 64, 128 , 'up_conv3')\n conv8=slim.conv2d(up8, 64,[3,3], rate=1, activation_fn=lrelu,scope='g_conv8_1')\n conv8=slim.conv2d(conv8,64,[3,3], rate=1, activation_fn=lrelu,scope='g_conv8_2')\n\n up9 = upsample_and_concat( conv8, conv1, 32, 64 , 'up_conv4')\n conv9=slim.conv2d(up9, 32,[3,3], rate=1, activation_fn=lrelu,scope='g_conv9_1')\n conv9=slim.conv2d(conv9,32,[3,3], rate=1, activation_fn=lrelu,scope='g_conv9_2')\n\n conv10=slim.conv2d(conv9,12,[1,1], rate=1, activation_fn=None, scope='g_conv10')\n out = tf.depth_to_space(conv10,2)\n return out", "def model(data, train=None):\n\n conv = tf.nn.conv2d(data, layer1_filter, [1, 1, 1, 1], padding='SAME')\n hidden = tf.nn.relu(conv + layer1_biases)\n pool = tf.nn.max_pool(hidden, [1,2,2,1], [1,1,1,1], 'SAME')\n if train:\n pool = tf.nn.dropout(pool,.6)\n\n conv = tf.nn.conv2d(pool, layer2_filter, [1, 1, 1, 1], padding='SAME')\n hidden = tf.nn.relu(conv + layer2_biases)\n pool = tf.nn.max_pool(hidden, [1,2,2,1], [1,2,2,1], 'SAME')\n if train:\n pool = tf.nn.dropout(pool,.6)\n\n conv = tf.nn.conv2d(pool, layer3_filter, [1, 1, 1, 1], padding='SAME')\n 
hidden = tf.nn.relu(conv + layer3_biases)\n pool = tf.nn.max_pool(hidden, [1,2,2,1], [1,1,1,1], 'SAME')\n if train:\n pool = tf.nn.dropout(pool,.6)\n\n conv = tf.nn.conv2d(pool, layer6_filter, [1, 1, 1, 1], padding='SAME')\n hidden = tf.nn.relu(conv + layer6_biases)\n pool = tf.nn.max_pool(hidden, [1,2,2,1], [1,2,2,1], 'SAME')\n if train:\n pool = tf.nn.dropout(pool,.6)\n\n conv = tf.nn.conv2d(pool, layer7_filter, [1, 1, 1, 1], padding='SAME')\n hidden = tf.nn.relu(conv + layer7_biases)\n pool = tf.nn.max_pool(hidden, [1,2,2,1], [1,1,1,1], 'SAME')\n if train:\n pool = tf.nn.dropout(pool,.6)\n\n conv = tf.nn.conv2d(pool, layer8_filter, [1, 1, 1, 1], padding='SAME')\n hidden = tf.nn.relu(conv + layer8_biases)\n pool = tf.nn.max_pool(hidden, [1,2,2,1], [1,2,2,1], 'SAME')\n if train:\n pool = tf.nn.dropout(pool,.6)\n\n shape = pool.get_shape().as_list()\n reshape = tf.reshape(pool, [shape[0], shape[1] * shape[2] * shape[3]])\n hidden = tf.nn.relu(tf.matmul(reshape, layer4_weights) + layer4_biases)\n\n return (tf.matmul(hidden, layer5_weights1) + layer5_biases1), (tf.matmul(hidden, layer5_weights2) + layer5_biases2), \\\n (tf.matmul(hidden, layer5_weights3) + layer5_biases3), (tf.matmul(hidden, layer5_weights4) + layer5_biases4), \\\n (tf.matmul(hidden, layer5_weights5) + layer5_biases5)", "def model(image_height,image_width,path):\n\n\tdef load_file(path='vgg19.mat'):\n\t\t\"\"\"\n\t\tLoads Weights File & returns Object of Numpy array\n\t\t\"\"\"\n\t\tfile=loadmat(path)\n\t\tfile=file['layers']\n\t\tprint(\"Success load_file\")\n\t\treturn file\n\n\tdef ret_layer_index(file):\n\t\t\"\"\"\n\t\tTakes file as input & returns a dictionary having name of layers with their code\n\t\t\"\"\"\n\t\tnames={}\n\t\tfor i in range(len(file[0])):\n\t\t\tprint(file[0][i][0][0][0])\n\t\t\tnames[file[0][i][0][0][0][0]]=i\n\t\tprint(\"Success layer_index\")\n\t\treturn names\n \n\tdef weight(layer_name):\n\t\t\"\"\" Asks for Layer Name & returns its weights & bias\n\t\t\"\"\"\n\t\tlayer_no=names[layer_name]\n\t\twb =file[0][layer_no][0][0][2]\n\t\tw=wb[0][0]\n\t\tb=wb[0][1]\n\t\tname=file[0][layer_no][0][0][0]\n\t\tassert name==layer_name\n\t\tprint(\"Success weight\")\n\t\treturn w,b\n\n\tdef conv_relu(prev_layer,layer_no,layer_name):\n\t\tW,b=weight(layer_name)\n\t\tW=tf.constant(W)\n\t\tb=tf.constant(np.reshape(b, (b.size)))\n\t\tl=tf.nn.conv2d(prev_layer,filter=W,strides=[1,1,1,1],padding='SAME') +b\n\t\tprint(\"Success convrelu\")\n\t\treturn tf.nn.relu(l)\n\n\tdef avg_pool(prev_layer):\n\t\treturn tf.nn.avg_pool(prev_layer,ksize=[1, 2, 2, 1],strides=[1, 2, 2, 1], padding='SAME')\n\n\tdef load_graph():\n\t\tgraph={}\n\t\tgraph['input'] = tf.Variable(np.zeros((1, image_height, image_width,3)), dtype = 'float32')\n\t\tgraph['conv1_1'] = conv_relu(graph['input'], 0, 'conv1_1')\n\t\tgraph['conv1_2'] = conv_relu(graph['conv1_1'], 2, 'conv1_2')\n\t\tgraph['avgpool1'] = avg_pool(graph['conv1_2'])\n\t\tgraph['conv2_1'] = conv_relu(graph['avgpool1'], 5, 'conv2_1')\n\t\tgraph['conv2_2'] = conv_relu(graph['conv2_1'], 7, 'conv2_2')\n\t\tgraph['avgpool2'] = avg_pool(graph['conv2_2'])\n\t\tgraph['conv3_1'] = conv_relu(graph['avgpool2'], 10, 'conv3_1')\n\t\tgraph['conv3_2'] = conv_relu(graph['conv3_1'], 12, 'conv3_2')\n\t\tgraph['conv3_3'] = conv_relu(graph['conv3_2'], 14, 'conv3_3')\n\t\tgraph['conv3_4'] = conv_relu(graph['conv3_3'], 16, 'conv3_4')\n\t\tgraph['avgpool3'] = avg_pool(graph['conv3_4'])\n\t\tgraph['conv4_1'] = conv_relu(graph['avgpool3'], 19, 'conv4_1')\n\t\tgraph['conv4_2'] = conv_relu(graph['conv4_1'], 21, 
'conv4_2')\n\t\tgraph['conv4_3'] = conv_relu(graph['conv4_2'], 23, 'conv4_3')\n\t\tgraph['conv4_4'] = conv_relu(graph['conv4_3'], 25, 'conv4_4')\n\t\tgraph['avgpool4'] = avg_pool(graph['conv4_4'])\n\t\tgraph['conv5_1'] = conv_relu(graph['avgpool4'], 28, 'conv5_1')\n\t\tgraph['conv5_2'] = conv_relu(graph['conv5_1'], 30, 'conv5_2')\n\t\tgraph['conv5_3'] = conv_relu(graph['conv5_2'], 32, 'conv5_3')\n\t\tgraph['conv5_4'] = conv_relu(graph['conv5_3'], 34, 'conv5_4')\n\t\tgraph['avgpool5'] = avg_pool(graph['conv5_4'])\n\t\treturn graph\n\n\tfile=load_file(path)\n\tnames=ret_layer_index(file)\n\treturn load_graph()", "def train(self, X, y):", "def forward(self, x):\r\n x = self.conv1(x)\r\n x = self.conv1_BN(x)\r\n x = F.relu(x)\r\n x = self.conv1_dp(x)\r\n x = self.Block2_1(x)\r\n x = self.Block2_2(x)\r\n x = self.Block3_1(x)\r\n x = self.Block3_2(x)\r\n x = self.Block3_3(x)\r\n x = self.Block3_4(x)\r\n x = self.Block4_1(x)\r\n x = self.Block4_2(x)\r\n x = self.Block4_3(x)\r\n x = self.Block4_4(x)\r\n x = self.Block5_1(x)\r\n x = self.Block5_2(x)\r\n x = self.MP(x)\r\n x = x.view(x.size(0),-1)\r\n x = self.fc(x)\r\n \r\n return x", "def forward(self, inputs):\n #NOTE: Already merge axis 0(batches) and axis 1(channels) before extracting feature phase,\n # please refer to paddlevideo/modeling/framework/recognizers/recognizer2d.py#L27\n #y = paddle.reshape(\n # inputs, [-1, inputs.shape[2], inputs.shape[3], inputs.shape[4]])\n\n ####ResNet-C: use three 3x3 conv, replace, one 7x7 conv\n y = self.conv1_1(inputs)\n y = self.conv1_2(y)\n y = self.conv1_3(y)\n\n y = self.pool2D_max(y)\n for block in self.block_list:\n y = block(y)\n return y", "def forward(self, inputs, end_points, mode=\"\"):\n batch_size = inputs['point_clouds'].shape[0]\n\n end_points = self.backbone_net1(inputs['point_clouds'], end_points)\n end_points = self.backbone_net2(inputs['point_clouds'], end_points, mode='net1')\n end_points = self.backbone_net3(inputs['point_clouds'], end_points, mode='net2')\n end_points = self.backbone_net4(inputs['point_clouds'], end_points, mode='net3')\n\n ### Extract feature here\n xyz = end_points['fp2_xyz']\n features1 = end_points['fp2_features']\n features2 = end_points['fp2_features'+'net1']\n features3 = end_points['fp2_features'+'net2']\n features4 = end_points['fp2_features'+'net3']\n end_points['seed_inds'] = end_points['fp2_inds']\n end_points['seed_xyz'] = xyz\n end_points['seed_features'] = features1\n \n ### Combine the feature here\n features_hd_discriptor = torch.cat((features1, features2, features3, features4), dim=1)\n features_hd_discriptor = F.relu(self.bn_agg1(self.conv_agg1(features_hd_discriptor)))\n features_hd_discriptor = F.relu(self.bn_agg2(self.conv_agg2(features_hd_discriptor)))\n\n end_points['hd_feature'] = features_hd_discriptor\n \n net_flag_z = F.relu(self.bn_flag_z1(self.conv_flag_z1(features_hd_discriptor)))\n net_flag_z = self.conv_flag_z2(net_flag_z)\n end_points[\"pred_flag_z\"] = net_flag_z\n\n net_flag_xy = F.relu(self.bn_flag_xy1(self.conv_flag_xy1(features_hd_discriptor)))\n net_flag_xy = self.conv_flag_xy2(net_flag_xy)\n end_points[\"pred_flag_xy\"] = net_flag_xy\n\n net_flag_line = F.relu(self.bn_flag_line1(self.conv_flag_line1(features_hd_discriptor)))\n net_flag_line = self.conv_flag_line2(net_flag_line)\n end_points[\"pred_flag_line\"] = net_flag_line\n\n proposal_xyz, proposal_features, center_offset, center_residual = self.vgen(xyz, features_hd_discriptor)\n proposal_features_norm = torch.norm(proposal_features, p=2, dim=1)\n proposal_features = 
proposal_features.div(proposal_features_norm.unsqueeze(1))\n end_points['vote_xyz'] = proposal_xyz\n end_points['vote_features'] = proposal_features\n \n voted_z, voted_z_feature, z_offset, z_residual = self.vgen_z(xyz, features_hd_discriptor)\n voted_z_feature_norm = torch.norm(voted_z_feature, p=2, dim=1)\n voted_z_feature = voted_z_feature.div(voted_z_feature_norm.unsqueeze(1))\n end_points['vote_z'] = voted_z\n end_points['vote_z_feature'] = voted_z_feature\n\n voted_xy, voted_xy_feature, xy_offset, xy_residual = self.vgen_xy(xyz, features_hd_discriptor)\n voted_xy_feature_norm = torch.norm(voted_xy_feature, p=2, dim=1)\n voted_xy_feature = voted_xy_feature.div(voted_xy_feature_norm.unsqueeze(1))\n end_points['vote_xy'] = voted_xy\n end_points['vote_xy_feature'] = voted_xy_feature\n\n voted_line, voted_line_feature, line_offset, line_residual = self.vgen_line(xyz, features_hd_discriptor)\n voted_line_feature_norm = torch.norm(voted_line_feature, p=2, dim=1)\n voted_line_feature = voted_line_feature.div(voted_line_feature_norm.unsqueeze(1))\n end_points['vote_line'] = voted_line\n end_points['vote_line_feature'] = voted_line_feature\n \n center_z, feature_z, end_points = self.pnet_z(voted_z, voted_z_feature, end_points, mode='_z')\n center_xy, feature_xy, end_points = self.pnet_xy(voted_xy, voted_xy_feature, end_points, mode='_xy')\n center_line, feature_line, end_points = self.pnet_line(voted_line, voted_line_feature, end_points, mode='_line')\n\n end_points = self.pnet_final(proposal_xyz, proposal_features, center_z, feature_z, center_xy, feature_xy, center_line, feature_line, end_points)\n return end_points", "def forward(self, inputs):\n\n down0 = self.layer_0(inputs=inputs)\n down1 = self.layer_1(inputs=down0)\n down2 = self.layer_2(inputs=down1)\n down3 = self.layer_3(inputs=down2)\n down4 = self.layer_4(inputs=down3)\n\n up1 = self.layer_7(down4, down3)\n\n up2 = self.layer_8(up1, down2)\n\n up3 = self.layer_9(up2, down1)\n\n up4 = self.layer_10(up3, down0)\n\n up5 = self.layer_11(up4)\n return up5", "def forward(self, x):\n return self.net(x)", "def forward(self, x):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n for l in range(len(self.layers)):\n if l == 0:\n z = self.layers[l].forward(x)\n else:\n z = self.layers[l].forward(a)\n a = self.activations[l].forward(z)\n\n # output from softmax layer\n out = a\n\n ########################\n # END OF YOUR CODE #\n #######################\n\n return out", "def forward(self, input):\n input, _ = input\n bs = input.shape[0]\n d1 = self.relu1(self.fc1(input))\n d2 = self.relu2(self.fc2(d1))\n d3 = self.fc3(d2)\n out = self.sigmoid(d3)\n\n out = out.view(bs, 17, 3)\n return out", "def forward(self, x):\n sources = list()\n new_sources = list()\n\n # apply lds to the initial image\n x_pool = self.lds(x)\n\n # apply vgg up to conv4_3\n for k in range(22):\n x = self.features[k](x)\n conv4_3_bn = self.ibn1(x)\n x_pool1_skip, x_pool1_icn = self.icn1(x_pool)\n s = self.Norm1(conv4_3_bn * x_pool1_icn)\n\n # apply vgg up to fc7\n for k in range(22, 34):\n x = self.features[k](x)\n conv7_bn = self.ibn2(x)\n x_pool2_skip, x_pool2_icn = self.icn2(x_pool1_skip)\n p = self.Norm2(self.dsc1(s) + conv7_bn * x_pool2_icn)\n\n x = self.features[34](x)\n\n # apply extra layers and cache source layer outputs\n for k, v in enumerate(self.extra):\n x = v(x)\n if k == 0:\n x_pool3_skip, x_pool3_icn = self.icn3(x_pool2_skip)\n w = self.Norm3(self.dsc2(p) + x * x_pool3_icn)\n elif k == 2:\n x_pool4_skip, x_pool4_icn = 
self.icn4(x_pool3_skip)\n q = self.Norm4(self.dsc3(w) + x * x_pool4_icn)\n elif k == 4:\n o = self.Norm5(self.dsc4(q) + x)\n sources.append(o)\n elif k == 7 or k == 9:\n sources.append(x)\n else:\n pass\n\n # project the forward features into lower dimension.\n tmp1 = self.proj1(p)\n tmp2 = self.proj2(w)\n tmp3 = self.proj3(q)\n tmp4 = self.proj4(o)\n\n # The conv4_3 level\n proj1 = F.upsample(tmp1, scale_factor=2, mode='bilinear')\n proj2 = F.upsample(tmp2, scale_factor=4, mode='bilinear')\n proj3 = F.upsample(tmp3, scale_factor=8, mode='bilinear')\n proj4 = F.upsample(tmp4, scale_factor=16, mode='bilinear')\n proj = torch.cat([proj1, proj2, proj3, proj4], dim=1)\n\n agent1 = self.agent1(s)\n\n convert1 = self.convert1(proj)\n pred1 = torch.cat([agent1, convert1], dim=1)\n pred1 = self.merge1(pred1)\n new_sources.append(pred1)\n\n # The fc_7 level\n proj2 = F.upsample(tmp2, scale_factor=2, mode='bilinear')\n proj3 = F.upsample(tmp3, scale_factor=4, mode='bilinear')\n proj4 = F.upsample(tmp4, scale_factor=8, mode='bilinear')\n proj = torch.cat([proj2, proj3, proj4], dim=1)\n\n agent2 = self.agent2(p)\n convert2 = self.convert2(proj)\n pred2 = torch.cat([agent2, convert2], dim=1)\n pred2 = self.merge2(pred2)\n new_sources.append(pred2)\n\n # The conv8 level\n proj3 = F.upsample(tmp3, scale_factor=2, mode='bilinear')\n proj4 = F.upsample(tmp4, scale_factor=4, mode='bilinear')\n proj = torch.cat([proj3, proj4], dim=1)\n\n agent3 = self.agent3(w)\n convert3 = self.convert3(proj)\n pred3 = torch.cat([agent3, convert3], dim=1)\n pred3 = self.merge3(pred3)\n new_sources.append(pred3)\n\n # The conv9 level\n proj4 = F.upsample(tmp4, scale_factor=2, mode='bilinear')\n proj = proj4\n\n agent4 = self.agent4(q)\n convert4 = self.convert4(proj)\n pred4 = torch.cat([agent4, convert4], dim=1)\n pred4 = self.merge4(pred4)\n new_sources.append(pred4)\n\n for prediction in sources:\n new_sources.append(prediction)\n\n return new_sources", "def _forward(self):\n\n tf.summary.image(\"image\", tensor=tf.reshape(self.x, (self.batch_size, 28, 28, 1)), max_outputs=10)\n x = self.x\n\n # x = layers.dropout(self.x, keep_prob=0.7)\n # with tf.variable_scope(\"layer1\") as scope:\n h = tf.nn.relu(layers.fully_connected(x, num_outputs=self.input_size // 2, activation_fn=None))\n # tf.summary.histogram(\"moving_mean1\", tf.get_variable(scope + \"moving_mean\"))\n # with tf.variable_scope(\"layer2\") as scope:\n # h = tf.nn.relu(layers.fully_connected(h, num_outputs=32, activation_fn=None))\n # tf.summary.histogram(\"moving_mean2\", tf.get_variable(\"moving_mean\"))\n # with tf.variable_scope(\"layer3\") as scope:\n self.logits = layers.fully_connected(h, num_outputs=10, activation_fn=None)\n # tf.summary.histogram(\"moving_mean3\", tf.get_variable(\"moving_mean\"))\n\n self.probability = tf.nn.softmax(self.logits)\n self.prediction = tf.argmax(self.probability, axis=1)", "def forward(self, x):\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n out = self.maxpool(out)\n out = self.avgpool(out)\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n out = self.avgpool(out)\n out = torch.flatten(out, 1)\n out = self.fc(out)\n return out", "def forward(self, x, y):\n if self.dataset_type == 'tabular':\n input = torch.cat([x, y.unsqueeze(1)], dim=1).float()\n\n out = self.net(input).squeeze(dim=-1)\n\n else:\n intermediate = self.cnn(x)\n intermediate = torch.cat([intermediate.float(), y.float().unsqueeze(1)], dim=1)\n out = 
self.fc(intermediate).squeeze(dim=-1)\n\n return out", "def forward(self, x):\n x1, x2 = x\n y1 = self.conv_net.forward(x1)\n y2 = self.sparse_net.forward(x2)\n return y1, y2", "def build_net(graph, training=True, validation=False):\n\n with graph.as_default(): \n x = tf.placeholder(tf.float32, [None] + resize_shape, 'x')\n # TODO: use len(labels_map)\n y = tf.placeholder(tf.int32, [None, 17], 'y')\n phase_train = tf.placeholder(tf.bool, name='phase_train')\n keep_prob = tf.placeholder(tf.float32, name='keep_prob')\n keep_prob_fc1 = tf.placeholder(tf.float32, name='keep_prob_fc1')\n learning_rate = tf.placeholder(tf.float32, name='learning_rate')\n\n # Create Input Pipeline for Train, Validation and Test Sets\n if training:\n batch, batch_labels, batch_image_paths = dsutils.create_input_pipeline(\n image_paths=image_paths[:index_split_train_val],\n labels=labels_onehot_list[:index_split_train_val],\n batch_size=batch_size,\n n_epochs=n_epochs,\n shape=input_shape,\n crop_factor=resize_factor,\n training=training,\n randomize=True)\n elif validation:\n batch, batch_labels, batch_image_paths = dsutils.create_input_pipeline(\n image_paths=image_paths[index_split_train_val:],\n labels=labels_onehot_list[index_split_train_val:],\n batch_size=batch_size,\n # only one epoch for test output\n n_epochs=1,\n shape=input_shape,\n crop_factor=resize_factor,\n training=training) \n else:\n batch, batch_labels, batch_image_paths = dsutils.create_input_pipeline(\n image_paths=test_image_paths,\n labels=test_onehot_list,\n batch_size=batch_size,\n # only one epoch for test output\n n_epochs=1,\n shape=input_shape,\n crop_factor=resize_factor,\n training=training)\n\n Ws = []\n \n current_input = x\n\n for layer_i, n_output in enumerate(n_filters):\n with tf.variable_scope('layer{}'.format(layer_i)):\n # 2D Convolutional Layer with batch normalization and relu\n h, W = utils.conv2d(x=current_input,\n n_output=n_output,\n k_h=filter_sizes[layer_i],\n k_w=filter_sizes[layer_i])\n h = tf.layers.batch_normalization(h, training=phase_train)\n h = tf.nn.relu(h, 'relu' + str(layer_i))\n\n # Apply Max Pooling Every 2nd Layer\n if layer_i % 2 == 0:\n h = tf.nn.max_pool(value=h,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding='SAME')\n\n # Apply Dropout Every 2nd Layer\n if layer_i % 2 == 0:\n h = tf.nn.dropout(h, keep_prob)\n\n Ws.append(W)\n current_input = h\n\n h = utils.linear(current_input, fc_size, name='fc_t')[0]\n h = tf.layers.batch_normalization(h, training=phase_train)\n h = tf.nn.relu(h, name='fc_t/relu')\n h = tf.nn.dropout(h, keep_prob_fc1)\n\n logits = utils.linear(h, len(labels_map), name='fc_t2')[0]\n h = tf.nn.sigmoid(logits, 'fc_t2')\n\n # must be the same type as logits\n y_float = tf.cast(y, tf.float32)\n\n cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits,\n labels=y_float)\n loss = tf.reduce_mean(cross_entropy)\n\n if training:\n # update moving_mean and moving_variance so it will be available at inference time\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)\n else:\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)\n \n saver = tf.train.Saver()\n init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())\n return batch, batch_labels, batch_image_paths, init, x, y, phase_train, keep_prob, keep_prob_fc1, learning_rate, h, loss, optimizer, saver", "def forward_propagate(self, 
x):\n self.z_h = np.dot( x, self.w_ih ) + self.b_h\n #Activations of hidden layer\n self.a_h = self.sigmoid( self.z_h )\n self.z_o = np.dot( self.a_h, self.w_ho ) + self.b_o\n #yEst = activations of output layer\n yEst = self.sigmoid( self.z_o )\n return yEst", "def feedforward(self, _input):\r\n self._input = _input\r\n self.hidden_layers[0] = sigmoid(np.dot(self._input, self[0]))\r\n for i in range(1, len(self.hidden_layers)):\r\n matrix = np.dot(self.hidden_layers[i - 1], self[i])\r\n self.hidden_layers[i] = sigmoid(matrix)\r\n self._output = sigmoid(np.dot(self.hidden_layers[-1], self[-1]))", "def _propagateInputRegression(self,input):\n Y = [0] * self.K #init output to list of K zeroes\n \n #init hidden layer to list of 1 followed by H zeroes\n Z = [0] * (self.H -1) \n \n #propagate inputs to hidden layer (start at 1, \n #first node in hidden layer should not be touched)\n for h in range(0,self.H-1):\n #Z = sig(W^T * x) \n Z[h] = _sigmoid(_mulVectors(self.W[h],input))\n \n Z.append(1) \n \n #propagate hidden layer to outputs\n for i in range(0,self.K):\n #y = v^T * z\n Y[i] = _mulVectors(self.V[i],Z)\n \n return Y,Z", "def go(self, z):\n with tf.variable_scope(self.name) as scope:\n batch_size = tf.shape(z)[0]\n fc = tf.contrib.layers.fully_connected(z, 4*4*1024, activation_fn=tf.identity)\n reshape_fc = tf.reshape(fc, [1, 4, 4, 1024])\n \n conv1 = tf.contrib.layers.conv2d_transpose(\n reshape_fc, 512, [4,4], [2,2],\n weights_initializer = tf.random_normal_initializer(stddev=0.02), #st_dev from dcgan paper\n activation_fn = leaky_relu\n )\n \n conv2 = tf.contrib.layers.conv2d_transpose(\n conv1, 256, [4, 4], [2,2],\n weights_initializer = tf.random_normal_initializer(stddev=0.02),\n activation_fn = leaky_relu\n )\n \n conv3 = tf.contrib.layers.conv2d_transpose(\n conv2, 3, [4,4], [2,2],\n weights_initializer = tf.random_normal_initializer(stddev=0.02),\n# activation_fn = leaky_relu\n activation_fn = tf.tanh\n )\n \n# conv4 = tf.contrib.layers.conv2d_transpose(\n# conv3, 3, [4,4], [2,2],\n# weights_initializer = tf.random_normal_initializer(stddev=0.02),\n# activation_fn = tf.tanh\n# )\n return conv3", "def forward_pass_unet(images, phase_train):\n\n K = 4\n images = tf.expand_dims(images, -1)\n\n # Network blocks\n conv1 = sdn.convolution('Conv1', images, 3, K, 1, phase_train=phase_train)\n down = sdn.convolution('Down128', conv1, 2, K*2, 2, phase_train=phase_train)\n\n conv2 = sdn.convolution('Conv2', down, 3, K*2, 1, phase_train=phase_train)\n conv2 = sdn.residual_layer('Conv2b', conv2, 3, K*2, 1, phase_train=phase_train)\n down = sdn.convolution('Down64', conv2, 2, K*4, 2, phase_train=phase_train)\n\n conv3 = sdn.residual_layer('Conv3', down, 3, K*4, 1, phase_train=phase_train)\n conv3 = sdn.residual_layer('Conv3b', conv3, 3, K*4, 1, phase_train=phase_train)\n down = sdn.convolution('Down32', conv3, 2, K*8, 2, phase_train=phase_train) # Now 32x32\n\n conv4 = sdn.residual_layer('Conv4', down, 3, K*8, 1, phase_train=phase_train)\n conv4 = sdn.residual_layer('Conv4b', conv4, 3, K*8, 1, phase_train=phase_train)\n down = sdn.convolution('Down16', conv4, 2, K*16, 2, phase_train=phase_train)\n\n conv5 = sdn.inception_layer('Conv5', down, K*16, 1, phase_train=phase_train)\n conv5 = sdn.inception_layer('Conv5b', conv5, K*16, 1, phase_train=phase_train)\n down = sdn.convolution('Down8', conv5, 2, K*32, 2, phase_train=phase_train)\n\n conv6 = sdn.inception_layer('Conv6', down, K*32, phase_train=phase_train)\n conv6 = sdn.inception_layer('Conv6b', conv6, K*32, phase_train=phase_train)\n down = 
sdn.convolution('Down4', conv6, 2, K*64, 2, phase_train=phase_train)\n\n # Bottom of the decoder: 4x4\n conv7 = sdn.inception_layer('Bottom1', down, K*64, phase_train=phase_train)\n conv7 = sdn.residual_layer('Bottom2', conv7, 3, K*64, 1, dropout=FLAGS.dropout_factor, phase_train=phase_train)\n conv7 = sdn.inception_layer('Bottom2', conv7, K*64, phase_train=phase_train)\n\n # Upsample 1\n dconv = sdn.deconvolution('Dconv1', conv7, 2, K*32, S=2, phase_train=phase_train, concat=False, concat_var=conv6, out_shape=[FLAGS.batch_size, 8, 8, K*32])\n dconv = sdn.inception_layer('Dconv1b', dconv, K*32, phase_train=phase_train)\n\n dconv = sdn.deconvolution('Dconv2', dconv, 2, K*16, S=2, phase_train=phase_train, concat=False, concat_var=conv5, out_shape=[FLAGS.batch_size, 16, 16, K*16])\n dconv = sdn.inception_layer('Dconv2b', dconv, K*16, phase_train=phase_train)\n\n dconv = sdn.deconvolution('Dconv3', dconv, 2, K*8, S=2, phase_train=phase_train, concat=False, concat_var=conv4, out_shape=[FLAGS.batch_size, 32, 32, K*8])\n dconv = sdn.inception_layer('Dconv3b', dconv, K*8, phase_train=phase_train)\n\n dconv = sdn.deconvolution('Dconv4', dconv, 2, K*4, S=2, phase_train=phase_train, concat=False, concat_var=conv3, out_shape=[FLAGS.batch_size, 64, 64, K*4])\n dconv = sdn.residual_layer('Dconv4b', dconv, 3, K*4, S=1, phase_train=phase_train)\n\n dconv = sdn.deconvolution('Dconv5', dconv, 2, K*2, S=2, phase_train=phase_train, concat=False, concat_var=conv2, out_shape=[FLAGS.batch_size, 128, 128, K*2])\n dconv = sdn.residual_layer('Dconv5b', dconv, 3, K*2, S=1, phase_train=phase_train)\n\n dconv = sdn.deconvolution('Dconv6', dconv, 2, K, S=2, phase_train=phase_train, concat=False, concat_var=conv1, out_shape=[FLAGS.batch_size, 256, 256, K])\n dconv = sdn.convolution('Dconv6b', dconv, 3, K, S=1, phase_train=phase_train, dropout=FLAGS.dropout_factor)\n\n # Output is a 1x1 box with 3 labels\n Logits = sdn.convolution('Logits', dconv, 1, FLAGS.num_classes, S=1, phase_train=phase_train, BN=False, relu=False, bias=False)\n\n return Logits, sdn.calc_L2_Loss(FLAGS.l2_gamma)", "def forward(self, x, y):\n y_summary = self.summary_net(y)\n return self.invertible_net(x, y_summary, inverse=False)", "def forward(self, inputs):\n raise NotImplementedError", "def forward(self, input):\n\n x = self.conv(input)\n x = self.bn(x)\n out = self.act(x)\n return out", "def forward(self, x):\n #print('output of fetures.children() : %s'%str([i for i in self.features.children()]))\n #print(\"shape of input is %s\" % str(x.size()))\n for layer_no, layer in enumerate(self.features.children()):\n\n if layer_no is 23:\n y = layer(x)\n if layer_no is 33:\n z = layer(x)\n x = layer(x)\n\n #print('debug')\n #print('layer info: %s'%str(layer))\n #print(\"shape of x is %s\" % str(x.size()))\n\n x = self.conv1D_downstream1(x)\n x = self.conv1D_downstream2(x)\n x = self.upsample_1(x)\n\n z = self.conv1D_pool4(z)\n y = self.conv1D_pool3(y)\n #print('debug')\n #print(\"shape of x is %s\"%str(x.size()))\n #print(\"shape of z is %s\" % str(z.size()))\n\n if x.size() is not z.size():\n x = nn.functional.interpolate(x,size = (z.size()[2],z.size()[3]), mode = 'nearest')\n x = x+ z\n x = self.upsample_2(x)\n x = x+y\n x = self.upsample_3(x)\n\n return x", "def forward(self, x):\n out_conv1 = self.conv1(x)\n out_conv2 = self.conv2(out_conv1)\n out_conv3 = self.conv3(out_conv2)\n out_conv4 = self.conv4(out_conv3)\n out_conv5 = self.conv5(out_conv4)\n out_conv6 = self.conv6(out_conv5)\n out_conv7 = self.conv7(out_conv6)\n\n out_upconv7 = 
self.crop_top_left(self.upconv7(out_conv7), out_conv6)\n concat7 = torch.cat((out_upconv7, out_conv6), 1)\n out_iconv7 = self.iconv7(concat7)\n\n out_upconv6 = self.crop_top_left(self.upconv6(out_iconv7), out_conv5)\n concat6 = torch.cat((out_upconv6, out_conv5), 1)\n out_iconv6 = self.iconv6(concat6)\n\n out_upconv5 = self.crop_top_left(self.upconv5(out_iconv6), out_conv4)\n concat5 = torch.cat((out_upconv5, out_conv4), 1)\n out_iconv5 = self.iconv5(concat5)\n\n out_upconv4 = self.crop_top_left(self.upconv4(out_iconv5), out_conv3)\n concat4 = torch.cat((out_upconv4, out_conv3), 1)\n out_iconv4 = self.iconv4(concat4)\n disp4 = self.alpha * self.predict_disp4(out_iconv4) + self.beta\n\n out_upconv3 = self.crop_top_left(self.upconv3(out_iconv4), out_conv2)\n disp4_up = self.crop_top_left(torch.nn.functional.interpolate(disp4,\n scale_factor=2,\n mode='bilinear',\n align_corners=False), out_conv2)\n concat3 = torch.cat((out_upconv3, out_conv2, disp4_up), 1)\n out_iconv3 = self.iconv3(concat3)\n disp3 = self.alpha * self.predict_disp3(out_iconv3) + self.beta\n\n out_upconv2 = self.crop_top_left(self.upconv2(out_iconv3), out_conv1)\n disp3_up = self.crop_top_left(torch.nn.functional.interpolate(disp3,\n scale_factor=2,\n mode='bilinear',\n align_corners=False), out_conv1)\n concat2 = torch.cat((out_upconv2, out_conv1, disp3_up), 1)\n out_iconv2 = self.iconv2(concat2)\n disp2 = self.alpha * self.predict_disp2(out_iconv2) + self.beta\n\n out_upconv1 = self.crop_top_left(self.upconv1(out_iconv2), x)\n disp2_up = self.crop_top_left(torch.nn.functional.interpolate(disp2,\n scale_factor=2,\n mode='bilinear',\n align_corners=False), x)\n concat1 = torch.cat((out_upconv1, disp2_up), 1)\n out_iconv1 = self.iconv1(concat1)\n disp1 = self.alpha * self.predict_disp1(out_iconv1) + self.beta\n\n if self.training:\n return disp1, disp2\n else:\n return disp1", "def test_propagate(self):\n # Get network components\n data = array([[0], [1]])\n cdata = LabeledCData(data, labels=array([0, 1]))\n encoder = BinaryEncoding(cdata)\n unitary = ProductAnsatz(1)\n measure = Measurement(1, [0])\n qnn = Network([encoder, unitary, measure], \"1q-qvm\")\n\n # Propagate the zeroth data point\n out = qnn.propagate(0, shots=10)\n\n print(out)", "def model_pass(input, keypoints, training):\n # Convolutional layers\n with tf.variable_scope('conv1'):\n conv1 = conv_relu(input, kernel_size=3, depth=32)\n pool1 = pool(conv1, size=2)\n # Apply dropout if needed\n pool1 = tf.cond(training, lambda: tf.nn.dropout(pool1, keep_prob=0.9), lambda: pool1)\n with tf.variable_scope('conv2'):\n conv2 = conv_relu(pool1, kernel_size=2, depth=64)\n pool2 = pool(conv2, size=2)\n # Apply dropout if needed\n pool2 = tf.cond(training, lambda: tf.nn.dropout(pool2, keep_prob=0.8), lambda: pool2)\n with tf.variable_scope('conv3'):\n conv3 = conv_relu(pool2, kernel_size=2, depth=128)\n pool3 = pool(conv3, size=2)\n # Apply dropout if needed\n pool3 = tf.cond(training, lambda: tf.nn.dropout(pool3, keep_prob=0.7), lambda: pool3)\n\n # Flatten convolutional layers output\n shape = pool3.get_shape().as_list()\n flattened = tf.reshape(pool3, [-1, shape[1] * shape[2] * shape[3]])\n\n # Fully connected layers\n with tf.variable_scope('fc4'):\n fc4 = fully_connected_relu(flattened, size=1000)\n # Apply dropout if needed\n fc4 = tf.cond(training, lambda: tf.nn.dropout(fc4, keep_prob=0.5), lambda: fc4)\n with tf.variable_scope('fc5'):\n fc5 = fully_connected_relu(fc4, size=1000)\n with tf.variable_scope('out'):\n prediction = fully_connected(fc5, size=keypoints)\n 
return prediction", "def forward(self, input):\n\n return self.network(input)", "def forward(self, input):\n\n return self.network(input)", "def forward(self, input):\n\n return self.network(input)", "def forward(self, x):\n x = x.float()\n n, c, t, v, m = x.size()\n x = x.permute(0, 4, 3, 1, 2).contiguous()\n x = x.view(n * m, v * c, t)\n x = self.data_bn(x)\n x = x.view(n, m, v, c, t)\n x = x.permute(0, 1, 3, 4, 2).contiguous()\n x = x.view(n * m, c, t, v)\n for gcn in self.agcn_networks:\n x = gcn(x)\n return x", "def model(inputs, is_training):\n\n tf.logging.info(FLAGS.model_structure)\n tf.logging.info(FLAGS.model_edge_weights)\n structure = json.loads(FLAGS.model_structure)\n\n if FLAGS.use_object_input:\n feature_shape = inputs[0].shape\n original_inputs = inputs[0]\n object_inputs = inputs[1]\n else:\n feature_shape = inputs.shape\n original_inputs = inputs\n object_inputs = None\n\n batch_size = feature_shape[0] // FLAGS.num_frames\n original_num_frames = FLAGS.num_frames\n num_frames = original_num_frames\n\n grouping = {-3: [], -2: [], -1: [], 0: [], 1: [], 2: [], 3: []}\n for i in range(len(structure)):\n grouping[structure[i][0]].append(i)\n\n stem_count = len(grouping[-3]) + len(grouping[-2]) + len(grouping[-1])\n\n assert stem_count != 0\n stem_filters = 128 // stem_count\n\n if grouping[-2]:\n # Instead of loading optical flows as inputs from data pipeline, we are\n # applying the \"Representation Flow\" to RGB frames so that we can compute\n # the flow within TPU/GPU on fly. It's essentially optical flow since we\n # do it with RGBs.\n flow_inputs = rf.rep_flow(\n original_inputs,\n batch_size,\n original_num_frames,\n num_iter=40,\n is_training=is_training,\n bottleneck=1,\n scope='rep_flow')\n streams = []\n\n for i in range(len(structure)):\n with tf.variable_scope('Node_' + str(i)):\n if structure[i][0] == -1:\n inputs = asn.rgb_conv_stem(original_inputs,\n original_num_frames,\n stem_filters,\n structure[i][1],\n is_training,\n data_format)\n streams.append(inputs)\n elif structure[i][0] == -2:\n inputs = asn.flow_conv_stem(flow_inputs,\n stem_filters,\n structure[i][1],\n is_training,\n data_format)\n streams.append(inputs)\n elif structure[i][0] == -3:\n # In order to use the object inputs, you need to feed your object\n # input tensor here.\n inputs = object_conv_stem(object_inputs,\n data_format)\n streams.append(inputs)\n else:\n block_number = structure[i][0]\n\n combined_inputs = [streams[structure[i][1][j]]\n for j in range(0, len(structure[i][1]))]\n\n tf.logging.info(grouping)\n nodes_below = []\n for k in range(-3, structure[i][0]):\n nodes_below = nodes_below + grouping[k]\n\n peers = []\n if FLAGS.attention_mode:\n lg_channel = -1\n tf.logging.info(nodes_below)\n for k in nodes_below:\n tf.logging.info(streams[k].shape)\n lg_channel = max(streams[k].shape[3], lg_channel)\n\n for node_index in nodes_below:\n attn = tf.reduce_mean(streams[node_index], [1, 2])\n\n attn = tf.layers.dense(\n inputs=attn,\n units=lg_channel,\n kernel_initializer=tf.random_normal_initializer(stddev=.01))\n peers.append(attn)\n\n combined_inputs = fusion_with_peer_attention(\n combined_inputs,\n index=i,\n attention_mode=FLAGS.attention_mode,\n attention_in=peers,\n use_5d_mode=False,\n data_format=data_format)\n\n graph = asn.block_group(\n inputs=combined_inputs,\n filters=structure[i][2],\n block_fn=block_fn,\n blocks=layers[block_number],\n strides=structure[i][4],\n is_training=is_training,\n name='block_group' + str(i),\n block_level=structure[i][0],\n 
num_frames=num_frames,\n temporal_dilation=structure[i][3],\n data_format=data_format)\n\n streams.append(graph)\n\n outputs = asn.multi_stream_heads(streams,\n grouping[3],\n original_num_frames,\n num_classes,\n data_format)\n\n return outputs", "def forward(self, inputs, inputs1):\n\n down1, indices_1, unpool_shape1 = self.layer_1(inputs=inputs,\n layer_size=2)\n down2, indices_2, unpool_shape2 = self.layer_2(inputs=down1,\n layer_size=2)\n down3, indices_3, unpool_shape3 = self.layer_3(inputs=down2,\n layer_size=3)\n down4, indices_4, unpool_shape4 = self.layer_4(inputs=down3,\n layer_size=3)\n down5, indices_5, unpool_shape5 = self.layer_5(inputs=down4,\n layer_size=3)\n\n up5 = self.layer_6(inputs=down5, indices=indices_5,\n output_shape=unpool_shape5, layer_size=3)\n up4 = self.layer_7(inputs=up5, indices=indices_4,\n output_shape=unpool_shape4, layer_size=3)\n up3 = self.layer_8(inputs=up4, indices=indices_3,\n output_shape=unpool_shape3, layer_size=3)\n up2 = self.layer_9(inputs=up3, indices=indices_2,\n output_shape=unpool_shape2, layer_size=2)\n output = self.layer_10(inputs=up2, indices=indices_1,\n output_shape=unpool_shape1, layer_size=2)\n\n # Second Modality\n\n down11, indices_11, unpool_shape11 = self.layer_11(inputs=inputs,\n layer_size=2)\n down12, indices_12, unpool_shape12 = self.layer_12(inputs=down1,\n layer_size=2)\n down13, indices_13, unpool_shape13 = self.layer_13(inputs=down2,\n layer_size=3)\n down14, indices_14, unpool_shape14 = self.layer_14(inputs=down3,\n layer_size=3)\n down15, indices_15, unpool_shape15 = self.layer_15(inputs=down4,\n layer_size=3)\n\n up15 = self.layer_16(inputs=down15, indices=indices_15,\n output_shape=unpool_shape15, layer_size=3)\n up14 = self.layer_17(inputs=up15, indices=indices_14,\n output_shape=unpool_shape4, layer_size=3)\n up13 = self.layer_18(inputs=up14, indices=indices_13,\n output_shape=unpool_shape13, layer_size=3)\n up12 = self.layer_19(inputs=up13, indices=indices_12,\n output_shape=unpool_shape12, layer_size=2)\n output1 = self.layer_110(inputs=up12, indices=indices_11,\n output_shape=unpool_shape11, layer_size=2)\n\n # End Pipe\n\n Concat = torch.cat((output, output1), 1)\n\n finalout = self.layer_1110(Concat)\n\n return finalout", "def run(args):\n\n #Get input values\n n_feat = list(map(int, args.n_feat))[0]\n hidden = list(map(int, args.hidden))[0]\n latent = int(args.latent)\n data_path = args.i[0]\n LOGDIR = args.o\n n_layer = args.n_layers\n drop_rate = args.dp_rate\n\n # setup graph\n g = tf.Graph()\n with g.as_default():\n\n ## Setup placeholders and one hot encode input\n with tf.variable_scope('inputs', reuse=True):\n x_data = tf.placeholder(tf.float32, [None, n_feat], name=\"x_data\")\n x_onehot = tf.one_hot(tf.cast(x_data, tf.int32),3,dtype=tf.float32)\n x_flat = tf.reshape(x_onehot, [-1, 3*n_feat])\n is_training = tf.placeholder(tf.bool)\n beta = tf.placeholder(tf.float32, [1,], name=\"Beta\")\n\n #Encoder\n with tf.name_scope('encoder'):\n en = encoder(x_flat,hidden,n_layer,tf.nn.relu,drop_rate,is_training)\n\n #Latent layers\n with tf.name_scope('latent_space'):\n z_mean = fc(en, latent, scope='enc_fc4_mu', activation_fn=None) # Linear activation\n z_log_sigma = fc(en, latent, scope='enc_fc4_sigma', activation_fn=tf.nn.softplus) # softplus activation\n\n # Sample from gaussian distribution\n z = sample_z(z_mean, z_log_sigma)\n\n #Decoder\n with tf.name_scope('decoder'):\n de = decoder(z, hidden, n_layer, tf.nn.relu,drop_rate,is_training)\n\n # get flat reconstruction and reshape back to genotype format 
with argmax\n with tf.name_scope('output'):\n x_hat = fc(de, 3*n_feat, scope='dec_fc4', activation_fn=None) #linear activation\n x_hat = tf.reshape(x_hat,[-1, n_feat,3])\n x_decoded = tf.cast(tf.argmax(x_hat,axis=-1),tf.int64)\n\n # Loss functions\n with tf.name_scope(\"cross_entropy\"):\n cross_entropy = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits_v2(logits=x_hat, labels=x_onehot))\n recon_loss = tf.reduce_mean(cross_entropy)\n tf.summary.scalar(\"cross_entropy\", recon_loss)\n\n with tf.name_scope(\"KL_divergence\"):\n KL_divergence = -0.5 * tf.reduce_sum(1 + z_log_sigma - tf.square(z_mean) - tf.exp(z_log_sigma), axis=1)\n latent_loss = tf.reduce_mean(KL_divergence)\n tf.summary.scalar(\"KL_divergence\", latent_loss)\n\n with tf.name_scope(\"total_loss\"):\n total_loss = tf.reduce_mean(recon_loss + tf.reduce_mean(tf.multiply(KL_divergence, beta)))\n tf.summary.scalar(\"total_loss\", total_loss)\n\n # Train optimizer\n with tf.name_scope(\"train\"):\n train_step = tf.train.AdamOptimizer(learning_rate=args.lrate).minimize(total_loss)\n\n # save summaries\n saver = tf.train.Saver()\n\n # initializer\n init = tf.global_variables_initializer()\n\n # prepare lists for collecting data\n epoch_dict = {\"CE\":[], \"KLd\": [], \"loss\": []}\n\n # open handle forsaving loss\n loss_file = \"%s/loss.tab\" % LOGDIR\n if os.path.exists(loss_file):\n os.remove(loss_file)\n fh_log = open(loss_file, \"a\")\n\n fh_log.write(\"Epoch\\tLoss\\tKL\\tCE\\n\")\n\n ## Run session ##\n with tf.Session(graph=g, config=tf.ConfigProto(log_device_placement=True)) as sess:\n #sess.run()\n sess.run(init)\n\n beta_, to_add_ = init_warmup(args.warmup)\n merged = tf.summary.merge_all()\n train_writer = tf.summary.FileWriter(LOGDIR + '/train', sess.graph)\n\n ## Get genotype data\n data=get_data(data_path)\n ran=30\n\n # run epochs\n for epoch in range(args.epochs):\n\n # prepare list for collecting data for minibatches\n mb_dict = {\"CE\":[], \"KLd\": [], \"loss\": []}\n\n # training\n data2 = data.iloc[:,0:n_feat]\n for iter in range(ran):\n D = data2.sample(n=args.batch,axis=0)\n data2=data2.drop(D.index)\n batch=np.array(D)\n _, vaecost, cre, KLd, summary = sess.run([train_step, total_loss, cross_entropy, KL_divergence, merged], feed_dict={x_data: batch, beta: beta_, is_training: True})\n mb_dict[\"loss\"].append(vaecost)\n mb_dict[\"CE\"].append(cre)\n mb_dict[\"KLd\"].append(KLd)\n\n # summaries for every epoch\n epoch_dict = summaries(epoch_dict, mb_dict)\n\n # add to tensorboard\n train_writer.add_summary(summary, epoch)\n\n # after epoch add information to epoch lists and write out to file\n report(LOGDIR, epoch, epoch_dict, saver, sess, fh_log)\n\n # add to beta\n beta_ = beta_ + to_add_\n if beta_ > 1:\n beta_ = np.array([1,])\n\n # after session\n fh_log.close()\n\n ## get latent representation and save reconstructions\n la_dict = {\"CE\":[], \"KLd\": [], \"loss\": [],}\n\n latent_file = \"%s/latent.representation.tab\" % LOGDIR\n if os.path.exists(latent_file):\n os.remove(latent_file)\n open_file_1 = open(latent_file, 'ab')\n\n genotype_file = \"%s/genotype.reconstruction.tab\" % LOGDIR\n if os.path.exists(genotype_file):\n os.remove(genotype_file)\n open_file_2 = open(genotype_file, 'ab')\n\n labels_file = \"%s/labels.reconstruction.tab\" % LOGDIR\n if os.path.exists(labels_file):\n os.remove(labels_file)\n open_file_3 = open(labels_file, 'ab')\n\n # final pass\n for iter in range(1):\n D = data.iloc[0:,:]\n ind = D.index\n #drop those individuals after use\n data=data.drop(ind)\n 
batch_test=np.array(D.iloc[:,0:n_feat])\n vaecost, cre, KLd, mu, x_reconstruction = sess.run([total_loss, cross_entropy, KL_divergence, z_mean,x_decoded], feed_dict={x_data: batch_test,beta: beta_, is_training: False})\n la_dict[\"loss\"].append(vaecost)\n la_dict[\"CE\"].append(cre)\n la_dict[\"KLd\"].append(KLd)\n np.savetxt(open_file_1, mu, fmt=\"%.3f\", delimiter=\"\\t\")\n np.savetxt(open_file_2, x_reconstruction, delimiter=\"\\t\")\n np.savetxt(open_file_3, list(ind), delimiter=\"\\t\",fmt=\"%s\")\n\n # close\n open_file_1.close()\n open_file_2.close()\n open_file_3.close()\n\n # print loss to screen\n print (\"Final model loss: %f; KLd: %f; CE %f; \" % (np.mean(la_dict[\"loss\"]), np.mean(la_dict[\"KLd\"]), np.mean(la_dict[\"CE\"])))", "def model(inputs, is_training):\n\n\n if data_format == 'channels_first':\n # Convert the inputs from channels_last (NHWC) to channels_first (NCHW).\n # This provides a large performance boost on GPU. See\n # https://www.tensorflow.org/performance/performance_guide#data_formats\n inputs = tf.transpose(inputs, [0, 3, 1, 2])\n\n\n #localize network to generate the transformation parameters\n # raw_inputs = inputs\n\n # inputs = tf.layers.conv2d(inputs = inputs, filters = 32, strides = 2, kernel_size = 5, padding = 'SAME', kernel_initializer=tf.variance_scaling_initializer())\n\n # print(inputs.shape)\n # inputs = tf.layers.max_pooling2d(inputs = inputs, pool_size = 2, strides = 2, padding = 'VALID')\n # print(inputs.shape)\n # inputs = tf.layers.conv2d(inputs = inputs, filters = 64, strides = 2, kernel_size = 5, padding = 'SAME', kernel_initializer = tf.variance_scaling_initializer())\n # print(inputs.shape)\n # inputs = tf.layers.max_pooling2d(inputs = inputs, pool_size = 2, strides = 2, padding = 'VALID')\n # print(inputs.shape)\n # inputs = tf.layers.dropout(inputs = inputs, rate = _DROPOUT_RATE)\n\n # inputs = tf.layers.flatten(inputs = inputs)\n\n # inputs = tf.layers.dense(inputs = inputs, units = 128)\n # print(inputs.shape)\n # trans_parameters = tf.layers.dense(inputs = inputs, units = 6)\n # print(trans_parameters.shape)\n # inputs = stn(input_fmap = raw_inputs, theta = trans_parameters, out_dims = [60, 60])\n\n\n\n #embedding network\n inputs = conv2d_fixed_padding(inputs = inputs, filters = 64, kernel_size = 7, strides = 2, data_format = data_format)\n\n print('height:', inputs.shape[1])\n inputs = tf.identity(inputs, 'initial_conv')\n\n inputs = tf.layers.max_pooling2d(inputs = inputs, pool_size = 3, strides = 2, padding = 'SAME', data_format = data_format)\n\n print('height:', inputs.shape[1])\n inputs = tf.identity(inputs, 'initial_max_pool')\n\n inputs = block_layer(inputs = inputs, filters = 64, block_fn = block_fn, blocks = layers[0], strides = 1, \n is_training = is_training, name = 'blcok_layer1', data_format = data_format)\n print('height:', inputs.shape[1])\n\n #attention module\n #input_fmap = inputs\n # inputs = tf.reshape(inputs, (-1, 64))\n #inputs = tf.layers.dense(inputs = inputs, units = 32, activation = tf.tanh)\n\n #inputs = tf.reshape(inputs, [-1, 32])\n #inputs = tf.layers.dense(inputs = inputs, units = 1, activation = tf.sigmoid)\n\n #attention_para = tf.reshape(inputs, [-1, 21, 21, 1])\n\n \n #inputs = tf.multiply(input_fmap, attention_para)\n\n inputs = block_layer(inputs = inputs, filters = 128, block_fn = block_fn, blocks = layers[1], strides = 2,\n is_training = is_training, name = 'block_layer2', data_format = data_format)\n print('height:', inputs.shape[1])\n inputs = block_layer(inputs = inputs, filters = 256, 
block_fn = block_fn, blocks = layers[2], strides = 2, \n is_training = is_training, name = 'block_layer3', data_format = data_format)\n print('height:', inputs.shape[1])\n inputs = block_layer(inputs = inputs, filters = 512, block_fn = block_fn, blocks = layers[3], strides = 2, \n is_training = is_training, name = 'block_layer4', data_format = data_format)\n\n print('height:', inputs.shape)\n inputs = batch_norm_relu(inputs, is_training, data_format)\n \n inputs = tf.layers.average_pooling2d(inputs = inputs, pool_size = 3, strides = 2, padding = 'VALID', data_format = data_format)\n\n inputs = tf.layers.dropout(inputs = inputs, rate = _DROPOUT_RATE)\n\n inputs = tf.identity(inputs, 'final_avg_pool')\n\n inputs = tf.layers.flatten(inputs = inputs)\n\n #TODO\n inputs = tf.layers.dense(inputs = inputs, units = num_classes)\n\n print(inputs.shape)\n outputs = tf.identity(inputs, 'final_dense')\n\n return outputs", "def forward(self, x, vars=None, bn_training=True):\n\n if vars is None:\n vars = self.vars\n\n idx = 0\n bn_idx = 0\n\n for name, param in self.config:\n if name is 'conv2d':\n w, b = vars[idx], vars[idx + 1]\n # remember to keep synchrozied of forward_encoder and forward_decoder!\n x = F.conv2d(x, w, b, stride=param[4], padding=param[5])\n idx += 2\n # print(name, param, '\\tout:', x.shape)\n elif name is 'convt2d':\n w, b = vars[idx], vars[idx + 1]\n # remember to keep synchrozied of forward_encoder and forward_decoder!\n x = F.conv_transpose2d(x, w, b, stride=param[4], padding=param[5])\n idx += 2\n # print(name, param, '\\tout:', x.shape)\n elif name is 'linear':\n w, b = vars[idx], vars[idx + 1]\n o = F.linear(x, w, b)\n idx += 2\n # print('forward:', idx, x.norm().item())\n elif name is 'bn':\n w, b = vars[idx], vars[idx + 1]\n running_mean, running_var = self.vars_bn_mean[bn_idx], self.vars_bn_var[bn_idx]\n x = F.batch_norm(x, running_mean, running_var, weight=w, bias=b, training=bn_training)\n idx += 2\n bn_idx += 1\n\n elif name is 'flatten':\n x = x.reshape(((x.shape)[0], -1))\n elif name is 'reshape':\n # [b, 8] => [b, 2, 2, 2]\n x = x.view(x.size(0), *param)\n elif name is 'relu':\n x = F.relu(x)\n elif name is 'leakyrelu':\n x = F.leaky_relu(x, negative_slope=param[0], inplace=param[1])\n elif name is 'tanh':\n x = F.tanh(x)\n elif name is 'sigmoid':\n x = F.sigmoid(x)\n elif name is 'upsample':\n x = F.upsample_nearest(x, scale_factor=param[0])\n elif name is 'max_pool2d':\n x = F.max_pool2d(x, param[0], param[1], param[2])\n elif name is 'avg_pool2d':\n x = F.avg_pool2d(x, param[0], param[1], param[2])\n\n else:\n raise NotImplementedError\n\n # make sure variable is used properly\n assert idx == len(vars)\n assert bn_idx == len(self.vars_bn_mean)\n\n\n return o", "def forward(self, input):\n label=np.dot(input,self.w)+self.b\n return label", "def feedForward(self, inputs):\n\n\t\tinputs = np.atleast_1d(inputs)\n\n\t\tif not len(inputs) == self.nInputs:\n\n\t\t\traise ValueError(\"The input vector is the wrong length for this network\")\n\n\t\t#don't forget we have a bias unit in here too\n\t\tfor i in range(1,self.nInputs+1):\n\t\t\tself.inputLayer[i].activation = inputs[i-1]\n\t\t\tself.inputLayer[i].output = inputs[i-1]\t\t\t\n\n\t\tfor layer in self.hiddenLayers:\n\n\t\t\tfor unit in layer:\n\n\t\t\t\tunit.forwardValue()\n\n\t\tfor unit in self.outputLayer:\n\t\n\t\t\tunit.forwardValue()", "def forward(self, x):\r\n x = x.reshape(x.shape[0], x.shape[1], 1 , 1)\r\n x = self.input(x)\r\n x = self.bn(x)\r\n x = F.relu(x)\r\n for i in range(len(self.DV)-1, -1, 
-1):\r\n x = self.DV[i](x)\r\n if i != 0:\r\n x = self.BN[i](x)\r\n x = F.relu(x)\r\n for col, t in enumerate(self.col_type):\r\n i = int(col/self.shape)\r\n j = col % self.shape\r\n if t == \"binary\":\r\n x[:,:,i,j] = torch.sigmoid(x[:,:,i,j])\r\n elif t == \"normalize\":\r\n x[:,:,i,j] = torch.tanh(x[:,:,i,j])\r\n else:\r\n x[:,:,i,j] = torch.relu(x[:,:,i,j])\r\n return x", "def forward(self, x):\n x1 = x[:, 0, :, :].reshape((-1, 1, obs_size * 2 + 1, obs_size * 2 + 1))\n x2 = x[:, 1, :, :].reshape((-1, (obs_size * 2 + 1) ** 2))\n if x2.shape[0] == 1:\n x2 = np.tile(x2, (minibatch_size, 1))\n h = F.relu(self.bn1(self.conv1(x)))\n h = F.relu(self.bn2(self.conv2(x)))\n h = F.relu(self.bn3(self.conv3(x)))\n h = self.l(h)\n return DiscreteActionValue(h)", "def forward(self):\n R = self.LP.cost.R\n A = self.LP.dyn.A\n B = self.LP.dyn.B\n\n x = self.LP.x0\n self.x[0] = x\n for i in range(self.LP.N):\n u = - np.linalg.inv(R+B.T.dot(self.V[i+1]).dot(B)).dot(.5*B.T.dot(self.W[i+1]) \\\n + B.T.dot(self.V[i+1]).dot(A).dot(x))\n if self.LP.dyn.u_dim == 1:\n self.u[i] = float(u)\n else:\n self.u[i] = u\n self.J_star[i] = float(x.T.dot(self.V[i]).dot(x) + self.W[i].T.dot(x)) #up to constant\n\n if i == 0:\n self.J[i] = self.LP.cost.loss(x, u, i)\n else:\n self.J[i] = self.J[i-1] + self.LP.cost.loss(x, u, i)\n x = self.LP.dyn.next_state(x, u)\n self.x[i+1] = x\n\n self.J[self.LP.N] = self.J[self.LP.N-1] + self.LP.cost.loss(x, 0, self.LP.N)\n\n self.J_star[self.LP.N] = float(x.T.dot(self.V[self.LP.N]).dot(x) \\\n + self.W[self.LP.N].T.dot(x)) #up to constant", "def forward(self, input):\n return self.layers(input)", "def make_model(self, inputs, is_training):\n with tf.variable_scope('ResNet50'):\n x = conv2d(inputs, 64, [7, 7], strides=[1, 2, 2, 1], name='conv1') # size 1/2\n x = bn(x, is_training)\n x = relu(x)\n x = max_pool(x, ksize=[1, 3, 3, 1], name='pool1') # size 1/4\n\n x = self.conv_block(x, [64, 64, 256], '2_1', is_training, s=1)\n x = self.identity_block(x, [64, 64, 256], '2_2', is_training)\n x = self.identity_block(x, [64, 64, 256], '2_3', is_training)\n\n x = self.conv_block(x, [128, 128, 512], '3_1', is_training)\n x = self.identity_block(x, [128, 128, 512], '3_2', is_training)\n x = self.identity_block(x, [128, 128, 512], '3_3', is_training)\n\n x = self.atrous_conv_block(x, [256, 256, 1024], '4_1', 2, is_training, s=1)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_2', 2, is_training)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_3', 2, is_training)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_4', 2, is_training)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_5', 2, is_training)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_6', 2, is_training)\n\n x = self.atrous_conv_block(x, [512, 512, 2048], '5_1', 4, is_training, s=1)\n x = self.atrous_identity_block(x, [512, 512, 2048], '5_2', 4, is_training)\n x = self.atrous_identity_block(x, [512, 512, 2048], '5_3', 4, is_training)\n\n\n\n \"\"\"\n Astrous Pyrimid Pooling. 
Decoder\n \"\"\"\n with tf.variable_scope('ASPP'):\n feature_map_shape = x.get_shape().as_list()\n\n # global average pooling\n # feature 맵의 height, width를 평균을 낸다.\n feature_map = tf.reduce_mean(x, [1, 2], keepdims=True)\n\n feature_map = conv2d(feature_map, 256, [1, 1], name='gap_feature_map')\n feature_map = tf.image.resize_bilinear(feature_map, [feature_map_shape[1], feature_map_shape[2]])\n\n rate1 = conv2d(x, 256, [1, 1], name='rate1')\n rate6 = atrous_conv2d(x, 256, [3, 3], rate=6, name='rate6')\n rate12 = atrous_conv2d(x, 256, [3, 3], rate=12, name='rate12')\n rate18 = atrous_conv2d(x, 256, [3, 3], rate=18, name='rate18')\n\n concated = tf.concat([feature_map, rate1, rate6, rate12, rate18], axis=3)\n\n net = conv2d(concated, 256, [1, 1], name='net')\n\n logits = conv2d(net, self.N_CLASS, [1, 1], name='logits')\n logits = tf.image.resize_bilinear(logits, size=[self.RESIZE, self.RESIZE], name='out')\n\n pred = tf.argmax(logits, axis=3)\n pred = tf.expand_dims(pred, dim=3)\n\n return logits, pred", "def build_graph(self):\n\n\n\n self.inputs.append( #uint8\n tf.placeholder(tf.float32, shape=[None, None, None, self.channel],\n name='input/lr')) \n\n self.label.append(\n tf.placeholder(tf.float32, shape=[None, None, None, self.channel],\n name='label/hr'))", "def forward(self, x):\n out = self.pre_processing(x)\n out = self.a3(out)\n out = self.b3(out)\n out = self.maxpool(out)\n out = self.a4(out)\n out = self.b4(out)\n out = self.c4(out)\n out = self.d4(out)\n out = self.e4(out)\n out = self.maxpool(out)\n out = self.a5(out)\n out = self.b5(out)\n out = self.avgpool(out)\n out = out.view(out.size(0), -1) # reshape the output tensor\n out = self.linear(out)\n\n return out", "def forward(self, x):\r\n\r\n y = self.conv1(x)\r\n y = self.bn1(y)\r\n y = F.relu(y, inplace = True)\r\n y = self.conv2(y)\r\n y = self.bn2(y)\r\n y = F.relu(y, inplace = True)\r\n\r\n return y", "def forward(self, x):\n x = self.input(x)\n x = self.in0(x)\n x = self.block0(x) + x\n x = self.block1(x) + x\n x = self.block2(x) + x\n x = self.block3(x) + x\n x = self.block4(x) + x\n x = self.in0(x)\n\n out = self.out(x)\n\n return out", "def forward(self, inputs):\n #print(\"w1 shape\", self.w1.shape)\n z1 = np.dot(inputs, self.w1)\n self.a1 = sigmoid(z1)\n \n z2 = np.dot(self.a1, self.w2)\n self.a2 = sigmoid(z2)\n \n z3 = np.dot(self.a2, self.w3)\n self.y = sigmoid(z3)\n \n return self.y", "def train():\n args = arguments_st_train()\n\n h, w = map(int, args.input_size.split(','))\n input_size = (h, w)\n\n \n if args.use_random_seed:\n tf.set_random_seed(args.random_seed)\n \n # Create queue coordinator.\n coord = tf.train.Coordinator()\n \n # Load reader.\n with tf.name_scope(\"create_inputs\"):\n reader = ImageReader(\n args.data_dir,\n args.data_list,\n input_size=input_size,\n random_scale=args.random_scale,\n random_mirror=args.random_mirror,\n random_crop=args.random_crop,\n ignore_label=args.ignore_label,\n img_mean=IMG_MEAN,\n coord=coord,\n task=args.task)\n image_batch, label_batch = reader.dequeue(args.batch_size)\n \n # Create network.\n with slim.arg_scope(vgg_arg_scope(weight_decay=args.weight_decay, use_batch_norm=True, is_training=True)):\n if args.network == 'vgg_16_deeplab_st':\n net, end_points = vgg_16_deeplab_st(image_batch, num_classes=args.num_classes, is_training=True, dropout_keep_prob=args.keep_prob)\n elif args.network == 'vgg_16_shortcut_deeplab_st':\n net, end_points = vgg_16_shortcut_deeplab_st(image_batch, num_classes=args.num_classes, is_training=True, 
dropout_keep_prob=args.keep_prob)\n else:\n raise Exception('network name is not recognized!')\n \n \n # Predictions.\n raw_output = end_points['vgg_16/fc8_voc12']\n\n # gt labels\n raw_gt = prepare_label(label_batch, tf.stack(raw_output.get_shape()[1:3]), num_classes=args.num_classes,\n one_hot=False, task=args.task) # [batch_size, h, w]\n\n # losses\n if args.task == 'normal':\n loss = get_normal_loss(raw_output, raw_gt, args.num_classes, args.ignore_label) * args.loss_scale\n elif args.task == 'seg':\n loss = get_seg_loss(raw_output, raw_gt, args.num_classes, args.ignore_label) * args.loss_scale\n\n l2_losses = [args.weight_decay * tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'weights' in v.name]\n reduced_loss = tf.reduce_mean(loss) + tf.add_n(l2_losses)\n \n # Image summary for visualisation.\n raw_output_up = tf.image.resize_bilinear(raw_output, tf.shape(image_batch)[1:3,])\n raw_output_up = tf.argmax(raw_output_up, axis=3)\n pred = tf.expand_dims(raw_output_up, dim=3)\n \n images_summary = tf.py_func(inv_preprocess, [image_batch, args.save_num_images, IMG_MEAN], tf.uint8)\n labels_summary = tf.py_func(decode_labels, [label_batch, args.save_num_images, args.num_classes, args.task], tf.uint8)\n preds_summary = tf.py_func(decode_labels, [pred, args.save_num_images, args.num_classes, args.task], tf.uint8)\n \n total_summary = tf.summary.image('images', \n tf.concat(axis=2, values=[images_summary, labels_summary, preds_summary]), \n max_outputs=args.save_num_images) # Concatenate row-wise.\n summary_writer = tf.summary.FileWriter(args.snapshot_dir,\n graph=tf.get_default_graph())\n \n # Define loss and optimisation parameters.\n train_op, step_ph = create_train_ops_st(reduced_loss, args)\n \n # Set up tf session and initialize variables. 
\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n init = tf.global_variables_initializer()\n \n sess.run(init)\n\n # Load variables if the checkpoint is provided.\n if args.restore_from is not None:\n load_st(sess, args)\n \n # Saver for storing checkpoints of the model.\n save_op = tf.train.Saver(var_list=tf.global_variables(), max_to_keep=args.max_to_keep)\n \n # Start queue threads.\n threads = tf.train.start_queue_runners(coord=coord, sess=sess)\n\n # Iterate over training steps.\n for step in range(args.num_steps):\n start_time = time.time()\n feed_dict = { step_ph : step }\n \n if step % args.save_pred_every == 0:\n loss_value, images, labels, preds, summary, _ = sess.run([reduced_loss, image_batch, label_batch, pred, total_summary, train_op], feed_dict=feed_dict)\n summary_writer.add_summary(summary, step)\n save(save_op, sess, args.snapshot_dir, step)\n else:\n loss_value, _ = sess.run([reduced_loss, train_op], feed_dict=feed_dict)\n duration = time.time() - start_time\n print('step {:d} \\t loss = {:.3f}, ({:.3f} sec/step)'.format(step, loss_value, duration))\n coord.request_stop()\n coord.join(threads)", "def forward(self, inp: torch.Tensor) -> torch.Tensor:\n x = self.conv1(inp)\n x = self.maxpool(x)\n\n for i in range(self._num_layers):\n x = getattr(self, \"C%d\" % (i + 1))(x)\n\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n return x", "def forward(self, x):\n lay1 = self.linear1(x)\n lay1 = nn.functional.relu(lay1)\n\n lay2 = self.linear2(lay1)\n lay2 = nn.functional.relu(lay2)\n \n lay3_1 = self.linear3_1(lay2)\n lay3_1 = nn.functional.relu(lay3_1)\n\n ## CHECK HERE TOO!!!\n out_1 = self.linear4_1(lay3_1)\n out_1 = out_1.view(-1, ) # reshape it to a 1d-array\n \n # taken care by BCEWithLogitsLoss\n # out_1 = nn.functional.softmax(out_1, dim=0) \n \n lay3_2 = self.linear3_2(lay2)\n lay3_2 = nn.functional.relu(lay3_2)\n \n out_2 = self.linear4_2(lay3_2)\n \n return out_1, out_2", "def __call__(self, x, is_training, nfilt=32, reuse=False):\n with tf.variable_scope(self.name):\n x = tf.reshape(x, [-1, self.input_dim, self.input_dim, self.channels])\n\n h1 = unet_conv(x, nfilt*1, 'h1', reuse, is_training, use_batch_norm=False)\n h2 = unet_conv(h1, nfilt*2, 'h2', reuse, is_training)\n\n # if self.input_dim==32:\n # imdim16 = int(self.input_dim/4)\n # minibatch_features = minibatch(tf.layers.flatten(h2), nfilt*2*imdim16*imdim16, num_kernels=1*imdim16*imdim16, reuse=reuse)\n # minibatch_features = tf.reshape(minibatch_features, [-1, imdim16, imdim16, 1])\n # h2 = tf.concat([h2, minibatch_features], 3)\n\n h3 = unet_conv(h2, nfilt*4, 'h3', reuse, is_training)\n h4 = unet_conv(h3, nfilt*8, 'h4', reuse, is_training)\n\n out = unet_conv(\n h4, 1, 'out', reuse, is_training, s=1, use_batch_norm=False,\n activation=None)\n\n tensors = [x, h1, h2, h3, h4, out]\n for tensor in tensors:\n print(tensor)\n\n return out", "def forward(self, x):\n if self.training:\n x = self.input_pert(x)\n x = self.encoder(x)\n x = self.decoder(x)\n return x", "def train():\n pass", "def run_neural_network(self, rover_input, weight_vec, rover_id):\n self.get_inputs(rover_input, rover_id)\n self.get_weights(weight_vec, rover_id)\n self.get_outputs(rover_id)", "def forward(self, x):\n if x.size()[0] != 1 or x.size()[1] != 200 or x.size()[2] != 96:\n return torch.zeros(1,1)\n x = x.view(1,1,x.size()[1],x.size()[2]) #1,1,200,96\n x = nn.MaxPool2d(2)(self.conv1(x))\n x = self.dropout(F.relu(x)) #1,3,96,46\n x = 
nn.MaxPool2d(2)(self.conv2(x))\n x = self.dropout(F.relu(x)) #1,6,47,21\n x = nn.MaxPool2d(2)(self.conv3(x))\n x = self.dropout(F.relu(x)) #1,12,21,8\n x = nn.MaxPool2d(2)(self.conv4(x))#1,24,8,2\n x = x.view(1,-1)#1,384\n x = self.fc1(F.relu(x))\n x = self.fc2(F.relu(x))\n x = self.fc3(F.relu(x))\n return F.sigmoid(x)", "def build_graph(self):\n # Print\n if self.verbose:\n print('Building Yolo Graph....')\n # Reset default graph\n tf.reset_default_graph()\n # Input placeholder\n self.x = tf.placeholder('float32', [None, 448, 448, 3])\n # conv1, pool1\n self.conv1 = self.conv_layer(1, self.x, 64, 7, 2)\n self.pool1 = self.maxpool_layer(2, self.conv1, 2, 2)\n # size reduced to 64x112x112\n # conv2, pool2\n self.conv2 = self.conv_layer(3, self.pool1, 192, 3, 1)\n self.pool2 = self.maxpool_layer(4, self.conv2, 2, 2)\n # size reduced to 192x56x56\n # conv3, conv4, conv5, conv6, pool3\n self.conv3 = self.conv_layer(5, self.pool2, 128, 1, 1)\n self.conv4 = self.conv_layer(6, self.conv3, 256, 3, 1)\n self.conv5 = self.conv_layer(7, self.conv4, 256, 1, 1)\n self.conv6 = self.conv_layer(8, self.conv5, 512, 3, 1)\n self.pool3 = self.maxpool_layer(9, self.conv6, 2, 2)\n # size reduced to 512x28x28\n # conv7 - conv16, pool4\n self.conv7 = self.conv_layer(10, self.pool3, 256, 1, 1)\n self.conv8 = self.conv_layer(11, self.conv7, 512, 3, 1)\n self.conv9 = self.conv_layer(12, self.conv8, 256, 1, 1)\n self.conv10 = self.conv_layer(13, self.conv9, 512, 3, 1)\n self.conv11 = self.conv_layer(14, self.conv10, 256, 1, 1)\n self.conv12 = self.conv_layer(15, self.conv11, 512, 3, 1)\n self.conv13 = self.conv_layer(16, self.conv12, 256, 1, 1)\n self.conv14 = self.conv_layer(17, self.conv13, 512, 3, 1)\n self.conv15 = self.conv_layer(18, self.conv14, 512, 1, 1)\n self.conv16 = self.conv_layer(19, self.conv15, 1024, 3, 1)\n self.pool4 = self.maxpool_layer(20, self.conv16, 2, 2)\n # size reduced to 1024x14x14\n # conv17 - conv24\n self.conv17 = self.conv_layer(21, self.pool4, 512, 1, 1)\n self.conv18 = self.conv_layer(22, self.conv17, 1024, 3, 1)\n self.conv19 = self.conv_layer(23, self.conv18, 512, 1, 1)\n self.conv20 = self.conv_layer(24, self.conv19, 1024, 3, 1)\n self.conv21 = self.conv_layer(25, self.conv20, 1024, 3, 1)\n self.conv22 = self.conv_layer(26, self.conv21, 1024, 3, 2)\n self.conv23 = self.conv_layer(27, self.conv22, 1024, 3, 1)\n self.conv24 = self.conv_layer(28, self.conv23, 1024, 3, 1)\n # size reduced to 1024x7x7\n # fc1, fc2, fc3\n self.fc1 = self.fc_layer(29, self.conv24, 512,\n flatten=True, linear=False)\n self.fc2 = self.fc_layer(\n 30, self.fc1, 4096, flatten=False, linear=False)\n self.fc3 = self.fc_layer(\n 31, self.fc2, 1470, flatten=False, linear=True)\n # Run session\n self.sess = tf.Session()\n self.sess.run(tf.global_variables_initializer())\n self.saver = tf.train.Saver()\n self.saver.restore(self.sess, self.weightFile)\n # Print\n print('Graph built.')", "def feedForward(self):\n # Calculate the current values of the first layer\n self.layer1 = sigmoid(np.dot(self.input, self.weights1))\n\n # Calculate the sigmoid of the second layer which is the output\n self.output = sigmoid(np.dot(self.layer1, self.weights2))", "def forward(self, x):\n # Get results of encoder network\n h1 = self.encode_nn(x)\n\n # latent space\n mu = self.encode_mu(h1)\n log_var = self.encode_log_var(h1)\n\n # Reparameterize\n z = self.reparameterize(mu, log_var)\n return z, mu, log_var", "def forward(self, x):\n # Get results of encoder network\n h1 = self.encode_nn(x)\n\n # latent space\n mu = 
self.encode_mu(h1)\n log_var = self.encode_log_var(h1)\n\n # Reparameterize\n z = self.reparameterize(mu, log_var)\n return z, mu, log_var", "def forward(self, x):\n x = self.first_deconv(x)\n x = self.first_batch_norm(x)\n x = F.leaky_relu(x)\n\n x = self.second_deconv(x)\n x = self.second_batch_norm(x)\n x = F.leaky_relu(x)\n\n x = self.third_deconv(x)\n x = self.third_batch_norm(x)\n x = F.leaky_relu(x)\n\n x = self.fourth_deconv(x)\n x = self.fourth_batch_norm(x)\n\n x = self.fifth_deconv(x)\n x = self.fifth_batch_norm(x)\n\n x = self.sixth_deconv(x)\n x = self.sixth_batch_norm(x)\n\n x = self.seventh_deconv(x)\n\n # sigmoid_out = nn.functional.sigmoid(x)\n tanh_out = nn.functional.tanh(x)\n\n out = (tanh_out + 1) * 255 / 2\n\n # print 'out.shape =', out.shape\n\n return out", "def forward(self, x):\n previous_batch, current_batch = x\n previous_batch_pc, previous_batch_f = previous_batch[0], previous_batch[1]\n current_batch_pc, current_batch_f = current_batch[0], current_batch[1]\n\n f1 = previous_batch_pc[:, :, 3:]\n pc1 = previous_batch_pc[:, :, :3]\n\n f2 = current_batch_pc[:, :, 3:]\n pc2 = current_batch_pc[:, :, :3]\n\n batch_size, n_points_prev, _ = previous_batch_pc.shape\n batch_size, n_points_cur, _ = current_batch_pc.shape\n\n # All outputs of the following layers are tuples of (pos, features)\n # --- Point Feature Part ---\n pf_prev_1, pf_prev_2, pf_prev_3 = self._point_feature_net(pc1.float(), f1.float())\n pf_curr_1, pf_curr_2, pf_curr_3 = self._point_feature_net(pc2.float(), f2.float())\n\n # --- Flow Embedding / Point Mixture Part ---\n _, fe_2, fe_3 = self._point_mixture(x1=pf_prev_3, x2=pf_curr_3)\n\n # --- Flow Refinement Part ---\n x = self._flow_refinement(pf_curr_1=pf_curr_1, pf_curr_2=pf_curr_2, pf_curr_3=pf_curr_3, fe_2=fe_2, fe_3=fe_3)\n\n # --- Final fully connected layer ---\n pos, features = x\n features = features.transpose(1, 2)\n x = self._fc(features)\n return x", "def test(self):\n self.output = self.net.forward(Variable(self.source, volatile=True))\n self.loss = self.loss_function(self.output,\n Variable(self.target, volatile=True))", "def _forward(self, x):\n global global_epoch\n global_epoch += 1\n bias = -np.ones((x.shape[0], 1))\n tail = np.zeros((x.shape[0], self.dim_hid+self.dim_out))\n nodes = np.concatenate((bias, x, tail), axis=1)\n weight = self.weight * self.connectivity\n for i in range(self.dim_in, self.dim_in+self.dim_hid+self.dim_out):\n net = nodes.dot(weight[i])\n nodes[:,i] = self.__sigmoid(net)\n nodes[:,self.dim_in:self.dim_in+self.dim_hid] *= self.hidden\n return nodes", "def forward(self, x): \n out = self.layer1(x)\n out = self.layer2(out)\n\n out = out.reshape(out.size(0), -1)\n \n out = self.dropout(out)\n out = self.fc1(out)\n out = self.fc2(out)\n \n return out", "def forward(self, x):\n # x = state\n \n x = F.relu(self.input(x))\n x = self.output(x)\n \n return x", "def feedForward(self, inputs):\n self.ai = np.array(inputs)\n self.ah1 = tanh(self.ai.dot(self.wi))\n self.ah2 = tanh(self.ah1.dot(self.wh))\n self.ao = softmax(self.ah2.dot(self.wo))", "def forward(self, img):\n device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\n H, W = img.size()[2], img.size()[3]\n #print('x',x)\n #print('x.shape',x.shape) ## 32 x 3 x 96 x 128\n z32 = self.start(img)\n z64 = self.layer1(z32) + self.layer1_ds(z32)\n #print('z1',z64.shape)\n z128 = self.layer2(z64) + self.layer2_ds(z64)\n #print('z2',z128.shape)\n z256 = self.layer3(z128) + self.layer3_ds(z128)\n #print('z3',z256.shape)\n z256d = 
self.drop_out_layer(z256)\n #print('z_drop',z256d.shape)\n z256u = self.layer4(z256d)\n #print('z4',z256u.shape)\n z128u = self.layer5(torch.cat((z256u, F.interpolate(z256d,size=z256u.size()[2:] )), 1))\n #print('z5',z128u.shape)\n z64u = self.layer6(torch.cat((z128u, F.interpolate(z128,size=z128u.size()[2:] )), 1))\n #print('z6',z64u.shape)\n\n z32u = self.final(torch.cat((z64u, F.interpolate(z64,size=z64u.size()[2:] )), 1))\n #print('z6_plus',z32u.shape)\n\n #print('z7_result',self.classifer(z32u)[:, :, :H, :W].shape)\n result_class = self.classifer(z32u)[:, :, :H, :W]\n\n #print('model result shape',result_class.shape)\n ## 16 x 1 x 300 x 400\n\n # using soft argmax\n spa_argmax = spatial_argmax(torch.squeeze(result_class,1))\n\n #one hot with spatial argmax\n #xy_val = torch.zeros(spa_argmax.shape).float()\n #for idx, pt in enumerate(spa_argmax):\n # x_val = (pt[0]+1.0)*63.5\n # y_val = (pt[1]+1.0)*47.5\n # # for each batch. [0...127][0...95]\n # xy_val[idx][0] = x_val\n # xy_val[idx][1] = y_val\n\n xy_val = (spa_argmax+1.0).to(device)\n #print('spa_argmax',spa_argmax)\n scaling_factor = torch.FloatTensor([[(W-1)/2,0.],[0.,(H-1)/2]]).to(device)\n #scaling_factor = torch.FloatTensor([[63.5,0.],[0.,44.5]]).to(device)\n xy_val = xy_val.mm(scaling_factor)\n\n return xy_val", "def forward(self, input_, is_training = True, reuse = False, data_format = 'channels_last'):\n self.input = input_\n self.reuse = reuse\n self.is_training = is_training\n self.data_format = data_format\n outp = self._build_graph()\n return outp", "def forward(self, x):\r\n y = self.en_fc1(x)\r\n y = F.relu(y)\r\n y = self.en_fc2(y)\r\n y = F.relu(y)\r\n y = self.en_fc3(y)\r\n y = F.relu(y)\r\n\r\n mean = self.en_mu(y)\r\n stddev_p = self.en_log(y)\r\n \r\n n = x.shape[0]\r\n z = torch.randn(n,self.latent_dim)\r\n std = torch.exp(stddev_p/2.0)\r\n z = z.mul(std) + mean\r\n \r\n xhat = self.de_fc1(z)\r\n xhat = F.relu(xhat)\r\n xhat = self.de_fc2(xhat)\r\n xhat = F.relu(xhat)\r\n xhat = self.de_fc3(xhat)\r\n xhat = F.sigmoid(xhat)\r\n \r\n return y,mean,stddev_p,z,xhat", "def forward(self, x):\n\n x = F.max_pool2d(F.relu(self.batch_norm1(self.conv1(x))), 3, stride=2, padding=1)\n x = F.max_pool2d(F.relu(self.batch_norm2(self.conv2(x))), 3, stride=2, padding=1)\n x = F.max_pool2d(F.relu(self.batch_norm3_b(self.conv3_b(F.relu(self.batch_norm3_a(self.conv3_a(x)))))), 3, stride=2, padding=1)\n x = F.max_pool2d(F.relu(self.batch_norm4_b(self.conv4_b(F.relu(self.batch_norm4_a(self.conv4_a(x)))))), 3, stride=2, padding=1)\n x = F.max_pool2d(F.relu(self.batch_norm5_b(self.conv5_b(F.relu(self.batch_norm5_a(self.conv5_a(x)))))), 3, stride=2, padding=1)\n x = self.avg_pool(x).view(-1,512)\n out = self.linear(x)\n\n return out", "def forward(self,z):\n z = z.float().transpose(1,2).contiguous()\n x = F.relu(self.bn1(self.fc1(z)))\n x = F.relu(self.bn1(x))\n x = F.relu(self.bn2(self.fc2(x)))\n x = F.relu(self.bn3(self.fc3(x)))\n vocal = torch.exp(self.fc4_1(x))\n noise = torch.exp(self.fc4_2(x))\n return vocal.transpose(1,2), noise.transpose(1,2)", "def forward(self, inputs):\n\n down1, indices_1, unpool_shape1 = self.layer_1(inputs=inputs,\n layer_size=2)\n down2, indices_2, unpool_shape2 = self.layer_2(inputs=down1,\n layer_size=2)\n down3, indices_3, unpool_shape3 = self.layer_3(inputs=down2,\n layer_size=3)\n down4, indices_4, unpool_shape4 = self.layer_4(inputs=down3,\n layer_size=3)\n down5, indices_5, unpool_shape5 = self.layer_5(inputs=down4,\n layer_size=3)\n down6, indices_6, unpool_shape6 = self.layer_6(inputs=down5,\n 
layer_size=3)\n up5 = self.layer_7(inputs=down6, indices=indices_6,\n output_shape=unpool_shape6, layer_size=3)\n up4 = self.layer_8(inputs=up5, indices=indices_5,\n output_shape=unpool_shape5, layer_size=3)\n up3 = self.layer_9(inputs=up4, indices=indices_4,\n output_shape=unpool_shape4, layer_size=3)\n up2 = self.layer_10(inputs=up3, indices=indices_3,\n output_shape=unpool_shape3, layer_size=3)\n up1 = self.layer_11(inputs=up2, indices=indices_2,\n output_shape=unpool_shape2, layer_size=2)\n output = self.layer_12(inputs=up1, indices=indices_1,\n output_shape=unpool_shape1, layer_size=2)\n\n return output", "def forward(self, x):\n y_pred = self.net(x)\n return y_pred", "def network(self):\n inp = Input((self.env_dim))\n # #\n # x = Dense(256, activation='relu')(inp)\n # x = GaussianNoise(1.0)(x)\n # #\n # x = Flatten()(x)\n # x = Dense(128, activation='relu')(x)\n # x = GaussianNoise(1.0)(x)\n # #\n # out = Dense(self.act_dim, activation='tanh', kernel_initializer=RandomUniform())(x)\n # out = Lambda(lambda i: i * self.act_range)(out)\n # #\n\n x = conv_block(inp, 32, (2, 2), 8)\n x = conv_block(x, 64, (2, 2), 4)\n x = conv_block(x, 64, (2, 2), 3)\n x = Flatten()(x)\n x = Dense(256, activation='relu')(x)\n\n x = Dense(self.act_dim, activation='tanh', kernel_initializer=RandomUniform())(x)\n out = Lambda(lambda i: i * self.act_range)(x)\n\n return Model(inp, out)", "def forward(self, inputs):\n features, coords = inputs\n voxel_features, voxel_coords = self.voxelization(features, coords)\n voxel_features = self.voxel_layers(voxel_features)\n voxel_features = trilinear_devoxelize(voxel_features, voxel_coords,\n self.resolution, self.training)\n fused_features = voxel_features + self.point_features(features)\n return fused_features, coords", "def main(args):\n with tf.Graph().as_default():\n with tf.Session() as sess:\n # prepare validate datasets\n train_x, train_y, test_x, test_y = load_dataset()\n\n # Load the modelc\n load_model(args.model)\n\n # Get input and output tensors, ignore phase_train_placeholder\n # for it have default value.\n inputs_placeholder = tf.get_default_graph().get_tensor_by_name(\"input:0\")\n\n feature_maps = tf.get_default_graph().\\\n get_tensor_by_name('MobileFaceNet/MobileFaceNet/'\n 'Conv2d_4_InvResBlock_5/Conv/Conv2D:0')\n\n feature_maps_r = tf.reshape(feature_maps, [-1, 3, 14, 14, 256])\n anchor_feature_maps = feature_maps_r[:, 0, :, :, :]\n pos_feature_maps = feature_maps_r[:, 1, :, :, :]\n neg_feature_maps = feature_maps_r[:, 2, :, :, :]\n\n anchor_labels = tf.placeholder(tf.int32, [None, 3], name='anchor_labels')\n neg_labels = tf.placeholder(tf.int32, [None, 3], name='neg_labels')\n test_labels = tf.placeholder(tf.int32, [None, 3], name='test_labels')\n learning_rate = tf.placeholder(tf.float32, name='learning_rate')\n\n anchor_logits, anchor_feature = network(feature_maps=anchor_feature_maps, keep_prob=0.5)\n pos_logits, pos_feature = network(feature_maps=pos_feature_maps, keep_prob=0.5)\n neg_logits, neg_feature = network(feature_maps=neg_feature_maps, keep_prob=0.5)\n test_logits, _ = network(feature_maps=feature_maps, keep_prob=1.0,\n is_training=False, reuse=True)\n train_accuracy = calculate_accuracy(logit=anchor_logits, label=anchor_labels,\n name='train_accuracy')\n test_accuracy = calculate_accuracy(logit=test_logits, label=test_labels,\n name='test_accuracy')\n\n with tf.name_scope(\"retrain_loss\"):\n pos_pair_loss = tf.losses.mean_squared_error(anchor_feature, pos_feature)\n temp_neg_pair_loss = tf.losses.mean_squared_error(anchor_feature, 
neg_feature)\n neg_pair_loss = tf.maximum(0.0, 2.0 - temp_neg_pair_loss)\n\n anchor_loss = cross_entropy_loss(anchor_logits, anchor_labels)\n pos_loss = cross_entropy_loss(pos_logits, anchor_labels)\n neg_loss = cross_entropy_loss(neg_logits, neg_labels)\n\n loss = anchor_loss + pos_loss + neg_loss + pos_pair_loss + neg_pair_loss\n\n with tf.name_scope(\"retrain_op\"): # not shown in the book\n optimizer = tf.train.GradientDescentOptimizer(learning_rate) # not shown\n train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,\n scope=\"retrain_net\")\n train_op = optimizer.minimize(loss, var_list=train_vars)\n\n uninitialized_vars = []\n for var in tf.all_variables():\n try:\n sess.run(var)\n except tf.errors.FailedPreconditionError:\n uninitialized_vars.append(var)\n\n init_new_vars_op = tf.initialize_variables(uninitialized_vars)\n sess.run(init_new_vars_op)\n\n model_path = './checkpoint/SR_model/'\n new_saver = tf.train.Saver()\n\n batch_size = 7\n n_epochs = 45\n train_iteration = len(train_x) // batch_size\n test_iteration = len(test_x) // batch_size\n\n # loop for epoch\n best_test_accuracy = 0\n epoch_lr = 0.0005\n for epoch in range(0, n_epochs):\n if epoch == int(epoch * 0.33) or epoch == int(epoch * 0.66):\n epoch_lr = epoch_lr * 0.1\n\n # get batch data\n for idx in range(0, train_iteration):\n batch_x = train_x[idx * batch_size:(idx + 1) * batch_size]\n batch_y = train_y[idx * batch_size:(idx + 1) * batch_size]\n\n concat_data = convert_train_data_batch(batch_x)\n anchor_label_r, neg_label_r = convert_train_label_batch(batch_y)\n\n train_feed_dict = {\n inputs_placeholder: concat_data,\n anchor_labels: anchor_label_r,\n neg_labels: neg_label_r,\n learning_rate: epoch_lr\n }\n\n _, train_accuracy_v, anchor_loss_v, pos_loss_v, neg_loss_v, pos_pair_loss_v, \\\n neg_pair_loss_v = sess.run([train_op, train_accuracy,\n anchor_loss, pos_loss, neg_loss, pos_pair_loss,\n neg_pair_loss], feed_dict=train_feed_dict)\n\n if idx % 10 == 0:\n # display training status\n print('Epoch: [%2d][%4d/%4d] Anchor Loss %.4f Pos Loss %.4f Neg Loss %.4f '\n 'Pos Pair Loss %.4f Neg Pair Loss %.4f Prec %.4f\\t'\n % (epoch, idx, train_iteration, anchor_loss_v, pos_loss_v, neg_loss_v,\n pos_pair_loss_v, neg_pair_loss_v, train_accuracy_v))\n\n total_test_accuracy = 0\n for idx in range(test_iteration):\n batch_x = test_x[idx * batch_size:(idx + 1) * batch_size]\n batch_y = test_y[idx * batch_size:(idx + 1) * batch_size]\n\n batch_x_r = convert_test_data_batch(batch_x)\n batch_y_r = convert_test_label_batch(batch_y)\n\n test_feed_dict = {\n inputs_placeholder: batch_x_r,\n test_labels: batch_y_r\n }\n\n test_accuracy_v = sess.run(test_accuracy, feed_dict=test_feed_dict)\n\n total_test_accuracy += test_accuracy_v\n total_test_accuracy /= test_iteration\n\n # display training status\n print(\"Epoch: [%2d/%2d]\\ttest_accuracy: %.2f\" \\\n % (epoch, n_epochs, total_test_accuracy))\n\n # save model\n if best_test_accuracy < total_test_accuracy:\n best_test_accuracy = total_test_accuracy\n new_saver.save(sess, os.path.join(model_path, 'SR_model_%2.4f.ckpt'\n % best_test_accuracy))", "def forward(self, x):\n #batch_size = x.shape[0]\n out = self.model(x)\n return out", "def predict_from(self, inputs, to_layers):" ]
[ "0.6696856", "0.6663844", "0.6572504", "0.6489043", "0.6439873", "0.6390039", "0.63803667", "0.630682", "0.63015246", "0.6280952", "0.6269826", "0.62485385", "0.62225485", "0.62188786", "0.6218298", "0.6182807", "0.61798155", "0.61786693", "0.6172364", "0.61677724", "0.61635745", "0.6146077", "0.6119188", "0.6116991", "0.610831", "0.60976243", "0.6078553", "0.60722274", "0.6066292", "0.6065537", "0.60653657", "0.6058921", "0.60554653", "0.6049077", "0.60384744", "0.6033601", "0.6032454", "0.60177004", "0.6015599", "0.60095865", "0.6006739", "0.6006507", "0.5988767", "0.5982556", "0.5976134", "0.5968475", "0.59679985", "0.5960302", "0.5958814", "0.5958814", "0.5958814", "0.59587353", "0.5940697", "0.5933931", "0.5932743", "0.5931696", "0.593162", "0.59282374", "0.5927116", "0.59242344", "0.5921011", "0.59200513", "0.59183514", "0.5914985", "0.5913951", "0.59135205", "0.5910604", "0.5904652", "0.5904258", "0.5898163", "0.58974653", "0.5897427", "0.58944607", "0.58843225", "0.58830565", "0.58823544", "0.58785194", "0.58763516", "0.58699155", "0.5863696", "0.5863696", "0.5856657", "0.5856537", "0.585464", "0.5853751", "0.58526635", "0.58525336", "0.58459353", "0.58434194", "0.58423704", "0.5831844", "0.58314246", "0.58285433", "0.58271676", "0.58231974", "0.58220136", "0.58193076", "0.5819265", "0.5818187", "0.5810341" ]
0.76247156
0
it computes the error vector between desired and obtained output, stored at the last layer
def error(input_, output): global number_of_neurons_by_layer if len(output) != number_of_neurons_by_layer[-1]: raise IndexError( f"\033[91mDesired output length is incorrect. It must be {number_of_neurons_by_layer[-1]}.\033[m") output = np.array(output).reshape(len(output), 1) flow(input_) layers[-1]["error"] = output - layers[-1]["v"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def error2(input_, output):\n error(input_, output)\n layers[-1][\"error2\"] = layers[-1][\"error\"].T @ layers[-1][\"error\"]", "def getError(outputVector, targetVector):\r\n return np.sum((outputVector-targetVector)**2)", "def get_error(self, params):\n return self.endog - self.predict(params)", "def _compute_error(self,expected_out,actual_out,error_func):\n\n error = error_func(expected_out,actual_out)\n return error", "def get_error(self, output,target):\n return [target[i]-output[i] for i in range(len(output))]", "def train_error(self):\n self.prediction = self.predict()\n pred = self.prediction.reshape(-1)\n self.error = np.sum(pred != self.label) / self.train_data.shape[0]\n return(self.error)", "def _computeError(self, inputs, targets):\n return .5*np.sum((targets-self._pcnfwd(inputs))**2)", "def backprop_error(self,t=0, external_error=0):\n\t\taux = external_error\n\t\t# Other layers' contributions\n\t\tif t != 0:\n\t\t\tfor layer in self.next_recurrent:\n\t\t\t\taux += layer.get_error_contribution(self, t=t+1)\n\t\tfor layer in self.next:\n\t\t\taux += layer.get_error_contribution(self, t=t)\n\n\t\tif t != 0:\n\t\t\taux += np.dot(\n\t\t\t\tself.get_o_error(t=t+1) * self.__dev_sigm__(self.get_o(t=t+1)),\n\t\t\t\tnp.transpose(self.W_o_atprev)\n\t\t\t) + \\\n\t\t\tnp.dot(\n\t\t\t\tself.get_c_error(t=t+1) * self.get_i(t=t+1) * self.__dev_tanh__(self.get_c(t=t+1)),\n\t\t\t\tnp.transpose(self.W_c_atprev)\n\t\t\t) + \\\n\t\t\tnp.dot(\n\t\t\t\tself.get_i_error(t=t+1) * self.__dev_sigm__(self.get_i(t=t+1)),\n\t\t\t\tnp.transpose(self.W_i_atprev)\n\t\t\t) + \\\n\t\t\tnp.dot(\n\t\t\t\tself.get_f_error(t=t+1) * self.__dev_sigm__(self.get_f(t=t+1)),\n\t\t\t\tnp.transpose(self.W_f_atprev)\n\t\t\t)\n\t\t\n\t\tif isinstance(aux, np.ndarray):\n\t\t\tself.error_a = [aux]+self.error_a\n\n\t\t# error in o\n\t\tself.error_o = [self.get_a_error(t=t) * self.__tanh__(self.get_c(t=t))] + self.error_o\n\n\t\t# error in c\n\t\taux = self.w_o_c * self.get_o_error(t=t) * self.__dev_sigm__(self.get_o(t=t)) \\\n\t\t+ self.get_a_error(t=t) * self.get_o(t=t) * self.__dev_tanh_z__(self.get_c(t=t))\n\t\tif t!=0:\n\t\t\taux += self.get_c_error(t=t+1) * self.get_f(t=t+1) \\\n\t\t\t+ self.w_i_ctprev * self.get_i_error(t=t+1) * self.__dev_sigm__(self.get_i(t=t+1)) \\\n\t\t\t+ self.w_f_ctprev * self.get_f_error(t=t+1) * self.__dev_sigm__(self.get_f(t=t+1))\n\t\tself.error_c = [aux] + self.error_c\n\n\t\t# error in f\n\t\tself.error_f = [self.get_c(t=t-1)*self.get_c_error(t=t)] + self.error_f\n\n\t\t# error in i\n\t\tself.error_i = [self.get_c_error(t=t)*self.get_tanh_zc(t=t)] + self.error_i", "def calcError(self, inherited_error):\r\n\t\tif inherited_error == None:\t\t# output neurons\r\n\t\t\tself.error = (self.target - self.value) * self.activate_der()\r\n\t\telse:\r\n\t\t\tself.error = inherited_error * self.activate_der()", "def _delta(self, output, err, neuron):\n return neuron._g_prime(output) * err", "def error(self, trainset: ([], [])):\n # MSE = Σ | d – y |^2 / n\n error_sum = 0.0\n for index, example in enumerate(trainset):\n # | d – y |^2\n output = self.activate(example[0])\n\n target = example[1][0]\n\n error = target - output\n error_sum += error ** 2\n\n # Σ |error_sum| / n\n error_sum = error_sum / len(trainset)\n return error_sum", "def calculate_error(self):\n \n delta = self.Y - self.T\n error = delta.dot(delta) / self.N\n error = format(error, '.5f')\n \n self.errors.append(error)", "def _wer_compute(errors: Tensor, total: Tensor) ->Tensor:\n return errors / total", "def test(test_data, 
test_output, n_hidden_layer, n_hidden_layer_2, w1, w2, w3, bias_w1, bias_w2, bias_w3, l1, lmbda):\n # Set up initial variables\n samples = test_data.shape[0]\n correct_values = np.argmax(test_output, axis=1)\n predicted_values = np.zeros((samples,))\n error = np.zeros(test_output.shape)\n error_l1 = 0\n\n # Extract inputs\n x0 = test_data.T\n\n # Apply input weights to summation of inputs and add bias terms\n h1 = np.matmul(w1, x0) + bias_w1\n # Apply the activation function to the summation\n x1 = relu(h1)\n\n # Checks if L1 is wanted\n if l1:\n # Calculates l1 error for input layer\n error_l1 = lmbda * np.sum(np.sqrt(np.square(w1)))\n\n # Checks if hidden layer is needed\n if n_hidden_layer > 0:\n # Apply input weights to summation of inputs and add bias terms\n h2 = np.matmul(w2, x1) + bias_w2\n # Apply the activation function to the summation\n x2 = relu(h2)\n if l1:\n # Calculates l1 error for hidden layer\n error_l1 += lmbda * np.sum(np.sqrt(np.square(w2)))\n\n if n_hidden_layer_2 > 0:\n # Apply input weights to summation of inputs and add bias terms\n h3 = np.matmul(w3, x2) + bias_w3\n # Apply the activation function to the summation\n x3 = relu(h3)\n if l1:\n # Calculates l1 error for second hidden layer\n error_l1 += lmbda * np.sum(np.sqrt(np.square(w3)))\n\n # Calculate labels\n predicted_values = np.argmax(x3, axis=0)\n # Error Signal\n error = (test_output - x3.T)\n\n else:\n # Calculate labels\n predicted_values = np.argmax(x2, axis=0)\n # Error Signal\n error = (test_output - x2.T)\n\n else:\n # Calculate labels\n predicted_values = np.argmax(x1, axis=0)\n # Error Signal\n error = (test_output - x1.T)\n\n # Calculate MSE error\n error_mse = np.sum(np.square(error)) / (2 * error.shape[0])\n\n # Add MSE error to L1 error, if L1 isn't used this will add 0\n error = error_mse + error_l1\n\n # Calculate accuracy of predictions\n accuracy = (np.sum(predicted_values == correct_values) / samples) * 100\n\n print(\"Accuracy = \", accuracy)\n print(\"Error = \", error)\n return accuracy, error", "def error_function(prediction_dict, use_example_flags):\n\n predicted_flux_matrix_w_m02 = numpy.mean(\n prediction_dict[prediction_io.SCALAR_PREDICTIONS_KEY][\n use_example_flags, ...\n ],\n axis=-1\n )\n actual_flux_matrix_w_m02 = prediction_dict[\n prediction_io.SCALAR_TARGETS_KEY\n ][use_example_flags, :]\n\n predicted_net_flux_matrix_w_m02 = (\n predicted_flux_matrix_w_m02[:, 0] -\n predicted_flux_matrix_w_m02[:, 1]\n )\n actual_net_flux_matrix_w_m02 = (\n actual_flux_matrix_w_m02[:, 0] -\n actual_flux_matrix_w_m02[:, 1]\n )\n\n net_flux_sse_w2_m04 = numpy.sum(\n (predicted_net_flux_matrix_w_m02 - actual_net_flux_matrix_w_m02)\n ** 2\n )\n raw_flux_sse_w2_m04 = numpy.sum(\n (predicted_flux_matrix_w_m02 - actual_flux_matrix_w_m02) ** 2\n )\n\n num_examples = actual_flux_matrix_w_m02.shape[0]\n flux_mse_w_m02 = (\n (net_flux_sse_w2_m04 + raw_flux_sse_w2_m04) / (3 * num_examples)\n )\n\n predicted_hr_matrix_k_day01 = numpy.mean(\n prediction_dict[prediction_io.VECTOR_PREDICTIONS_KEY][\n use_example_flags, ...\n ],\n axis=-1\n )\n actual_hr_matrix_k_day01 = prediction_dict[\n prediction_io.VECTOR_TARGETS_KEY\n ][use_example_flags, ...]\n\n weight_matrix_k_day01 = numpy.maximum(\n numpy.absolute(predicted_hr_matrix_k_day01),\n numpy.absolute(actual_hr_matrix_k_day01)\n )\n heating_rate_dwmse_k3_day03 = numpy.mean(\n weight_matrix_k_day01 *\n (predicted_hr_matrix_k_day01 - actual_hr_matrix_k_day01) ** 2\n )\n\n return (\n scaling_factor_for_dwmse * heating_rate_dwmse_k3_day03 +\n 
scaling_factor_for_flux_mse * flux_mse_w_m02\n )", "def input_error(self, out_influence, new_weights):\n in_influence = np.dot(np.transpose(new_weights), out_influence)\n return in_influence", "def ErrorFunction(p,x,y,z):\n \n return TargetFunction(p,x,y) - z", "def error_compute(self):\n self.tt_error = np.linalg.norm(self.rel_error)\n if self.global_rank==0:print('Overall error is::',self.tt_error)\n return {'NMF': self.rel_error, 'tt': self.tt_error}", "def _mer_compute(errors: Tensor, total: Tensor) ->Tensor:\n return errors / total", "def error_values(X_train,X_test,Y_train,Y_test):\n #setting up parameters and variables for plotting \n n_train = X_train.shape[0]\n n_test = X_test.shape[0]\n d = X_train.shape[1]\n hdnode = 100\n w1 = np.random.normal(0,0.001,d*hdnode).reshape((d,hdnode))\n d1 = np.zeros((d,hdnode))\n w2 = np.random.normal(0,0.001,hdnode).reshape((hdnode,1))\n d2 = np.zeros(hdnode)\n h = np.zeros(hdnode)\n mb = 100 #minibatch size\n m = int(n_train/mb)\n batch = np.arange(m) \n lr = 0.00020\n EP = 20000 #needed for initializing \n ep = 0\n yh = np.zeros((n_train,1))\n yh2 = np.zeros((n_test,1))\n L_train= np.zeros(EP+1)\n L_test = np.zeros(EP+1)\n Y_train = Y_train.reshape(len(Y_train),1)\n #activation function for the hidden layer is tanh\n \n def g(A):\n return (np.tanh(A))\n\n def gd(A):\n return (1-np.square(np.tanh(A)))\n \n #setting up how long the epoch will run\n EP = 200\n ep = 0\n while ep < EP:\n ep += 1\n yh = g(X_train.dot(w1)).dot(w2)\n yh2 = g(X_test.dot(w1)).dot(w2)\n L_train[ep] = LA.norm(yh-Y_train.reshape(len(Y_train),1))/n_train\n L_test[ep] = LA.norm(yh2-Y_test.reshape(len(Y_test),1))/n_test\n \n np.random.shuffle(batch)\n for i in range(m):\n st = batch[i]*mb\n ed = (batch[i]+1)*mb\n h = g(X_train[st:ed].dot(w1))\n y = h.dot(w2)\n d2 = h.T.dot(Y_train[st:ed]-y)\n d1 = X_train[st:ed].T.dot(np.multiply((Y_train[st:ed]-y).dot(w2.T),gd(X_train[st:ed].dot(w1))))\n w2 += lr*d2\n w1 += lr*d1\n return yh, yh2", "def calc_error(self):\n if self._fit_data.y is not None and self._fit_data.y_fit is not None:\n self._fit_data.error_vector = self._fit_data.y - self._fit_data.y_fit", "def _cer_compute(errors: Tensor, total: Tensor) ->Tensor:\n return errors / total", "def train(X, Y=[], hidden_layer_sizes=[], error_deriv=\"default\", n_outputs=\"default\", n_loops=100,\n eta=.1, output_transformation=\"default\", error_function=\"default\"):\n \n # the default error function will be the squared error function, whose derivative is just\n # the output minus the target values\n def square_error_deriv(output, x, y):\n return output - y\n if error_deriv == \"default\":\n error_deriv = square_error_deriv\n \n # the default error function is the squared error \n def square_error(Y, Y_hat):\n return np.linalg.norm(Y_hat - Y, ord='fro') / (Y.shape[0]*Y.shape[1])\n if error_function == \"default\":\n error_function = square_error\n\n # the default output transformatoin is none\n def none_transformation(output):\n return output\n if output_transformation == \"default\":\n output_transformation = none_transformation\n\n if len(Y) > 0:\n if n_outputs == \"default\":\n n_outputs = Y.shape[0]\n else:\n # setting one y value for the rest of the training\n y = \"None\"\n layer_sizes = [X.shape[0]] + hidden_layer_sizes + [n_outputs]\n weights = construct_network(layer_sizes)\n \n errorvec = np.empty((n_loops))\n for loop in np.arange(n_loops):\n delta_W = []\n # initiazlizing delta Ws\n for weight_mat_idx in range(len(weights)):\n 
delta_W.append(np.zeros((weights[weight_mat_idx].shape)))\n \n for data_idx in np.arange(n_data):\n x = X[:, data_idx]\n y = Y[:, data_idx]\n layer_activations, output = forward_propagate(x, weights)\n \n errors = [error_deriv(output, x, y)]\n # go from the output layer towards the input layer and calculate error values\n for idx in np.arange(1, len(weights)):\n # prepend the newest error layer to the errors list:\n errors = [-np.multiply(deriv_sigmoid(layer_activations[-idx-1]), \\\n weights[-idx].T @ errors[-idx])] \\\n + errors\n errors[-idx-1] = errors[-idx-1][:-1,0] # removing bias\n # go from input layer towards output layer and calculate weight updates\n for idx in range(len(weights)):\n delta_W[idx] -= errors[idx] @ layer_activations[idx].T\n delta_W[idx][:,:-1] = np.mean(layer_activations[idx])\n \n # update all weight matrices\n for idx in range(len(weights)): \n weights[idx] += eta/n_data * delta_W[idx]\n \n Y_hat = classify(X, weights, output_transformation)\n errorvec[loop] = error_function(Y, Y_hat)\n return weights, errorvec", "def output_error(self, zs, activations, y):\n nabla_b = [np.zeros(b.shape) for b in self.biases]\n nabla_w = [np.zeros(w.shape) for w in self.weights]\n\n delta = self.cost_derivative_for_output(activations[-1], y) * sigmoid_prime(zs[-1])\n nabla_b[-1] = delta\n nabla_w[-1] = np.dot(delta, activations[-2].transpose())\n return delta, nabla_b, nabla_w", "def transformMeasurementError(self):\n var = self.model.observationError**2\n self.errShD = self.model.observationError\n self.errSinvD = 1.0/var\n self.errSinvhD = np.sqrt(self.errSinvD)", "def error(beta_0: float, beta_1: float, x_i: float, y_i: float) -> float:\n return predict(beta_0, beta_1, x_i) - y_i", "def compute_loss(self):", "def reserrorcalc(test_set, model):\n # Extracting X\n X = test_set[:,:-1]\n\n # Extracting labels\n Y = test_set[:,-1]\n residual_err = sum((model.predict(X) - Y) ** 2)\n return residual_err", "def errors(self):\n # placeholders for the target network q values and the action\n self.target_q = tf.placeholder(tf.float32, [None], name=\"target_q\")\n self.action = tf.placeholder(tf.int64, [None], name=\"action\")\n\n # convert the action to one-hot representation in order to compute the\n # error\n action_one_hot = tf.one_hot(\n self.action,\n self.action_space,\n on_value=1,\n off_value=0,\n name=\"action_one_hot\")\n\n self.q_acted = tf.reduce_sum(\n self.q_values *\n tf.cast(\n action_one_hot,\n tf.float32),\n axis=1,\n name=\"q_acted\")\n\n self.delta = self.target_q - self.q_acted\n\n \"\"\"\n [Article] We also found it helpful to clip the error term from the update r + gamma max_d Q(s', a', theta-)\n to be between -1 and 1. Because the absolute value loss function |x| has a derivative of -1\n for all negative values of x and a derivative of 1 for all positive values of x,\n clipping the squared error to be between -1 and 1 corresponds to using an absolute value\n loss function for errors outside of the (-1,1) interval. 
This form of error clipping further\n improved the stability of the algorithm.\n\n It is called the Huber loss and because the name is so cool, we have to implement it\n With d = 1 (we could also try with d = 2) (d <> self.delta)\n x = 0.5 * x^2 if |x| <= d\n x = 0.5 * d^2 + d * (|x| - d) if |x| > d\n \"\"\"\n self.clipped_error = tf_array_ops.where(tf.abs(self.delta) < 1.0,\n tf.square(self.delta) * 0.5,\n tf.abs(self.delta) - 0.5)\n return(self.clipped_error)", "def mse_cost_function(predicted_output, actual_output):\n error = predicted_output - actual_output\n mse_cost = np.sum(error ** 2) /(2 * len(actual_output),)\n return mse_cost, error", "def calc_error_dist(self):\n pass", "def error(self,pt,eta):\n return self._data[self.__ptBin(pt)][self.__etaBin(eta)][1]", "def test_error_map_fct(self):\n # reproducible arbitrariness\n np.random.seed(2343)\n\n nsteps = 12\n nchan = 4\n tmax = nsteps*self.dt\n sequence = np.random.randn(nsteps, self.N)\n\n target = np.random.randn(nchan, nsteps)\n controller = LinearController(self.G, target, tau=None)\n controller.W = np.random.randn(*controller.W.shape)\n controller.error_map_fct = lambda err: np.tanh(err)\n\n self.G.out_fct = lambda i: sequence[i]\n\n class SourceErrorGrabber(object):\n def __init__(self, target):\n self.target = target\n self.order = 10\n \n def prepare(self, tmax, dt):\n nsteps = int_r(tmax/dt)\n self.motor_error = np.zeros((nsteps, self.target.source.N))\n\n def evolve(self, t, dt):\n i = int_r(t/dt)\n self.motor_error[i, :] = self.target.get_source_error()\n\n M = SourceErrorGrabber(controller)\n M1 = simulation.StateMonitor(controller, 'out')\n\n sim = simulation.Simulation(self.G, controller, M, M1, dt=self.dt)\n sim.run(tmax)\n\n for i in xrange(int_r(tmax/self.dt)):\n diff = M1.out[:, i] - target[:, i]\n self.assertTrue(np.allclose(M.motor_error[i],\n np.dot(controller.error_map_fct(diff), controller.W)))", "def compute_error(data, user_features, item_features, nz):\n sum_err = 0\n for d, n in nz:\n err = data[d,n] - np.dot(item_features[d,:],user_features[:,n])\n sum_err += err**2\n rmse = 0.5*sum_err/len(nz)\n return rmse", "def _calc_loss(self, p_act_output:torch.Tensor, p_pred_output:torch.Tensor) -> float:\r\n\r\n return self._loss_fct(p_act_output, p_pred_output)", "def compute_loss(self, obs, returns):", "def calc_error(y_real, y_pred):\n if len(y_real) > 0:\n curr_err = rmse(y_pred, y_real)\n else:\n curr_err = np.nan\n return curr_err", "def __error(self, R, P, Q, K, beta):\n e = 0\n for i in xrange(len(R)):\n for j in xrange(len(R[i])):\n if R[i][j] > 0:\n\n # loss function error sum( (y-y_hat)^2 )\n e = e + pow(R[i][j]-numpy.dot(P[i,:],Q[:,j]), 2)\n\n # add regularization\n for k in xrange(K):\n\n # error + ||P||^2 + ||Q||^2\n e = e + (beta/2) * ( pow(P[i][k], 2) + pow(Q[k][j], 2) )\n return e", "def calc_error(opt, net, cuda, dataset, num_tests):\n if num_tests > len(dataset):\n num_tests = len(dataset)\n with torch.no_grad():\n erorr_arr, IOU_arr, prec_arr, recall_arr = [], [], [], []\n for idx in tqdm(range(num_tests)):\n\n # retrieve data for one frame (or multi-view)\n data = dataset[idx * len(dataset) // num_tests]\n image_tensor = data['img'].to(device=cuda) # (num_views, C, W, H) for 3x512x512 images, float -1. 
~ 1.\n calib_tensor = data['calib'].to(device=cuda) # (num_views, 4, 4) calibration matrix\n sample_tensor = data['samples'].to(device=cuda).unsqueeze(0) # (1, 3, n_in + n_out), float XYZ coords are inside the 3d-volume of [self.B_MIN, self.B_MAX]\n if opt.num_views > 1:\n sample_tensor = reshape_sample_tensor(sample_tensor, opt.num_views) # (num_views, 3, n_in + n_out)\n label_tensor = data['labels'].to(device=cuda).unsqueeze(0) # (1, 1, n_in + n_out), float 1.0-inside, 0.0-outside\n deepVoxels_tensor = torch.zeros([label_tensor.shape[0]], dtype=torch.int32).to(device=cuda) # small dummy tensors\n if opt.deepVoxels_fusion != None: deepVoxels_tensor = data[\"deepVoxels\"].to(device=cuda)[None,:] # (B=1,C=8,D=32,H=48,W=32), np.float32, all >= 0.\n\n # forward pass\n res, error = net.forward(image_tensor, sample_tensor, calib_tensor, labels=label_tensor, deepVoxels=deepVoxels_tensor) # (1, 1, n_in + n_out), R\n if len(opt.gpu_ids) > 1: error = error.mean()\n\n # compute errors {IOU, prec, recall} based on the current set of query 3D points\n IOU, prec, recall = compute_acc(res, label_tensor) # R, R, R\n\n # print(\n # '{0}/{1} | Error: {2:06f} IOU: {3:06f} prec: {4:06f} recall: {5:06f}'\n # .format(idx, num_tests, error.item(), IOU.item(), prec.item(), recall.item()))\n erorr_arr.append(error.item())\n IOU_arr.append(IOU.item())\n prec_arr.append(prec.item())\n recall_arr.append(recall.item())\n\n return np.average(erorr_arr), np.average(IOU_arr), np.average(prec_arr), np.average(recall_arr)", "def get_error(deltas, sums, weights):\n \n print(deltas)\n print(sums)\n print(weights)\n print('===================================')\n \n # here goes your code\n A = weights.T.dot(deltas.T)\n print(A)\n B = sigmoid_prime(sums)\n print(B)\n \n print(A.shape)\n print(B.shape)\n C = A.T * B\n print(C)\n D = C.mean(axis=0)\n print(D)\n print(D.shape)\n \n return ((weights.T.dot(deltas.T)).T * sigmoid_prime(sums)).mean(axis=0)", "def stderr(predicted, actual):\n return np.sqrt(mse(predicted, actual))", "def __error(self,node_set):\n error=0\n for n in node_set:\n if(n.seq_num!=0):\n error+=LA.norm(n.node_vol-node_set[n.neighbor.parent].node_vol-n.impedance*n.branch_cur)\n #print n.node_vol, '\\n', node_set[n.neighbor.parent].node_vol\n \n return error", "def _flat_reconstruction_loss(self, flat_x_target, flat_rnn_output):\n pass", "def error(self): \n if not self.terminal:\n err = sum([v**2 for v in self.state + self.q[:-1]])\n else:\n err = sum([v**2 for v in LIMITS[:9]] + [1.0 - LIMITS[9]**2])\n err *= (self.max_steps - self.steps)\n return err", "def check_error(gluon_output, k_model, input_np, epsilon=1e-4):\n gluon_output = gluon_output.asnumpy()\n keras_output = k_model.predict(input_np)\n\n error = np.max(gluon_output - keras_output)\n print('Error:', error)\n\n assert error < epsilon\n return error", "def error(Y, X):\n return (Y - X) ** 2", "def error(self):\n self.mean_error = tf.reduce_mean(self.errors, name=\"mean_error\")\n return(self.mean_error)", "def build_loss(self):\n\n opt = tf.train.AdamOptimizer(self.learning_rate)\n mse = tf.losses.mean_squared_error(self.label[-1], self.outputs[-1])\n loss = tf.losses.get_total_loss()\n\n return mse, loss", "def calculate_loss(self, output, batch, training_context, last_activation=None):\n if self._model_loss_key is None:\n return output\n else:\n return output[self._model_loss_key]", "def vae_loss(self, inputs, outputs, z_mean, z_log):\n\n reconstruction_loss = self.loss(inputs, outputs)\n reconstruction_loss *= self.n_features_\n kl_loss = 1 + 
z_log - K.square(z_mean) - K.exp(z_log)\n kl_loss = -0.5 * K.sum(kl_loss, axis=-1)\n kl_loss = self.gamma * K.abs(kl_loss - self.capacity)\n\n return K.mean(reconstruction_loss + kl_loss)", "def td0_error_from_segment(batch_segment, all_Vs, last_Vs, gamma):\n assert isinstance(batch_segment, BatchSegment)\n \n Vs, last_Vs = V_to_numpy(all_Vs, last_Vs)\n \n Vs = np.concatenate([Vs, np.zeros([batch_segment.N, 1])], axis=-1)\n Vs[:, -1] = last_Vs\n \n out = batch_segment.numpy_rewards + gamma*Vs[:, 1:]*batch_segment.numpy_masks\n out = out - Vs[:, :-1]\n \n return out", "def node_layer_error(self, errors, layer):\n weights = self.weights[layer]\n\n node_errors = []\n\n for i, input in enumerate(self.input_matrix[layer-1]):\n node_errors.append(float(log_deriv(input) * dot_product(weights[i], errors)))\n\n return node_errors", "def post_iteration(self, step, level_number):\n\n super(error_output, self).post_iteration(step, level_number)\n\n # some abbreviations\n L = step.levels[level_number]\n P = L.prob\n\n L.sweep.compute_end_point()\n\n uex = P.u_exact(step.time + step.dt)\n err = abs(uex - L.uend)\n\n est = []\n\n for m in range(1, L.sweep.coll.num_nodes + 1):\n est.append(abs(L.uold[m] - L.u[m]))\n\n est_all = max(est)\n\n print(step.status.iter, err, est_all, L.status.residual)", "def epoch(self, v, expected):\n self.V = []\n self.O_hidden = []\n self.O_output = []\n self.D_1 = []\n\n self.error = []\n\n\n self.forward(np.transpose([v]), np.transpose([expected]))\n self.backward()", "def __call__(self, output, target, params):\n res = self._loss(output, target, params)\n if self._fact is not None:\n res *= self._fact\n return res", "def loss(returns, predicted_output):\n ################################\n # YOUR IMPLEMENTATION HERE #\n ################################\n raise NotImplementedError", "def max_error(y_true, y_pred):\n ...", "def back_prop(self, error):\t\n\n\t\terror_h = np.transpose(np.multiply(error,self.d_a_func(self.h)))\n\t\t#print ('error_h should be a vector: ,', error_h)\n\t\tnext_error = error_h.dot(self.weights)\n\t\t#print ('next error should be a vector: ', next_error)\n\t\tself.grad_weights = np.outer(error_h, self.input)\n\n\t\treturn next_error", "def dependent_error_exp(data, weak_signal_data, num_weak_signal):\n\n w_model = train_weak_signals(data, weak_signal_data, num_weak_signal)\n\n training_data = data['training_data'][0].T\n training_labels = data['training_data'][1]\n val_data, val_labels = data['validation_data']\n val_data = val_data.T\n test_data = data['test_data'][0].T\n test_labels = data['test_data'][1]\n\n num_features, num_data_points = training_data.shape\n\n weak_signal_ub = w_model['error_bounds']\n weak_signal_probabilities = w_model['probabilities']\n weak_test_accuracy = w_model['test_accuracy']\n\n weights = np.zeros(num_features)\n\n print(\"Running tests...\")\n\n optimized_weights, ineq_constraint = train_all(val_data, weights, weak_signal_probabilities, weak_signal_ub, max_iter=5000)\n\n # calculate test probabilities\n test_probabilities = probability(test_data, optimized_weights)\n # calculate test accuracy\n test_accuracy = getModelAccuracy(test_probabilities, test_labels)\n\n print(\"\")\n print(\"+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n print(\"Experiment %d\"%num_weak_signal)\n print(\"We trained %d learnable classifiers with %d weak signals\" %(1, num_weak_signal))\n print(\"The accuracy of the model on the test data is\", test_accuracy)\n 
print(\"The accuracy of weak signal(s) on the test data is\", weak_test_accuracy)\n print(\"\")\n\n # calculate ge criteria\n print(\"Running tests on ge criteria...\")\n model = ge_criterion_train(val_data.T, val_labels, weak_signal_probabilities, num_weak_signal)\n ge_test_accuracy = accuracy_score(test_labels, np.round(probability(test_data, model)))\n print(\"The accuracy of ge criteria on test data is\", ge_test_accuracy)\n print(\"+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n\n # calculate baseline\n print(\"Running tests on the baselines...\")\n baselines = runBaselineTests(val_data, weak_signal_probabilities)\n b_test_accuracy = getWeakSignalAccuracy(test_data, test_labels, baselines)\n print(\"The accuracy of the baseline models on test data is\", b_test_accuracy)\n print(\"\")\n\n output = {}\n output['ALL'] = test_accuracy\n output['WS'] = w_model['test_accuracy'][-1]\n output['GE'] = ge_test_accuracy\n output['AVG'] = b_test_accuracy[-1]\n\n return output", "def calc_error_parameter(X, y, target, dimension): #change if more parameters\n\n pos_max = np.argmax(y)\n best_parameters = X[pos_max, 0:dimension]\n best_parameters = np.reshape(best_parameters, (-1, 1))\n\n l2_errors = (\n np.power(best_parameters[0, :] - target[0], 2) +\n np.power(best_parameters[1, :] - target[1], 2) +\n np.power(best_parameters[2, :] - target[2], 2))\n\n return l2_errors.tolist(), best_parameters.tolist()", "def get_loss(self):\r\n\r\n if F.loss_type==\"cosine\":\r\n self.losscos = r2d*tf.acos(1-tf.losses.cosine_distance(tf.nn.l2_normalize(self.labels,1), tf.nn.l2_normalize(self.out, 1), dim=1))\r\n self.loss = tf.losses.cosine_distance(tf.nn.l2_normalize(self.labels,1), tf.nn.l2_normalize(self.out, 1), dim=1)\r\n elif F.loss_type==\"mse2d\":\r\n xl, yl, zl = tf.split(self.labels, 3, axis=1)\r\n xo, yo, zo = tf.split(self.out, 3, axis=1)\r\n thetal, thetao = tf.asin(-yl), tf.asin(-yo)\r\n phil, phio = tf.atan2(-zl, -xl), tf.atan2(-zo, -xo)\r\n self.lb = tf.concat([thetal, phil], axis=1)\r\n self.ob = tf.concat([thetao, phio], axis=1)\r\n self.loss = tf.scalar_mul(tf.constant(r2d), tf.losses.mean_squared_error(self.lb, self.ob, 2))\r\n elif F.loss_type==\"mse3d\":\r\n self.loss = tf.losses.mean_squared_error(tf.nn.l2_normalize(self.labels, 0), tf.nn.l2_normalize(self.out, 0))", "def calculate_loss(self, output, target, **kwargs):\n ##dont do aggregation\n raise NotImplementedError", "def calculate_loss(self, output, target, **kwargs):\n ##dont do aggregation\n raise NotImplementedError", "def compute_error(self):\n \n self.error = pd.DataFrame()\n \n for name in self.conf[\"w_sizes\"].keys():\n \n self.error[f\"mae {name}\"] = self.predict[[name, \"test\"]].apply(lambda x: mae(x), axis=1)\n self.error[f\"mape {name}\"] = self.predict[[name, \"test\"]].apply(lambda x: MAPE(x[0], x[1]), axis=1)\n \n self.predict['error'] = self.error.filter(like='mae').apply(lambda r: tuple(r), axis=1).apply(np.array)", "def vae_loss(x, t_decoded):\r\n return K.mean(reconstruction_loss(x, t_decoded))", "def error(y_pred, y_true):\n m = len(y_true) # number of samples\n error = np.sum( (y_pred - y_true)**2 )/m\n return error", "def error(clf, X, y, ntrials=100, test_size=0.2) :\n\n train_error = 0\n test_error = 0\n ### ========== TODO : START ========== ###\n # compute cross-validation error over ntrials\n # hint: use train_test_split (be careful of the parameters)\n for i in range(0,ntrials, 1):\n #get the value of the error for each division\n 
#train on the test data for the clf\n #test also on the data\n #X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.33, random_state = 42)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size= test_size, random_state=i)\n #now find the error\n #first train the model\n #then predict\n #check the accuracy\n clf.fit(X_train,y_train)\n y_pred = clf.predict(X_train)\n #now find the error for the train_error\n train_err = 1 - metrics.accuracy_score(y_train, y_pred, normalize=True)\n train_error += train_err\n\n y_pred = clf.predict(X_test)\n test_err = 1 - metrics.accuracy_score(y_test, y_pred, normalize=True)\n test_error += test_err\n\n\n #get the average\n train_error = float(train_error)/((1-test_size)*len(X))\n test_error = float(test_error)/((test_size)*len(X))\n ### ========== TODO : END ========== ###\n\n return train_error, test_error", "def nzErr(xerr, yerr, vxerr, vyerr, year_x, year_y, mag, alnDir = '13_08_21/', chainsDir = 'efit/chains_S0-2_newRV2/'):\n\n #Read in values for error in position and velocity of sgr*\n origin_val = asciidata.open('/g/ghez/align/' + alnDir + chainsDir + 'efit_summary.txt')\n ori_x0e = origin_val[25][0]\n ori_y0e = origin_val[26][0]\n ori_vxe = origin_val[27][0]\n ori_vye = origin_val[28][0]\n t_0 = 2000.0 #hard coded t_0 of sgr*\n\n # magBins=np.array([9,11,12,13,14,15,16,17,18,19,20,21])\n # deltaArr=np.array([3.5,71.0,58.0,210.0,300.0,650.0,700.0,1100.0,1900.0,2200.0,3000.0])*1e-6\n\n# delta = mag*0.0\n# for i in range(len(mag)):\n# for j in range(len(deltaArr)):\n# if ((mag[i] > magBins[j]) & (mag[i] <= magBins[j+1])):\n# delta[i]=deltaArr[j]\n\n#pdb.set_trace()\n\n #Update errors\n xerr = np.sqrt(xerr**2 + ori_x0e**2 + ((year_x - t_0)*ori_vxe)**2)\n yerr = np.sqrt(yerr**2 + ori_y0e**2 + ((year_y - t_0)*ori_vye)**2)\n vxerr = np.sqrt(vxerr**2 + ori_vxe**2)\n vyerr = np.sqrt(vyerr**2 + ori_vye**2)\n\n return xerr, yerr, vxerr, vyerr", "def calc_error(W: np.ndarray, C: np.ndarray, U_in: np.ndarray, U_out: np.ndarray) -> float:\n W_hat = np.einsum('whnm,in,om->whio', C, U_in, U_out)\n elemental_error = np.abs(W - W_hat)\n error_bound = np.mean(elemental_error) / np.mean(np.abs(W))\n return error_bound", "def get_control(self, pos_error, vel_error):\n return self.stiffness * pos_error + self.damping * vel_error", "def backward(self, inputs): \n self.error = self.error * sigmoid(self.output, der=True) # because the activation function of last layer must be sigmoid\n delta3_weights = np.dot(self.z2.T, self.error)\n\n self.error = np.dot(self.error, self.output3_weights.T) * self.af(self.z2, der=True) \n delta2_weights = np.dot(self.z1.T, self.error)\n\n self.error = np.dot(self.error, self.hidden2_weights.T) * self.af(self.z1, der=True)\n delta1_weights = np.dot(inputs.T, self.error)\n\n self.hidden1_weights -= self.lr * delta1_weights\n self.hidden2_weights -= self.lr * delta2_weights\n self.output3_weights -= self.lr * delta3_weights", "def loss_fn(self, targets, outputs, model):", "def errorList(self, ylist, predictedlist):\n\n e = [0 for i in range(self.size)]\n for i in range(self.size):\n e[i] = round(ylist[i] - predictedlist[i], 3)\n \n errors = Vector(self.size)\n errors.changeVals(e)\n return errors", "def propagate(self, expected):\n\n df = self._layer.dfunction()\n se = 0.0\n for i,(a,e) in enumerate(zip(self._layer, expected)):\n error = e - a\n self._deltas[i] = df(a)*error\n se += error * error\n return se/2.0", "def _compute_errors(self):\n self.errors = np.sqrt(self.data)\n self.errors[self.errors == 0.] 
= 1.", "def all_param_AN(ds, myloss='mean_squared_error'):\n wr = ds[0]\n wl = ds[1]\n V = ds[2]\n omega = ds[3]\n input = np.zeros((len(wl),2))\n input[:,0] = wr\n input[:,1] = wl\n output = np.zeros((len(wl),2))\n output[:,0] = V\n output[:,1] = omega\n input_layer = keras.layers.Input((2,),name=\"input\") #wr et wl\n hidden_layer = keras.layers.Dense(2, activation='linear', kernel_initializer='uniform',\n input_shape=(2,), use_bias=False, name=\"output\") #V et omega\n output_layer = hidden_layer(input_layer)\n ann = keras.models.Model(inputs=input_layer, outputs=output_layer)\n opt = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)\n ann.compile(loss=myloss, optimizer=opt)\n ann_in, ann_out = input, output\n history = ann.fit(ann_in, ann_out, epochs=40, batch_size=64, verbose=0,\n shuffle=True, validation_split=0.1)#, callbacks=callbacks)\n\n \"\"\"plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n plt.show()\"\"\"\n\n weights = hidden_layer.get_weights()[0]\n Rr_est = weights[0][0]*2\n Rl_est = weights[1][0]*2\n L_est1 = 1/(weights[0][1]/Rr_est)\n L_est2 = -1/(weights[1][1]/Rr_est)\n return Rr_est, Rl_est, (L_est2+L_est1)/2 #moyenne des deux longueurs obtenues", "def multi_output_loss(y_true: tf.Tensor, y_pred: tf.Tensor) -> tf.float32:\n tf.print(\"Sum of actual masking: \", tf.reduce_sum(y_true))\n tf.print(\"Sum of predicted masking: \", tf.reduce_sum(y_pred))\n # loss_multiplier = tf.where(tf.greater(y_true, tf.constant(5.)), tf.constant(10.),\n # tf.constant(1.))\n loss = tf.keras.losses.mean_squared_error(y_true,\n y_pred)\n # tf.print(\"Y true: \", y_true)\n # tf.print(\"Loss multiplier: \", loss_multiplier)\n # loss *= tf.cast(loss_multiplier, dtype=tf.float32)\n return tf.reduce_mean(loss)", "def _wip_compute(errors: Tensor, target_total: Tensor, preds_total: Tensor) ->Tensor:\n return errors / target_total * (errors / preds_total)", "def forward(self,y_out, y_truth): \n result = (np.square(np.subtract(y_out, y_truth)))\n #########################################################################\n # TODO: #\n # Implement the forward pass and return the output of the MSE loss. 
#\n #########################################################################\n\n #########################################################################\n # END OF YOUR CODE #\n #########################################################################\n \n return result", "def get_output(self, prev_layers=None, **kwargs):\n if prev_layers is None:\n prev_layers = list()\n if self not in prev_layers:\n prev_layers += [self]\n incoming = self.incoming(prev_layers=prev_layers, **kwargs)\n with tf.variable_scope(self.layer_scope):\n self.out = incoming * self.factor\n return self.out", "def est_return(self, r, mask):\n batchsz = r.size(0)\n\n # v_target is worked out by Bellman equation.\n v_target = torch.Tensor(batchsz).to(device=DEVICE)\n\n prev_v_target = 0\n for t in reversed(range(batchsz)):\n # mask here indicates a end of trajectory\n # this value will be treated as the target value of value network.\n # mask = 0 means the immediate reward is the real V(s) since it's end of trajectory.\n # formula: V(s_t) = r_t + gamma * V(s_t+1)\n v_target[t] = r[t] + self.gamma * prev_v_target * mask[t]\n # update previous\n prev_v_target = v_target[t]\n\n return v_target", "def calculate_td_error(self, old_state, new_state, reward):\n\n output = self.net(self.state_tensor_convert(old_state))\n target = self.gamma * self.net(self.state_tensor_convert(new_state)) + reward\n self.loss = self.net.loss(output,target)\n return float(target-output)", "def model_output(model, t, s, i):\n return 0, 0, 0, 0", "def errors(self, target):\n\n return T.mean(T.neq(self.y_pred, T.argmax(target, axis=1)))", "def err_func(x,rv,valore,specn,lcrop,models='da2014'):\n tmp = tmp_func(x[0], x[1], rv, specn, lcrop, models)\n if tmp != 1: return abs(tmp[3]-(valore+1.)) #this is quantity that gets minimized \n else: return 1E30", "def compute_global_error(g):\n Fx = 0\n for edge in g.edges:\n\n # pose-pose constraint\n if edge.Type == 'P':\n\n # compute idx for nodes using lookup table\n fromIdx = g.lut[edge.fromNode]\n toIdx = g.lut[edge.toNode]\n\n # get node state for the current edge\n x1 = g.x[fromIdx:fromIdx + 3]\n x2 = g.x[toIdx:toIdx + 3]\n\n # get measurement and information matrix for the edge\n z12 = edge.measurement\n info12 = edge.information\n\n # (TODO) compute the error due to this edge`\n z12 = v2t(z12)\n x1 = v2t(x1)\n x2 = v2t(x2)\n eij = t2v(np.linalg.inv(z12) @ np.linalg.inv(x1) @ x2)\n Fx = Fx + eij.transpose() @ info12 @ eij\n # pose-pose constraint\n elif edge.Type == 'L':\n print(\"You shouldn't be here.\")\n # compute idx for nodes using lookup table\n fromIdx = g.lut[edge.fromNode]\n toIdx = g.lut[edge.toNode]\n\n # get node states for the current edge\n x = g.x[fromIdx:fromIdx + 3]\n l = g.x[toIdx:toIdx + 2]\n\n # get measurement and information matrix for the edge\n z = edge.measurement\n info12 = edge.information\n\n # (TODO) compute the error due to this edge\n # TODO2 : do on homo matrices\n xtr = v2t(x)\n R = xtr[0:2, 0:2]\n eil = R.transpose() @ (l - x[0:2]) - z\n Fx = Fx + eil.transpose() @ info12 @ eil\n\n return Fx", "def compute_error_cross_dataset(AL, train_y):\n # print(train_y.shape)\n nb = train_y.shape[0]\n error=np.power(np.add(train_y,-AL),2)*1/nb\n return error\n # raise NotImplementedError", "def backpropagate(self, error, learning_rate):\n #assert isinstance(error, np.ndarray)\n #assert isinstance(self._prev_layer._outputs, np.ndarray)\n #assert isinstance(self._outputs, np.ndarray) \n\n # Compute deltas. 
\n deltas = np.dot(error, self._activation_function.derivative_given_y(self._outputs))\n \n # Compute gradient.\n synapse = np.dot(np.dot(error, self._prev_layer._outputs), learning_rate)\n \n # Adjust weights.\n self._weights = np.add(self._weights, synapse)\n \n # Adjust bias weights.\n if self._use_bias:\n self._bias = np.add(self._bias, synapse)\n \n return deltas", "def compute_loss(self, x, gt):\n loss = sum([torch.mean((out - gt)**2) for out in self.forward(x)])\n return loss", "def compute_loss(self):\n def calc_loss(inputs, outputs):\n reconstruction_loss = tf.metrics.binary_crossentropy(\n tf_flat(inputs), tf_flat(outputs))\n reconstruction_loss *= OUT_SIZE * OUT_SIZE\n kl_loss = -0.5 * tf.reduce_sum(1.0 + self.log_sigma - tf.square(\n self.mu) - tf.exp(self.log_sigma), 1)\n return tf.reduce_mean(reconstruction_loss + kl_loss)\n return calc_loss", "def __call__(self, errors: List[float]) -> List[float]:", "def compute_error(y_true, y_pred):\r\n\r\n # INSERT YOUR CODE HERE\r\n \r\n n = len(y_true)\r\n err = [y_true[i] != y_pred[i] for i in range(n)]\r\n return sum(err) / n\r\n \r\n raise Exception('Function not yet implemented!')", "def get_errors(self):\n return {'loss': self.loss.data[0]}", "def _val(self):\r\n lr, hr = self.sess.run(self.train_batch)\r\n res = self.sess.run(\r\n [self.merged,\r\n self.GAN.g_loss, self.GAN.mse_loss, self.GAN.g_gan_loss,\r\n self.GAN.d_loss, self.GAN.d_loss_real, self.GAN.d_loss_fake],\r\n feed_dict={\r\n self.GAN.g_images: lr,\r\n self.GAN.d_images: hr,\r\n self.GAN.is_training: False\r\n })\r\n\r\n return res", "def back_propagate(self, inputs, hidden, output, errors):\n d_output = self._da(output) * errors\n d_hidden = self._da(hidden) * dot(d_output, self.W_output[:-1].T)\n\n n_samples = inputs.shape[0]\n bias = ones((n_samples, 1))\n # Update momentum and weights\n self.V_output = self.output_learning_rate * dot(c_[hidden, bias].T, d_output) / n_samples\n self.W_output+= self.V_output\n\n self.V_hidden = self.hidden_learning_rate * dot(c_[inputs, bias].T, d_hidden) / n_samples\n self.W_hidden+= self.V_hidden", "def _getErrorFunction(self):\n\n\t\treturn (self._setpoint - self._current)", "def getErrRegression(mdlParams, indices, sess, loss_op, prediction, getBatch, placeholders, feed_list_inference):\r\n # Set up sizes\r\n loss = np.zeros([len(mdlParams[indices])])\r\n predictions = np.zeros([len(mdlParams[indices]),len(mdlParams['tar_range'])])\r\n targets = np.zeros([len(mdlParams[indices]),len(mdlParams['tar_range'])])\r\n numBatches = int(math.ceil(len(mdlParams[indices])/mdlParams['batchSize']))\r\n for k in range(numBatches):\r\n feed_list_inference['X'], feed_list_inference['Y'] = getBatch(mdlParams,indices,k)\r\n # Take care of last batch being smaller\r\n if int(math.ceil(len(mdlParams[indices]) / mdlParams['batchSize'])) - 1 == k :\r\n bSize = len(mdlParams[indices]) - mdlParams['batchSize'] * k\r\n else:\r\n bSize = mdlParams['batchSize']\r\n targets[mdlParams['batchSize']*k:(mdlParams['batchSize']*k+bSize),:] = feed_list_inference['Y'] \r\n loss[mdlParams['batchSize']*k:(mdlParams['batchSize']*k+bSize)], predictions[mdlParams['batchSize']*k:(mdlParams['batchSize']*k+bSize)] = sess.run([loss_op, prediction], feed_dict={placeholders[p]: feed_list_inference[p] for p in placeholders})\r\n # Transform targets and predictions\r\n if mdlParams['scale_targets']:\r\n targets = mdlParams['scaler'].inverse_transform(targets)\r\n predictions = mdlParams['scaler'].inverse_transform(predictions)\r\n # Error metrics\r\n # MAE\r\n mae = 
np.mean(np.abs(predictions-targets),1)\r\n mae_mean = np.mean(mae)\r\n mae_std = np.std(mae)\r\n # Relative MAE\r\n tar_std = np.std(targets,0)\r\n rmae_mean = np.mean(np.mean(np.abs(predictions-targets),0)/tar_std)\r\n rmae_std = np.mean(np.std(np.abs(predictions-targets),0)/tar_std)\r\n # Avg. Corr. Coeff.\r\n corr = np.corrcoef(np.transpose(predictions),np.transpose(targets))\r\n # Extract relevant components for aCC\r\n num_tar = len(mdlParams['tar_range'])\r\n acc = 0\r\n for k in range(num_tar):\r\n acc += corr[num_tar+k,k]\r\n acc /= num_tar\r\n return np.mean(loss), mae_mean, mae_std, rmae_mean, rmae_std, acc", "def get_loss(self, Loss, results, inputs, device):\n return", "def test_source_error(self):\n # reproducible arbitrariness\n np.random.seed(12321)\n\n nsteps = 10\n nchan = 3\n tmax = nsteps*self.dt\n sequence = np.random.randn(nsteps, self.N)\n\n target = np.random.randn(nchan, nsteps)\n controller = LinearController(self.G, target, tau=None)\n controller.W = np.random.randn(*controller.W.shape)\n\n self.G.out_fct = lambda i: sequence[i]\n\n class SourceErrorGrabber(object):\n def __init__(self, target):\n self.target = target\n self.order = 10\n \n def prepare(self, tmax, dt):\n nsteps = int_r(tmax/dt)\n self.motor_error = np.zeros((nsteps, self.target.source.N))\n\n def evolve(self, t, dt):\n i = int_r(t/dt)\n self.motor_error[i, :] = self.target.get_source_error()\n\n M = SourceErrorGrabber(controller)\n M1 = simulation.StateMonitor(controller, 'out')\n\n sim = simulation.Simulation(self.G, controller, M, M1, dt=self.dt)\n sim.run(tmax)\n\n for i in xrange(int_r(tmax/self.dt)):\n diff = M1.out[:, i] - target[:, i]\n self.assertTrue(np.allclose(M.motor_error[i],\n np.dot(diff, controller.W)))", "def relative_error(Eth_original, Eph_original,Eth_model, Eph_model,theta, phi, dsf=1,kf=-1):\n\n st = np.sin(theta).reshape((len(theta), 1))\n #\n # Construct difference between reference and reconstructed\n #\n if kf!=-1:\n dTh = (Eth_model[kf, :, :] - Eth_original[kf, ::dsf, ::dsf])\n dPh = (Eph_model[kf, :, :] - Eph_original[kf, ::dsf, ::dsf])\n #\n # squaring + Jacobian\n #\n dTh2 = np.real(dTh * np.conj(dTh)) * st\n dPh2 = np.real(dPh * np.conj(dPh)) * st\n\n vTh2 = np.real(Eth_original[kf, ::dsf, ::dsf] \\\n * np.conj(Eth_original[kf, ::dsf, ::dsf])) * st\n vPh2 = np.real(Eph_original[kf, ::dsf, ::dsf] \\\n * np.conj(Eph_original[kf, ::dsf, ::dsf])) * st\n\n mvTh2 = np.sum(vTh2)\n mvPh2 = np.sum(vPh2)\n\n errTh = np.sum(dTh2)\n errPh = np.sum(dPh2)\n else:\n dTh = (Eth_model[:, :, :] - Eth_original[:, ::dsf, ::dsf])\n dPh = (Eph_model[:, :, :] - Eph_original[:, ::dsf, ::dsf])\n #\n # squaring + Jacobian\n #\n dTh2 = np.real(dTh * np.conj(dTh)) * st\n dPh2 = np.real(dPh * np.conj(dPh)) * st\n\n vTh2 = np.real(Eth_original[:, ::dsf, ::dsf] \\\n * np.conj(Eth_original[:, ::dsf, ::dsf])) * st\n vPh2 = np.real(Eph_original[:, ::dsf, ::dsf] \\\n * np.conj(Eph_original[:, ::dsf, ::dsf])) * st\n\n mvTh2 = np.sum(vTh2)\n mvPh2 = np.sum(vPh2)\n\n errTh = np.sum(dTh2)\n errPh = np.sum(dPh2)\n\n errelTh = (errTh / mvTh2)\n errelPh = (errPh / mvPh2)\n errel =( (errTh + errPh) / (mvTh2 + mvPh2))\n\n return(errelTh, errelPh, errel)" ]
[ "0.73773164", "0.7060913", "0.69024837", "0.68995315", "0.6740334", "0.66105175", "0.6591121", "0.6574008", "0.65447205", "0.653256", "0.65291804", "0.64597213", "0.6338442", "0.6333448", "0.6331635", "0.6329993", "0.6325577", "0.63097376", "0.63089925", "0.62953514", "0.6232458", "0.62002254", "0.6170006", "0.6145786", "0.6142563", "0.61360204", "0.6125068", "0.6119836", "0.6109353", "0.6107925", "0.6093664", "0.60576534", "0.6056668", "0.6043041", "0.6041759", "0.60331804", "0.60328287", "0.60219944", "0.60143554", "0.6009721", "0.5998234", "0.59952635", "0.5993965", "0.5992321", "0.5980454", "0.59669983", "0.59629685", "0.59624547", "0.59509116", "0.59422183", "0.59408283", "0.59387946", "0.5935771", "0.5928962", "0.5915762", "0.59147525", "0.59122354", "0.5910257", "0.588039", "0.5871555", "0.58668363", "0.5863641", "0.5863641", "0.5859641", "0.5853134", "0.58528394", "0.5844545", "0.5841834", "0.5822789", "0.5821267", "0.5821098", "0.58102244", "0.57962996", "0.5781182", "0.5775778", "0.57745177", "0.5774437", "0.5774306", "0.5754414", "0.57517403", "0.5738037", "0.57352036", "0.57284945", "0.5723428", "0.5716623", "0.5713011", "0.5706825", "0.570627", "0.5694446", "0.5692435", "0.56832355", "0.56814253", "0.5676481", "0.56749517", "0.5674115", "0.5670487", "0.5667713", "0.566727", "0.5665201", "0.56613505" ]
0.73490536
1
it computes the sum of quadratic error of a given input, stored at the last layer
def error2(input_, output):
    error(input_, output)
    layers[-1]["error2"] = layers[-1]["error"].T @ layers[-1]["error"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __error(self, R, P, Q, K, beta):\n e = 0\n for i in xrange(len(R)):\n for j in xrange(len(R[i])):\n if R[i][j] > 0:\n\n # loss function error sum( (y-y_hat)^2 )\n e = e + pow(R[i][j]-numpy.dot(P[i,:],Q[:,j]), 2)\n\n # add regularization\n for k in xrange(K):\n\n # error + ||P||^2 + ||Q||^2\n e = e + (beta/2) * ( pow(P[i][k], 2) + pow(Q[k][j], 2) )\n return e", "def evaluate_quadratic(shape,x):\n d = ((shape.a*x)** 2) + (shape.b * x) + shape.c\n return d", "def backprop_error(self,t=0, external_error=0):\n\t\taux = external_error\n\t\t# Other layers' contributions\n\t\tif t != 0:\n\t\t\tfor layer in self.next_recurrent:\n\t\t\t\taux += layer.get_error_contribution(self, t=t+1)\n\t\tfor layer in self.next:\n\t\t\taux += layer.get_error_contribution(self, t=t)\n\n\t\tif t != 0:\n\t\t\taux += np.dot(\n\t\t\t\tself.get_o_error(t=t+1) * self.__dev_sigm__(self.get_o(t=t+1)),\n\t\t\t\tnp.transpose(self.W_o_atprev)\n\t\t\t) + \\\n\t\t\tnp.dot(\n\t\t\t\tself.get_c_error(t=t+1) * self.get_i(t=t+1) * self.__dev_tanh__(self.get_c(t=t+1)),\n\t\t\t\tnp.transpose(self.W_c_atprev)\n\t\t\t) + \\\n\t\t\tnp.dot(\n\t\t\t\tself.get_i_error(t=t+1) * self.__dev_sigm__(self.get_i(t=t+1)),\n\t\t\t\tnp.transpose(self.W_i_atprev)\n\t\t\t) + \\\n\t\t\tnp.dot(\n\t\t\t\tself.get_f_error(t=t+1) * self.__dev_sigm__(self.get_f(t=t+1)),\n\t\t\t\tnp.transpose(self.W_f_atprev)\n\t\t\t)\n\t\t\n\t\tif isinstance(aux, np.ndarray):\n\t\t\tself.error_a = [aux]+self.error_a\n\n\t\t# error in o\n\t\tself.error_o = [self.get_a_error(t=t) * self.__tanh__(self.get_c(t=t))] + self.error_o\n\n\t\t# error in c\n\t\taux = self.w_o_c * self.get_o_error(t=t) * self.__dev_sigm__(self.get_o(t=t)) \\\n\t\t+ self.get_a_error(t=t) * self.get_o(t=t) * self.__dev_tanh_z__(self.get_c(t=t))\n\t\tif t!=0:\n\t\t\taux += self.get_c_error(t=t+1) * self.get_f(t=t+1) \\\n\t\t\t+ self.w_i_ctprev * self.get_i_error(t=t+1) * self.__dev_sigm__(self.get_i(t=t+1)) \\\n\t\t\t+ self.w_f_ctprev * self.get_f_error(t=t+1) * self.__dev_sigm__(self.get_f(t=t+1))\n\t\tself.error_c = [aux] + self.error_c\n\n\t\t# error in f\n\t\tself.error_f = [self.get_c(t=t-1)*self.get_c_error(t=t)] + self.error_f\n\n\t\t# error in i\n\t\tself.error_i = [self.get_c_error(t=t)*self.get_tanh_zc(t=t)] + self.error_i", "def error(self, trainset: ([], [])):\n # MSE = Σ | d – y |^2 / n\n error_sum = 0.0\n for index, example in enumerate(trainset):\n # | d – y |^2\n output = self.activate(example[0])\n\n target = example[1][0]\n\n error = target - output\n error_sum += error ** 2\n\n # Σ |error_sum| / n\n error_sum = error_sum / len(trainset)\n return error_sum", "def get_error(deltas, sums, weights):\n \n print(deltas)\n print(sums)\n print(weights)\n print('===================================')\n \n # here goes your code\n A = weights.T.dot(deltas.T)\n print(A)\n B = sigmoid_prime(sums)\n print(B)\n \n print(A.shape)\n print(B.shape)\n C = A.T * B\n print(C)\n D = C.mean(axis=0)\n print(D)\n print(D.shape)\n \n return ((weights.T.dot(deltas.T)).T * sigmoid_prime(sums)).mean(axis=0)", "def ErrorFunction(p,x,y,z):\n \n return TargetFunction(p,x,y) - z", "def linear_regression(d, ind, dep):\n\n\ty=d.get_data([dep])\n\tprint \"y :\",y\n\tA=d.get_data(ind)\n\tprint \"A :\",A\n\tones = np.asmatrix(np.ones( (A.shape[0]) )).transpose()\n\tA=np.concatenate((A, ones), axis=1)\n\tprint \"concatenated A :\",A\n\tAAinv=np.linalg.inv( np.dot(A.transpose(), A))\n\tprint \"AAinv: \\n\",AAinv\n\t\"\"\"\n\tprint \"A :\",A\n\tprint \"y: \",y\n\tprint \"AAinv: \",AAinv\"\"\"\n\tprint \"shape A:\t 
\",A.shape\n\tprint \"shape y\t:\", y.shape\n\tx=np.linalg.lstsq(A,y)\n\tprint \"x :\\n\",x\n\tb=x[0]\n\tprint \"\\n b : \\n\",b\n\tN=len(y)\n\tprint \"N :\t\\n\",N\n\tC=len(b)\n\tprint \"C :\t \",C\n\tdf_e=N-C\n\tdf_r=C-1\n\terror=y - np.dot(A, b)\n\tprint \"error:\t\",error\n\tsse=np.dot(error.transpose(), error) / df_e\n\tprint \"sse\t:\",sse\n\tstderr=np.sqrt( np.diagonal( sse[0, 0] * AAinv ) )\n\tprint \"stderr: \",stderr\n\tt = b.transpose() / stderr\n\tprint \"t :\", t\n\tp=2*(1 - scipy.stats.t.cdf(abs(t), df_e))\n\tprint \"p:\t\",p\n\tr2=1 - error.var() / y.var()\n\tprint \"R^2\t :\",r2, \"\\n \\n \\n \\n*************************************\"\n\t\n\t\n\treturn [b,sse,r2,t,p]", "def rms_error(self, X, y) :\n ### ========== TODO : START ========== ###\n # part h: compute RMSE\n n, d = X.shape\n error = np.sqrt(self.cost(X,y)/n)\n ### ========== TODO : END ========== ###\n return error", "def squaredError(label, prediction):\n return (label-prediction)*(label-prediction)", "def _wer_compute(errors: Tensor, total: Tensor) ->Tensor:\n return errors / total", "def expsumeval(self, p, x, y=None, C=None, sumsq=False, weights=None):\n yd = p[0] + (p[1] * numpy.exp(-x / p[2])) + \\\n (p[3] * numpy.exp(-x / p[4]))\n if y is None:\n return yd\n else:\n yerr = y - yd\n if weights is not None:\n yerr = yerr * weights\n if sumsq is True:\n return numpy.sum(yerr ** 2)\n else:\n return yerr", "def error(beta_0: float, beta_1: float, x_i: float, y_i: float) -> float:\n return predict(beta_0, beta_1, x_i) - y_i", "def MeanSqError(self):\r\n\t\treturn self.mse", "def costFunction(self, x, y ):\n self.yEst = self.forward_propagate(x)\n sqErrors = ( self.yEst - y ) ** 2\n J = sqErrors.sum() / 2\n return J", "def error(self): \n if not self.terminal:\n err = sum([v**2 for v in self.state + self.q[:-1]])\n else:\n err = sum([v**2 for v in LIMITS[:9]] + [1.0 - LIMITS[9]**2])\n err *= (self.max_steps - self.steps)\n return err", "def error(line, data): # error function\n # Metric: Sum of squared Y-axis differences\n err = np.sum((data[:, 1] - (line[0] * data[:, 0] + line[1])) ** 2)\n return err", "def compute_error(data, user_features, item_features, nz):\n sum_err = 0\n for d, n in nz:\n err = data[d,n] - np.dot(item_features[d,:],user_features[:,n])\n sum_err += err**2\n rmse = 0.5*sum_err/len(nz)\n return rmse", "def _mse(self):\n error = self._input * self._weights - self._label\n sum_ = 0.0\n for i in range(self._input.shape[0]):\n sum_ += error[i, 0]**2\n return sum_/self._input.shape[0]", "def _computeError(self, inputs, targets):\n return .5*np.sum((targets-self._pcnfwd(inputs))**2)", "def calculate_error(self):\n \n delta = self.Y - self.T\n error = delta.dot(delta) / self.N\n error = format(error, '.5f')\n \n self.errors.append(error)", "def error_func(x, a0, a1, a2, a3):\n return (a0 / 2) * sp.special.erfc((a1 - x) / a2) + a3", "def model_quadratic(train_x, train_y, test_x):\n train_x = train_x.rename('x', axis=1)\n train_x = sm.add_constant(train_x)\n train_df = train_x.copy()\n train_df['y'] = train_y\n model_fit = sm.formula.ols('y ~ np.power(x, 2) + x + const', data=train_df).fit()\n model_info = {'model': 'quadratic', 'R2': model_fit.rsquared, 'f_pvalue': model_fit.f_pvalue,\n 'const': model_fit.params.const}\n test_x = test_x.rename('x')\n test_x = sm.add_constant(test_x)\n predictions = model_fit.predict(test_x)\n return predictions, model_info", "def _cer_compute(errors: Tensor, total: Tensor) ->Tensor:\n return errors / total", "def root_square_error(input_img, output_img):\n rse = 
np.sqrt(np.sum(np.power(output_img - input_img, 2)))\n\n return rse", "def calc_error(W: np.ndarray, C: np.ndarray, U_in: np.ndarray, U_out: np.ndarray) -> float:\n W_hat = np.einsum('whnm,in,om->whio', C, U_in, U_out)\n elemental_error = np.abs(W - W_hat)\n error_bound = np.mean(elemental_error) / np.mean(np.abs(W))\n return error_bound", "def sum_of_square_error(data_x, data_y, len_data, theta):\n error = 0.0\n #prod represent the out_result\n prod = np.dot(theta, data_x.transpose())\n # the error matrix\n prod -= data_y\n #np.square(prod) prod is a 1*n matrix, every_element get square\n sum_elem = np.sum(np.square(prod))\n # np.sum fet sum of the\n error = sum_elem / (2 * len_data)\n return error", "def error(Y, X):\n return (Y - X) ** 2", "def Error_estimation(params, qu=None, qu_err=None, p_maxL=None, par=False):\n \n if par is True:\n print('Estimate uncertainties for the parameters')\n sigma = par_err(params)\n return(sigma)\n elif qu is not None:\n N = len(qu[0,:])\n model_err = np.zeros((3, N))\n star_err = np.zeros((3,N))\n msg = 'Estimate model uncertainties'\n print(msg)\n if qu_err is None:\n print(' -> by varying the model')\n star_err[:-1,:] = np.std(QU_func(params, qu, star=True), axis=2)\n model_err[:-1,:] = np.std(QU_func(params, qu), axis=2)\n bkgr_err = np.std(background(params[N:,:]), axis=2) #?\n \n else:\n print('-> using data and parameters')\n err = par_err(params)\n s_ax = (p_maxL[:N]*qu_err)**2 + (qu*err[:N])**2\n s_b = np.array([(err[N:-1]*np.cos(2*p_maxL[-1]))**2 +\\\n (2*p_maxL[N:-1]*err[-1]*np.sin(2*p_maxL[-1]))**2,\\\n (err[N:-1]*np.sin(2*p_maxL[-1]))**2 +\\\n (2*p_maxL[N:-1]*err[-1]*np.sin(2*p_maxL[-1]))**2])\n model_err[:-1,:] = np.sqrt(s_ax + s_b)\n star_err[:-1,:] = np.sqrt(s_ax)\n bkgr_err = np.sqrt(s_b)\n star_err[-1,:] = np.sqrt(Cov(star_err[0,:], star_err[1,:]))\n model_err[-1,:] = np.sqrt(Cov(model_err[0,:], model_err[1,:]))\n return(model_err, star_err, bkgr_err)", "def linear_model(inp, w1, b1):\n y = inp @ w1 + b1\n centered_y = y - y.mean()\n return centered_y.sum()", "def _mean_squared_error_compute(sum_squared_error: Tensor, n_obs: int, squared: bool=True) ->Tensor:\n return sum_squared_error / n_obs if squared else torch.sqrt(sum_squared_error / n_obs)", "def _eval_coeff(self, pt):\n return sum(a.coeff(pt) for a in self.args)", "def error(b, m, x_data):\n\n totalError = 0\n for i in range(0, len(x_data)):\n x = x_data[i]\n y = 2 * x_data[i] + 50 + 5 * np.random.random()\n\n totalError += (y - (m * x + b)) ** 2 # total error of gradient\n\n return totalError / float(len(x_data))", "def error_poly(c, data):\n\n #Metric: Sum of squared y-axis differences\n err = np.sum((data[:,1] - np.polyval(c, data[:, 0])) ** 2)\n return err", "def quadratic_cost(output_out, target_out):\r\n total = 0\r\n for target_node in range(len(target_out)): # For each target data set\r\n for output_node in range(len(output_out)): # For each output node\r\n total += (0.5 * (target_out[target_node][output_node] - output_out[output_node])) ** 2\r\n\r\n total = 1 / total\r\n return total", "def compute_global_error(g):\n Fx = 0\n for edge in g.edges:\n\n # pose-pose constraint\n if edge.Type == 'P':\n\n # compute idx for nodes using lookup table\n fromIdx = g.lut[edge.fromNode]\n toIdx = g.lut[edge.toNode]\n\n # get node state for the current edge\n x1 = g.x[fromIdx:fromIdx + 3]\n x2 = g.x[toIdx:toIdx + 3]\n\n # get measurement and information matrix for the edge\n z12 = edge.measurement\n info12 = edge.information\n\n # (TODO) compute the error due to this edge`\n z12 = 
v2t(z12)\n x1 = v2t(x1)\n x2 = v2t(x2)\n eij = t2v(np.linalg.inv(z12) @ np.linalg.inv(x1) @ x2)\n Fx = Fx + eij.transpose() @ info12 @ eij\n # pose-pose constraint\n elif edge.Type == 'L':\n print(\"You shouldn't be here.\")\n # compute idx for nodes using lookup table\n fromIdx = g.lut[edge.fromNode]\n toIdx = g.lut[edge.toNode]\n\n # get node states for the current edge\n x = g.x[fromIdx:fromIdx + 3]\n l = g.x[toIdx:toIdx + 2]\n\n # get measurement and information matrix for the edge\n z = edge.measurement\n info12 = edge.information\n\n # (TODO) compute the error due to this edge\n # TODO2 : do on homo matrices\n xtr = v2t(x)\n R = xtr[0:2, 0:2]\n eil = R.transpose() @ (l - x[0:2]) - z\n Fx = Fx + eil.transpose() @ info12 @ eil\n\n return Fx", "def error(self,pt,eta):\n return self._data[self.__ptBin(pt)][self.__etaBin(eta)][1]", "def cost_derivative(self,output_results,y):\r\n\t\treturn (output_results-y)", "def compute_gradient(self, input, error):\n raise NotImplementedError()", "def back_prop(self, error):\t\n\n\t\terror_h = np.transpose(np.multiply(error,self.d_a_func(self.h)))\n\t\t#print ('error_h should be a vector: ,', error_h)\n\t\tnext_error = error_h.dot(self.weights)\n\t\t#print ('next error should be a vector: ', next_error)\n\t\tself.grad_weights = np.outer(error_h, self.input)\n\n\t\treturn next_error", "def get_error(self, params):\n return self.endog - self.predict(params)", "def rmse3 (a, p) :\n s = len(a)\n z = zip(a, p)\n v = reduce(lambda w, (x, y) : w + sqre_diff(x, y), z, 0.0)\n return math.sqrt(v / s)", "def _mer_compute(errors: Tensor, total: Tensor) ->Tensor:\n return errors / total", "def find_error(p_s, p_t, A_d,\n A, b):\n def T(x):\n return(A.dot(x) + b)\n\n# TODO: add in w_j here\n second_sum = np.array([np.sqrt(np.linalg.norm(T(p_s[i]) - p_t[i]))\n for i in A_d])\n #error = second_sum.sum() / len(A_d)\n# TODO: the below is temprorary!! Need to figure out something not a hack!!\n# the 1/det(A) is to prevent us from pushing A towards zero\n error = second_sum.sum() / len(A_d) + 1 / np.linalg.det(A) + np.linalg.det(A)\n return(error)", "def expsumeval2(self, p, x, y=None, C=None, sumsq=False, weights=None):\n yd = p[0] + (p[1] * numpy.exp(-x / C[0])) + \\\n (p[2] * numpy.exp(-x / C[1]))\n if y is None:\n return yd\n else:\n if sumsq is True:\n return numpy.sum((y - yd) ** 2)\n else:\n return y - yd", "def error(input_, output):\n global number_of_neurons_by_layer\n if len(output) != number_of_neurons_by_layer[-1]:\n raise IndexError(\n f\"\\033[91mDesired output length is incorrect. 
It must be {number_of_neurons_by_layer[-1]}.\\033[m\")\n output = np.array(output).reshape(len(output), 1)\n flow(input_)\n layers[-1][\"error\"] = output - layers[-1][\"v\"]", "def coefficient(self) -> float:\n ...", "def objective(z, x):\n num_tr_data_to_use = _get_tr_dataset_size_from_z0(z[0])\n return salsa_compute_negative_validation_error(x, num_tr_data_to_use)", "def _mse(self, weights):\n error = self._input * weights - self._label\n sum_ = 0.0\n for i in range(self._input.shape[0]):\n sum_ += error[i, 0]**2\n return sum_ / self._input.shape[0]", "def _delta(self, output, err, neuron):\n return neuron._g_prime(output) * err", "def _fit_err(x_data, y_data, formula_function, coefficient_vector):\n\tsum = 0;\n\tm = 1.0 / len(y_data)\n\tfor n in range(0,len(x_data)):\n\t\tt = x_data[n]\n\t\tobs = y_data[n]\n\t\tsim = formula_function(*( t, coefficient_vector ) )\n\t\ter = sim - obs\n\t\tsum += er * er * m\n\treturn sum", "def evaluate(self, input):\n\t\treturn self.function(np.dot(self.weights, np.array([-1] + list(input))))", "def sol_cost ( S, D, R ) :\n\tmask = array([0]* len(R))\n\tmask[R>0] = 1\n\tmaxerr = max(mask * abs(S - D))\n\tmeanerr = float(sum(mask * abs(S - D)))\n\tmeanerr /= sum(mask)\n\t\n\treturn (sum(R * (S - D)**2),maxerr,meanerr)", "def _error(self, xy_tuple, coord_pairs, rcut_sq, kl_pairs):\n # set up target Bravais lattice\n kx1 = kl_pairs[:,0] * xy_tuple[0]\n lx2 = kl_pairs[:,1] * xy_tuple[2]\n ky1 = kl_pairs[:,0] * xy_tuple[1]\n ly2 = kl_pairs[:,1] * xy_tuple[3]\n bravais_pairs = np.vstack((kx1 + lx2, ky1 + ly2)).transpose()\n \n # get squared distance between every Bravais point and every coord point\n # sq_dists has shape (n_bravais_pairs, n_coord_pairs)\n sq_dists = spatial.distance.cdist(bravais_pairs, coord_pairs,\n 'sqeuclidean')\n # get min dist for each coord\n min_sq_dists = np.min(sq_dists, axis=0)\n \n # apply error function\n scaled_sq_dists = min_sq_dists / rcut_sq\n errors = np.where(scaled_sq_dists < 1.0, scaled_sq_dists, 1.0)\n error = np.mean(errors)\n \n # error = 0\n # for coord in coords:\n # find closest Bravais point to each actual particle\n # closest_dist_sq = min([(coord.x-bp.x)**2 + (coord.y-bp.y)**2 for bp in bravais])\n # piecewise error function\n # error += min(closest_dist_sq / rcut_sq, 1.0)\n # error /= len(coords)\n # error = sum([min(min([(coord.x-bp.x)**2 + (coord.y-bp.y)**2 for bp in bravais]) / rcut_sq, 1.0)]) / len(coords)\n \n return error", "def calcError(self, inherited_error):\r\n\t\tif inherited_error == None:\t\t# output neurons\r\n\t\t\tself.error = (self.target - self.value) * self.activate_der()\r\n\t\telse:\r\n\t\t\tself.error = inherited_error * self.activate_der()", "def sumsquares(self):\n return np.dot((self.demeaned ** 2).T, self.weights)", "def calcEout(model_type): \n b, v = calcStatistics(model_type)\n return b + v", "def error_function(x):\n T = [9.60497373987051638749E0,\n 9.00260197203842689217E1,\n 2.23200534594684319226E3,\n 7.00332514112805075473E3,\n 5.55923013010394962768E4]\n U = [3.35617141647503099647E1,\n 5.21357949780152679795E2,\n 4.59432382970980127987E3,\n 2.26290000613890934246E4,\n 4.92673942608635921086E4]\n\n if np.abs(x) > 1.0:\n return 1.0 - error_function_complemented(x)\n else:\n z = x * x\n y = x * pol_evl(z, T, 4) / p1_evl(z, U, 5)\n return y", "def squared_error(a, b):\n return (a - b)**2", "def simple_quadratic_function(x):\r\n\r\n value = x[0]*x[0] + 3*x[0]*x[1]\r\n\r\n grad = np.zeros(2)\r\n grad[0] = 2*x[0] + 3*x[1]\r\n grad[1] = 3*x[0]\r\n\r\n return value, grad", "def 
quadratic(a, b, c):\n A, B, C = K(a), K(b), K(c)\n AXX = mul_fns(A, mul_fns(X, X))\n BX = mul_fns(B, X)\n return add_fns(AXX, add_fns(BX, C))", "def relative_error(Eth_original, Eph_original,Eth_model, Eph_model,theta, phi, dsf=1,kf=-1):\n\n st = np.sin(theta).reshape((len(theta), 1))\n #\n # Construct difference between reference and reconstructed\n #\n if kf!=-1:\n dTh = (Eth_model[kf, :, :] - Eth_original[kf, ::dsf, ::dsf])\n dPh = (Eph_model[kf, :, :] - Eph_original[kf, ::dsf, ::dsf])\n #\n # squaring + Jacobian\n #\n dTh2 = np.real(dTh * np.conj(dTh)) * st\n dPh2 = np.real(dPh * np.conj(dPh)) * st\n\n vTh2 = np.real(Eth_original[kf, ::dsf, ::dsf] \\\n * np.conj(Eth_original[kf, ::dsf, ::dsf])) * st\n vPh2 = np.real(Eph_original[kf, ::dsf, ::dsf] \\\n * np.conj(Eph_original[kf, ::dsf, ::dsf])) * st\n\n mvTh2 = np.sum(vTh2)\n mvPh2 = np.sum(vPh2)\n\n errTh = np.sum(dTh2)\n errPh = np.sum(dPh2)\n else:\n dTh = (Eth_model[:, :, :] - Eth_original[:, ::dsf, ::dsf])\n dPh = (Eph_model[:, :, :] - Eph_original[:, ::dsf, ::dsf])\n #\n # squaring + Jacobian\n #\n dTh2 = np.real(dTh * np.conj(dTh)) * st\n dPh2 = np.real(dPh * np.conj(dPh)) * st\n\n vTh2 = np.real(Eth_original[:, ::dsf, ::dsf] \\\n * np.conj(Eth_original[:, ::dsf, ::dsf])) * st\n vPh2 = np.real(Eph_original[:, ::dsf, ::dsf] \\\n * np.conj(Eph_original[:, ::dsf, ::dsf])) * st\n\n mvTh2 = np.sum(vTh2)\n mvPh2 = np.sum(vPh2)\n\n errTh = np.sum(dTh2)\n errPh = np.sum(dPh2)\n\n errelTh = (errTh / mvTh2)\n errelPh = (errPh / mvPh2)\n errel =( (errTh + errPh) / (mvTh2 + mvPh2))\n\n return(errelTh, errelPh, errel)", "def solve(self, values, errors, const={}, combo=None, check=True, stdev=False):\n if check:\n self.check(values, errors, combo)\n\n val, err = self.used_vars(values, errors, combo)\n n, m = len(val), len(err)\n\n xk = np.array([errors[k] for k in err], dtype='float').reshape(-1, 1)\n jac = self.jacobian(values, errors, combo)\n ju, jk = jac[:,:n], jac[:,n:n+m]\n jui = np.abs(np.linalg.inv(ju))\n ju, jk = np.abs(ju), np.abs(jk)\n\n if const:\n ck = np.array([const.get(k, 0) for k in err], dtype='float').reshape(-1, 1)\n cu = np.array([const.get(k, 0) for k in val], dtype='float').reshape(-1, 1)\n\n if stdev:\n xk **= 2\n ck **= 2\n cu **= 2\n xk += ck\n else:\n xk = np.abs(xk)\n ck = np.abs(ck)\n cu = np.abs(cu)\n xk += ck\n\n xu = jui.dot(jk.dot(xk) + ju.dot(cu))\n del jac, jui, ju, jk, cu, ck\n\n else:\n if stdev:\n xk **= 2\n else:\n xk = np.abs(xk)\n\n xu = jui.dot(jk.dot(xk))\n del jac, jui, ju, jk\n\n if stdev:\n xu **= 0.5\n xk **= 0.5\n\n xu = xu.ravel()\n xk = xk.ravel()\n\n # Create data frame of results\n df = pd.DataFrame()\n df['var'] = val + err\n df['value'] = [values[k] for k in df['var']]\n df['error'] = np.concatenate([xu, xk])\n df['pct_error'] = 100 * abs(df['error'] / df['value'])\n df['is_calc'] = np.concatenate([np.ones(n), np.zeros(m)]).astype('bool')\n\n df.sort_values('var', inplace=True)\n df.replace(float('inf'), np.nan, inplace=True)\n df.set_index('var', inplace=True)\n\n return df", "def eAdd(P, Q): #adds 2 points by using the slope to find where the line intersects and returns the negation of that point\r\n R = point(0,0,P.c) #creates point object to store result\r\n if (P.x == 0 and P.y == 0) and (Q.x == 0 and Q.y == 0): #(0,0) is the identity\r\n return P #returns the identity\r\n elif P.x == 0 and P.y == 0:\r\n return Q\r\n elif Q.x == 0 and Q.y == 0:\r\n return P\r\n elif P == Q: #in case it is called when double should be\r\n R = eDouble(P)\r\n else: #this preforms the actual addition\r\n i 
= P.y-Q.y\r\n j = P.x-Q.x\r\n s = (i * modInv(j, P.c.p) ) % P.c.p\r\n R.x = ( ( (s**2) - P.x - Q.x) % P.c.p)\r\n R.y = ( (-P.y + s * (P.x - R.x) ) % P.c.p)\r\n return R", "def test_error1(version='scalar'):\n Lx = 10\n Ly = 10\n c = 1.0\n\n def exact(x, y, t):\n kx = pi/Lx; ky = pi/Ly; omega = sqrt(kx*kx + ky*ky)\n return cos(omega*t)*sin(kx*x)*sin(ky*y)\n\n def I1(x, y):\n return exact(x, y, 0)\n\n def bc(x, y, t):\n return exact(x, y, t)\n\n def f(x, y, t):\n if isinstance(x, ndarray) and isinstance(y, ndarray):\n return zeros((x.shape[0], y.shape[1]))\n else:\n return 0.0\n \n error = []\n def action(u, xv, yv, t):\n e = exact(xv, yv, t) - u\n error.append((t, sqrt(innerproduct(e.flat,e.flat))))\n\n t0 = time.clock()\n implementation = {'ic': version, 'inner': version, 'bc': version}\n nx = 10; ny = 4; tstop = 20\n solver(I1, f, c, bc, Lx, Ly, nx, ny, 0, tstop,\n user_action=action, implementation=implementation)\n for t, e in error:\n print 't=%10.2E error=%10.2E' % (t, e)", "def lsq_fun(c: npt.NDArray, x: npt.NDArray, y: npt.NDArray) -> npt.NDArray:\n Ri = calc_R(x, y, c[0], c[1])\n mean: float = Ri.mean()\n return Ri - mean", "def Cost(self, input_data: list, target_output_data: list):\n error = 0\n for input_, target_output in zip(input_data, target_output_data):\n generated_output = self.Evaluate(input_)\n for target_output_value, generated_output_value in zip(target_output, generated_output):\n error += (target_output_value - generated_output_value) ** 2\n return error / (2 * len(input_data))", "def solve_part2(input, verbose=False):\n equations = parse(input)\n\n result = []\n for eq in equations:\n result.append(solve_equation_addition_precendence(eq, verbose))\n\n if verbose:\n print(f\"results: {result}\")\n\n return sum(result)", "def r_squared(beta_0: float, beta_1: float, x: np.ndarray, y: np.ndarray) -> float:\n return 1.0 - (sum_of_sq_errors(beta_0, beta_1, x, y) / total_sum_of_squares(y))", "def calc_error(self):\n if self._fit_data.y is not None and self._fit_data.y_fit is not None:\n self._fit_data.error_vector = self._fit_data.y - self._fit_data.y_fit", "def error_estimation_simplex(vertex_vector_h, vertex_chi_sq_h, func):\n # print(\"\\nvertex_vector\")\n # print(vertex_vector_h)\n # print(\"\\nvertex_chi_sq\")\n # print(vertex_chi_sq_h)\n\n # temporary solution\n k, hh = vertex_vector_h.shape # hh = k-1\n theta_0 = vertex_vector_h[0, :]\n m_q = numpy.zeros((k-1, k-1))\n vertex_vector = numpy.zeros(vertex_vector_h.shape, dtype=float)\n vertex_vector[0, :] = theta_0\n max_radius = numpy.zeros(k-1, dtype=float)\n for i in range(1, k):\n theta_i = vertex_vector_h[i, :]\n rand_radius = numpy.abs(theta_i-theta_0)\n max_radius = numpy.max(numpy.vstack([max_radius, rand_radius]), axis=0)\n # print(\"max_radius \", max_radius)\n for i in range(1, k):\n radius_h = numpy.zeros(k-1, dtype=float)\n radius_h[i-1] = max_radius[i-1]\n vertex_vector[i, :] = theta_0+radius_h\n\n l_chi_sq = []\n for i in range(0, k):\n theta_i = vertex_vector_h[i, :]\n chi_sq = func(theta_i)\n l_chi_sq.append(chi_sq)\n vertex_chi_sq = numpy.array(l_chi_sq, dtype=float)\n\n # print(\"hh, k: \", hh, k)\n # print(\"theta_0: \", theta_0)\n chi_sq_0 = vertex_chi_sq[0]\n # print(\"chi_sq_0: \", chi_sq_0)\n v_a = numpy.zeros(k-1)\n m_b = numpy.zeros((k-1, k-1))\n m_q = numpy.zeros((k-1, k-1))\n m_chi_sq_0i = numpy.zeros(k-1)\n # print(\"step 1\")\n for i in range(1, k):\n theta_i = vertex_vector[i, :]\n theta_0i = 0.5*(theta_0+theta_i)\n chi_sq_0i = func(theta_0i)\n # print(\"ii: {:} {:}\".format(i, 
chi_sq_0i))\n m_chi_sq_0i[i-1] = chi_sq_0i\n m_q[i-1, :] = theta_i-theta_0\n\n # print(\"step 2\")\n for i in range(1, k):\n chi_sq_i = vertex_chi_sq[i]\n theta_i = vertex_vector[i, :]\n chi_sq_0i = m_chi_sq_0i[i-1]\n\n a_i = 4.*chi_sq_0i - chi_sq_i - 3.*chi_sq_0\n v_a[i-1] = a_i\n\n b_ii = 2.*(chi_sq_i + chi_sq_0 - 2.*chi_sq_0i)\n m_b[i-1, i-1] = b_ii\n\n for j in range(i+1, k):\n chi_sq_0j = m_chi_sq_0i[j-1]\n theta_j = vertex_vector[j, :]\n theta_ij = 0.5*(theta_i+theta_j)\n chi_sq_ij = func(theta_ij)\n # print(\"ij: {:} {:} {:}\".format(i, j, chi_sq_ij))\n b_ij = 2.*(chi_sq_ij + chi_sq_0 - chi_sq_0i - chi_sq_0j)\n m_b[i-1, j-1] = b_ij\n m_b[j-1, i-1] = b_ij\n # print(\"step 3\")\n m_ib = numpy.linalg.inv(m_b)\n m_qib = numpy.matmul(m_q, m_ib)\n v_qiba = numpy.matmul(m_qib, v_a)\n # theta_min = theta_0 - v_qiba\n m_qibqt = numpy.matmul(m_qib, m_q.transpose())\n m_error = 2.*chi_sq_0*m_qibqt\n\n # print(\"\\nm_q\")\n # print(m_q)\n # print(\"\\nm_b\")\n # print(m_b)\n # print(\"\\nm_ib\")\n # print(m_ib)\n # print(\"\\nv_a\")\n # print(v_a)\n # print(\"\\ntheta_min: \", theta_min)\n # print(\"\\ntheta_0: \", theta_0)\n\n # print(\"\\nm_error: \", m_error)\n # print(50*\"*\")\n return m_error, numpy.abs(v_qiba)", "def compute_errors(u_exact, u):\n\n # Compute error norm (for very small errors, the value can be\n # negative so we run abs(assemble(error)) to avoid failure in sqrt\n\n V = u.function_space()\n\n # Function - Expression\n error = (u - u_exact)**2*dx\n E1 = sqrt(abs(assemble(error)))\n\n # Explicit interpolation of u_e onto the same space as u:\n u_e = interpolate(u_exact, V)\n error = (u - u_e)**2*dx\n E2 = sqrt(abs(assemble(error)))\n\n # Explicit interpolation of u_exact to higher-order elements,\n # u will also be interpolated to the space Ve before integration\n Ve = FunctionSpace(V.mesh(), 'P', 5)\n u_e = interpolate(u_exact, Ve)\n error = (u - u_e)**2*dx\n E3 = sqrt(abs(assemble(error)))\n\n # fenics.errornorm interpolates u and u_e to a space with\n # given degree, and creates the error field by subtracting\n # the degrees of freedom, then the error field is integrated\n # TEMPORARY BUG - doesn't accept Expression for u_e\n #E4 = errornorm(u_e, u, normtype='l2', degree=3)\n # Manual implementation errornorm to get around the bug:\n def errornorm(u_exact, u, Ve):\n u_Ve = interpolate(u, Ve)\n u_e_Ve = interpolate(u_exact, Ve)\n e_Ve = Function(Ve)\n # Subtract degrees of freedom for the error field\n e_Ve.vector()[:] = u_e_Ve.vector().array() - u_Ve.vector().array()\n # More efficient computation (avoids the rhs array result above)\n #e_Ve.assign(u_e_Ve) # e_Ve = u_e_Ve\n #e_Ve.vector().axpy(-1.0, u_Ve.vector()) # e_Ve += -1.0*u_Ve\n error = e_Ve**2*dx(Ve.mesh())\n return sqrt(abs(assemble(error))), e_Ve\n E4, e_Ve = errornorm(u_exact, u, Ve)\n\n # Infinity norm based on nodal values\n u_e = interpolate(u_exact, V)\n E5 = abs(u_e.vector().array() - u.vector().array()).max()\n\n # H1 seminorm\n error = dot(grad(e_Ve), grad(e_Ve))*dx\n E6 = sqrt(abs(assemble(error)))\n\n # Collect error measures in a dictionary with self-explanatory keys\n errors = {'u - u_exact': E1,\n 'u - interpolate(u_exact,V)': E2,\n 'interpolate(u,Ve) - interpolate(u_exact,Ve)': E3,\n 'errornorm': E4,\n 'infinity norm (of dofs)': E5,\n 'grad(error) H1 seminorm': E6}\n\n return errors", "def grad(self,x,out):\n out.zero()\n self.prior.M.mult(x,out)", "def OF1_CalcErrorEstimation(param_list, args):\n #return (sum( \\\n #( OF1_SumOfGauss(param_list, classNum, g_lvls) - histogram ) ** 2) / g_lvls.size) + 
\\\n #(abs(sum(param_list[:classNum]) - 1) * o)\n return (sum( \\\n ( OF1_SumOfGauss(param_list, args[0], args[1]) - args[2] ) ** 2) / args[1].size) + \\\n (abs(sum(param_list[:args[0]]) - 1) * args[3])", "def sum1(cur,p1,p2):\n\tlam = (p2[1]-p1[1])/(p2[0]-p1[0])\n\tnu = p1[1] - lam*p1[0]\n\tx3 = lam**2 - cur[0] - p1[0] - p2[0]\n\ty3 = -(lam * x3 + nu)\n\treturn (x3,y3)", "def test_quadratic_problem():\n\n @tf.function\n def fn(w):\n a = tf.constant(_TEST_MATRIX, tf.float32)\n return tf.matmul(tf.matmul(w, a, transpose_a=True), w) / 2\n\n @tf.function\n def grad_fn(w):\n a = tf.constant(_TEST_MATRIX, tf.float32)\n return tf.matmul(a, w)\n\n @tf.function\n def initial_w():\n return tf.constant(_INITIAL_W, tf.float32)\n\n return initial_w, fn, grad_fn", "def val_sum(self, axis = None):\n f = self.to_Poly()\n return f.val_sum(axis).to_PolyMesh(self.params)", "def sum_hessian(\n self, module: Module, g_inp: Tuple[Tensor], g_out: Tuple[Tensor]\n ) -> Tensor:\n self._check_2nd_order_make_sense(module, g_out)\n return self._sum_hessian(module, g_inp, g_out)", "def final_cost(self, x):\n return self.x_M_x(x[-1,:,:],self.R)", "def loss_total(self):\r\n def loss(y_true, y_pred):\r\n l2 = 1/2*K.sum(K.square(y_true-y_pred))\r\n\r\n return l2\r\n return loss", "def _mean_squared_log_error_compute(sum_squared_log_error: Tensor, n_obs: int) ->Tensor:\n return sum_squared_log_error / n_obs", "def mse(self):\n xs, ys = self.R.nonzero()\n predicted = self.full_matrix()\n error = 0\n for x, y in zip(xs, ys):\n # print(predicted[x, y], self.R[x, y] )\n error += pow(self.R[x, y] - predicted[x, y], 2)\n return np.sqrt(error)", "def CalculateFitError(self, *args):\n return _ITKCostFunctionsPython.itkCumulativeGaussianCostFunction_CalculateFitError(self, *args)", "def ridge_regression(y, tx, lambda_):\n N = tx.shape[0]\n a = tx.T.dot(tx) + 2 * N * lambda_ * np.identity(tx.shape[1])\n b = tx.T.dot(y)\n w = np.linalg.solve(a, b)\n loss = compute_loss_LS(y, tx, w) \n return loss, w", "def output(self):\n\n\t\tself._previousError = self._getErrorFunction()\n\t\n\t\treturn self._proportionalTerm() + self._derivativeTerm() + self._integralTerm()", "def mean_squared_error(y_true, y_pred, *, sample_weight=..., multioutput=..., squared=...):\n ...", "def _compute_error(self,expected_out,actual_out,error_func):\n\n error = error_func(expected_out,actual_out)\n return error", "def calculate_submodel_error(self, stage_idx, submodel_idx):\n return rqrmilib.calculate_submodel_error(self._get_native_object(), self.probe, stage_idx, submodel_idx)", "def get_quantization_error(self, X: Optional[Sequence] = None) -> float:\n if not self.fitted_:\n raise RuntimeError(\"SOM is not fitted!\")\n\n if X is None:\n X = self.X_\n\n weights_per_datapoint = self._get_weights_per_datapoint(X)\n\n quantization_errors = np.linalg.norm(\n np.subtract(weights_per_datapoint, X), axis=1\n )\n\n return np.mean(quantization_errors)", "def additional_equations(self, k):\n ######################################################################\n # equation for saturated gas at hot side outlet\n o1 = self.outl[0].to_flow()\n self.residual[k] = o1[2] - h_mix_pQ(o1, 1)", "def calculate_total_loss(self, train_x, train_y):\n return np.sum([self.calculate_error(x, y)\n for x, y in zip(train_x, train_y)])", "def calc_error(y_real, y_pred):\n if len(y_real) > 0:\n curr_err = rmse(y_pred, y_real)\n else:\n curr_err = np.nan\n return curr_err", "def huber_loss(y, q_value):\n error = K.abs(y - q_value)\n quadratic_part = K.clip(error, 0.0, 1.0)\n linear_part = 
error - quadratic_part\n loss = K.mean(0.5 * K.square(quadratic_part) + linear_part)\n return loss", "def _residual(function, p, x, y, y_err):\n return (y - function(p, x)) / y_err", "def loss_func(coefs: np.ndarray, X: pd.DataFrame, y: pd.Series, n: int) -> float:\n est = np.dot(X, coefs)\n err = np.sum(np.power(est - y, 2))\n rmse = np.sqrt(err/n)\n return rmse", "def gradFun(self, S, x):", "def error_coefficient(self,tree,mode='exact'):\n from numpy import dot\n from sympy import Rational, simplify\n code=elementary_weight_str(tree)\n A,b,c = self.A,self.b,self.c\n\n if A.dtype == object:\n exec('coeff = simplify({} - Rational(1, {}))'.format(code, tree.density()))\n else:\n exec(\"coeff = ({} - 1.0 / {})\".format(code, tree.density()))\n return locals()[\"coeff\"] / tree.symmetry()", "def mse_cost_function(predicted_output, actual_output):\n error = predicted_output - actual_output\n mse_cost = np.sum(error ** 2) /(2 * len(actual_output),)\n return mse_cost, error", "def input_error(self, out_influence, new_weights):\n in_influence = np.dot(np.transpose(new_weights), out_influence)\n return in_influence", "def vae_loss(self, inputs, outputs, z_mean, z_log):\n\n reconstruction_loss = self.loss(inputs, outputs)\n reconstruction_loss *= self.n_features_\n kl_loss = 1 + z_log - K.square(z_mean) - K.exp(z_log)\n kl_loss = -0.5 * K.sum(kl_loss, axis=-1)\n kl_loss = self.gamma * K.abs(kl_loss - self.capacity)\n\n return K.mean(reconstruction_loss + kl_loss)", "def rmse(self):\n lam = self.lam()\n weights = lam / lam.sum()\n weighted_var = self.var() * weights\n rmse = np.sqrt(weighted_var.sum())\n return rmse" ]
[ "0.6533413", "0.62851167", "0.61784077", "0.6051464", "0.6023184", "0.5898153", "0.5836756", "0.58310413", "0.58302575", "0.5818229", "0.57950467", "0.5793478", "0.5786754", "0.57682145", "0.57595587", "0.5749468", "0.5714533", "0.57093656", "0.5703172", "0.5694905", "0.56835437", "0.56599706", "0.5656564", "0.56349933", "0.5634075", "0.5622921", "0.56179816", "0.56094664", "0.5605314", "0.55944586", "0.55821055", "0.5575781", "0.55572283", "0.55518746", "0.5542391", "0.5536453", "0.5533215", "0.5528651", "0.5524857", "0.54953927", "0.54735684", "0.54700863", "0.5469814", "0.5465803", "0.54616904", "0.54599965", "0.54574543", "0.5455994", "0.54525316", "0.5445146", "0.5442226", "0.54357547", "0.5429407", "0.5423522", "0.541205", "0.5409626", "0.53973716", "0.539492", "0.5387241", "0.53808165", "0.53797525", "0.5372849", "0.5368821", "0.53502166", "0.53475827", "0.5341109", "0.53239506", "0.53238636", "0.5323344", "0.53178066", "0.5308863", "0.53032196", "0.53005964", "0.52897555", "0.5287502", "0.5287013", "0.52835554", "0.5281872", "0.5280337", "0.52794826", "0.5276171", "0.52731925", "0.52729267", "0.5266379", "0.5263517", "0.5261064", "0.5259164", "0.52573717", "0.5256101", "0.5253653", "0.5251353", "0.5247003", "0.52455944", "0.5242616", "0.52397937", "0.5238031", "0.5233316", "0.5230885", "0.5227022", "0.52239853" ]
0.5932937
5
it computes "delta" and "Delta_w"
def backpropagate(eta, momentum):
    for i_lay in range(len(layers)-1, 0, -1):
        lay = layers[i_lay]
        if i_lay == len(layers)-1:
            lay["delta"] = lay["error"] * dlogistic(lay["v"])
        else:
            lay["delta"] = (layers[i_lay+1]["weigths"][:, 1:].T @ layers[i_lay+1]["delta"]) * dlogistic(lay["v"])
        lay["Delta_w"] = eta * lay["delta"] @ layers[i_lay - 1]["y"].T + \
            momentum * lay["Delta_w"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getDelta(self,u,w,v=None):\r\n if v==None :\r\n return self._deltaDot[u,w]\r\n elif self._sigma[u,v]==0 or self._sigma[u,w]==0 or self._sigma[w,v]==0:\r\n return 0.0\r\n elif (self._d[u,v]==self._d[u,w]+self._d[w,v]):\r\n return 1.0 * self._sigma[u,w]*self._sigma[w,v]/self._sigma[u,v]\r\n else:\r\n return 0.0", "def optimise(w, w_delta):\n return w.assign(w - w_delta)", "def delta(self) -> None:", "def backward_pass(self, delta):\r\n self.d_x = np.dot(delta, self.w.T)\r\n self.d_b = np.matmul(np.ones((1, delta.shape[0])), delta)\r\n self.d_w = np.dot(self.x.T, delta)\r\n return self.d_x", "def delta(self):\r\n return self.nd1()", "def getdelta(self):\n\t\tmyhmag.initializehelmholtz()\n\t\tabar = 13.714285714285715\n\t\tzbar = abar/2.0\n\t\tself.data[\"delta\"] = np.zeros(len(self.data[\"rho\"]))\n\t\tfor i in range(len(self.data[\"rho\"])):\n\t\t\tadgradred,hydrograd,my_nu,my_alpha,self.data[\"delta\"][i],my_gamma1,my_cp,my_cph,my_c_s,failtrig = myhmag.gethelmgrads(self.data[\"T\"][i], self.data[\"rho\"][i], 0.,abar,zbar,True)", "def epsilon_delta(self):", "def delta(self):\n return (self.upper-self.lower) / float(self.num_cells)", "def calculate_delta_weights(self, out_tensors: List[Tensor], in_tensors: List[Tensor]) -> List[Tensor]:\n return None", "def compose_after_from_vector_inplace(self, delta):\n model_jacobian = self.pdm.model.jacobian\n points = self.pdm.model.mean.points\n n_points = self.pdm.model.mean.n_points\n\n # compute:\n # -> dW/dp when p=0\n # -> dW/dp when p!=0\n # -> dW/dx when p!=0 evaluated at the source landmarks\n\n # dW/dp when p=0 and when p!=0 are the same and simply given by\n # the Jacobian of the model\n dW_dp_0 = model_jacobian\n dW_dp = dW_dp_0\n # dW_dp_0: n_points x n_params x n_dims\n # dW_dp: n_points x n_params x n_dims\n\n dW_dx = self.transform.jacobian_points(points)\n # dW_dx: n_points x n_dims x n_dims\n\n #TODO: Can we do this without splitting across the two dimensions?\n dW_dx_x = dW_dx[:, 0, :].flatten()[..., None]\n dW_dx_y = dW_dx[:, 1, :].flatten()[..., None]\n dW_dp_0_mat = np.reshape(dW_dp_0, (n_points * self.n_dims,\n self.n_parameters))\n dW_dx_dW_dp_0 = dW_dp_0_mat * dW_dx_x + dW_dp_0_mat * dW_dx_y\n dW_dx_dW_dp_0 = np.reshape(dW_dx_dW_dp_0,\n (n_points, self.n_parameters, self.n_dims))\n # dW_dx: n_points x n_dims x n_dims\n # dW_dp_0: n_points x n_params x n_dims\n # dW_dx_dW_dp_0: n_points x n_params x n_dims\n\n J = np.einsum('ijk, ilk -> jl', dW_dp, dW_dx_dW_dp_0)\n H = np.einsum('ijk, ilk -> jl', dW_dp, dW_dp)\n\n Jp = np.linalg.solve(H, J)\n # Jp: n_params x n_params\n\n self.from_vector_inplace(self.as_vector() + np.dot(Jp, delta))\n return self", "def delta(flag, S, K, t, r, sigma, q): \n\n b = r-q\n\n return numerical_delta(flag, S, K, t, r, sigma, b, f)", "def delta_w(w_k, x, t, learning_rate):\n return -learning_rate * np.mean(gradient(w_k, x, t))", "def delta(z, a, y):\n return (a-y)", "def m_delta(r, m_x, **kwargs):\n return m_x", "def gen_delta(self):\n delta = self.delta.gen_delta(self.mask.good_pix, self.mask.bad_pix,\n self.params.nside, self.params.npix)\n return delta", "def delta(self):\n return self.get_dim_attribute('delta')", "def backward_pass(self, delta):\n\n a = config['learning_rate']\n y = config['momentum_gamma']\n m = config['momentum']\n l = config['L2_penalty']\n\n # print(\"shape of delta incoming: \", delta.shape, \"shape of x: \", self.x.shape)\n self.d_x = delta.T @ self.x\n # print(\"SHAPE OF GRADIENT: \", self.d_x.shape)\n\n # gradient momentum\n self.w_inc = (a * self.d_x.T) + (y * 
self.d_v) - l * self.w\n \n # saving \n if m:\n self.d_v = self.w_inc\n else:\n self.d_v = np.zeros(self.w.shape)\n\n # backprop for bias weights\n x_0 = np.ones([len(delta), 1])\n\n self.d_b = delta.T @ x_0\n\n # print(\"shape of BIAS GRAD: \", self.d_b.shape)\n\n self.d_w = delta @ self.w.T\n # print(\"shape of w.T: \", self.w.T.shape, \"shape of RETURN delta: \", self.d_w.shape)\n #print(self.w.shape)\n return self.d_w", "def _w_diff_dcm(self, otherframe):\n dcm2diff = self.dcm(otherframe)\n diffed = dcm2diff.diff(dynamicsymbols._t)\n angvelmat = diffed * dcm2diff.T\n w1 = trigsimp(expand(angvelmat[7]), recursive=True)\n w2 = trigsimp(expand(angvelmat[2]), recursive=True)\n w3 = trigsimp(expand(angvelmat[3]), recursive=True)\n return -Vector([(Matrix([w1, w2, w3]), self)])", "def calculate_W(self, D_k, theta_hat_k, delta_theta_k):\n\n W = np.zeros(shape=(3,2))\n W.setflags(write=1)\n # print(D_k)\n # print(np.sin(theta_hat_k + delta_theta_k))\n # print(np.cos(theta_hat_k + delta_theta_k))\n W[0, 0] = -D_k*np.sin(theta_hat_k + delta_theta_k)\n W[0, 1] = np.cos(theta_hat_k + delta_theta_k)\n W[1, 0] = D_k*np.cos(theta_hat_k + delta_theta_k)\n W[1, 1] = np.sin(theta_hat_k + delta_theta_k)\n W[2, 0] = 1\n return(W)", "def _update_weights(self, alpha, delta):\n res = []\n for j, weight in enumerate(self._weights):\n self._weights[j] = weight + (alpha * delta * self._g_prime(self._in_j))\n #print(\"Prev weight: {} New weight: {}\".format(weight, self._weights[j]))\n res.append(self._weights[j] - weight)\n return res[0]", "def delta(self):\r\n return 1 - xl.Refractive_Index_Re(self.compound, self.energy, self.density)", "def backward(self, delta):\n self.d_x = np.dot(delta, np.transpose(self.w))\n self.d_w = np.dot(np.transpose(self.x), delta)/self.x.shape[0]\n self.d_b = np.mean(delta, axis=0).reshape((1, delta.shape[1]))\n return self.d_x\n\n raise NotImplementedError(\"Backprop for Layer not implemented.\")", "def updateweigths():\n for i_lay in range(1, len(layers)):\n layers[i_lay][\"weigths\"] += layers[i_lay][\"Delta_w\"]", "def deltaCalc(self, expected):\n \n n = len(self.structure)\n self.delta = [None] * n\n self.delta[n - 1] = []\n \n for i in xrange(len(expected)):\n curr = self.a[n - 1][i]\n self.delta[n - 1].append(self.derivativeFunc(curr) * (expected[i] - curr))\n self.delta[n - 1] = np.array(self.delta[n - 1])\n \n # From n - 1 to 1 layer \n for i in xrange(n - 1, 0, -1):\n currDelta = self.delta[i]\n if i != (n - 1):\n currDelta = currDelta[0][:-1]\n \n self.delta[i - 1] = np.array(np.dot(currDelta, self.theta[i]))\n self.delta[i - 1][0] *= self.a[i - 1]\n \n return", "def compute_hidden_delta(self):\r\n out = self.activation\r\n outedges = self.out_edges\r\n wsum = 0.0\r\n for edge in outedges:\r\n wsum += edge.old_weight*(edge.dest.delta)\r\n self.delta = out*(1-out)*wsum", "def make_delta(self, delta):\n\n return delta", "def backward(self, delta_W_next):\n delta = delta_W_next * self._act.a_prime(self._z)\n delta_W = np.dot(delta, self._W.T)\n grad_w = np.dot(self._X.T, delta)\n grad_b = np.array(([np.sum(delta, axis=0)]))\n return grad_w, grad_b, delta_W", "def delta(self):\n return 1 - xl.Refractive_Index_Re(self.compound, self.energy, self.density)", "def delta(self):\n return self._delta", "def delta(self):\n return self._delta", "def delta(self):\n return self._delta", "def delta(self):\n return self._delta", "def _forces_moments(self, delta):\n # assert delta.shape == (4,1)\n da = delta[0]\n de = delta[1]\n dt = delta[2]\n dr = delta[3]\n\n e0 = self._state[3]\n e1 
= self._state[4]\n e2 = self._state[5]\n e3 = self._state[6]\n u = self._state[7]\n v = self._state[8]\n w = self._state[9]\n p = self._state[10]\n q = self._state[11]\n r = self._state[12]\n\n self._Va = np.sqrt(u**2 + v**2 + w**2)\n self._alpha = np.arctan(1.0*w/u)\n self._beta = np.arcsin(1.0*v/self._Va)\n\n\n\n Fg = self.mass*self.gravity*np.array([2*(e1*e3-e2*e0),\n 2*(e2*e3 + e1*e0),\n e3**2 + e0**2 - e1**2 - e2**2,\n ])\n\n # Fg = self.mass*self.gravity*np.array([2*(e1*e3 - e2*e0),\n # 2*(e2*e3 + e1*e0),\n # e3**2 + e0**2 - e1**2 - e2**2,\n # ])\n\n M_e = 25\n sig = lambda a: (1+np.exp(-M_e*(a-self.alpha0))+np.exp(M_e*(a+self.alpha0)))/((1+np.exp(-M_e*(a-self.alpha0)))*(1+np.exp(M_e*(a+self.alpha0))))\n cla = lambda a: (1-sig(a))*(self.C_L_0+self.C_L_alpha*a)+sig(a)*(2*np.sign(a)*np.sin(a)**2*np.cos(a))\n cda = lambda a: self.C_D_p + (self.C_L_0+self.C_L_alpha*a)**2/(np.pi*self.e*self.AR)\n\n cxa = lambda a: -(cda(a)) * np.cos(a) + (cla(a)) * np.sin(a)\n\n cxq = lambda a: -self.C_D_q * np.cos(a) +self.C_L_q * np.sin(a)\n\n cxde = lambda a: -self.C_D_delta_e * np.cos(a) + self.C_L_delta_e * np.sin(a)\n\n cza = lambda a: -(cda(a)) * np.sin(a) - (cla(a)) * np.cos(a)\n\n czq = lambda a: -self.C_D_q * np.sin(a) - self.C_L_q * np.cos(a)\n\n czde = lambda a: -self.C_D_delta_e * np.sin(a) - self.C_L_delta_e * np.cos(a)\n\n c = self.c/(2.0*self._Va)\n b = self.b/(2.0*self._Va)\n\n\n\n one = 0.5*self.rho*self._Va**2*self.S_wing\n # two = np.array([[1,0,0],[0,1,0],[0,0,1]])\n three = np.array([[cxa(self._alpha)+cxq(self._alpha)*c*q+cxde(self._alpha)*de],\n [self.C_Y_0+self.C_Y_beta*self._beta+self.C_Y_p*b*p+self.C_Y_r*b*r+self.C_Y_delta_a*da+self.C_Y_delta_r*dr],\n [cza(self._alpha)+czq(self._alpha)*c*q+czde(self._alpha)*de]])\n\n Fa = np.squeeze(three) * one\n # pdb.set_trace()\n Fa = Fa.reshape((3,-1))\n\n F = Fg + Fa\n #\n # print(\"Fa:\",Fa)\n\n Fp = 0.5*self.rho*self.S_prop*self.C_prop*((self.k_motor*dt)**2-self._Va**2)\n\n # print(\"FP:\", Fp)\n\n fx = F[0] + Fp\n # + 0.5*MAV.rho*self._Va**2*MAV.S_wing*(\\\n # +cxa(self._alpha)\\\n # + cxq(self._alpha)*c*q\\\n # + cxde(self._alpha)*de\n # )\n\n fy = F[1]\n fz = F[2]\n\n # Moment time!!!\n one = 0.5*self.rho*self._Va**2*self.S_wing\n two = np.array([\\\n [self.b*(self.C_ell_0+self.C_ell_beta*self._beta+self.C_ell_p*b*p+self.C_ell_r*b*r+self.C_ell_delta_a*da+self.C_ell_delta_r*dr)],\n [self.c*(self.C_m_0+(self.C_m_alpha*self._alpha)+(self.C_m_q*c*q)+(self.C_m_delta_e*de))],\n [self.b*(self.C_n_0+(self.C_n_beta*self._beta)+(self.C_n_p*b*p)+(self.C_n_r*b*r)+(self.C_n_delta_a*da)+(self.C_n_delta_r*dr))]\n ])\n Ma = one * np.squeeze(two)\n # print(\"\\nMa:\", Ma)\n # pdb.set_trace()\n Ma = Ma.reshape((3,-1))\n\n size = Ma.shape[1]\n\n Mp = np.block([[np.ones(size)*-self.kTp*(self.kOmega*dt)**2],\n [np.zeros(size)],\n [np.zeros(size)]\n ])\n\n M = Mp + Ma\n\n Mx = M[0]\n My = M[1]\n Mz = M[2]\n\n # self._forces[0] = fx\n # self._forces[1] = fy\n # self._forces[2] = fz\n # pdb.set_trace()\n # print(fx, fy, fz, Mx, My, Mz)\n\n return np.array([fx, fy, fz, Mx, My, Mz])", "def worddist(self):\n #return (self.n_z_t + self.beta) / (self.n_z[:, numpy.newaxis] + self.V * self.beta), len(self.n_z)\n return (self.n_z_t + self.n_w_d + self.beta) / (self.n_z[:, numpy.newaxis] + self.n_w[:, numpy.newaxis] + self.V * self.beta), len(self.n_z)", "def get_Delta_weigths():\n ls = []\n for i_lay in range(1, len(layers)):\n ls.append(layers[i_lay][\"Delta_w\"])\n return ls", "def downwashGradW(self):\n A = self.r / (self.r**2 + self.mTV**2)\n B = 0.4876 / 
(sqrt(self.r**2 + 0.6319 + self.mTV**2))\n C = 1 + (self.r**2 / (self.r**2 + 0.7915 + 5.0734 * self.mTV**2))**0.3113\n D = 1 - sqrt(self.mTV**2 / (1 + self.mTV**2))\n return self.Kepsilon * (A * B + C * D) * self.clAlphaW / (pi * self.aspectRatioW)", "def compose_after_from_vector_inplace(self, delta):\n model_jacobian = self.pdm.model.jacobian\n points = self.pdm.model.mean.points\n n_points = self.pdm.model.mean.n_points\n\n # compute:\n # -> dW/dp when p=0\n # -> dW/dp when p!=0\n # -> dW/dx when p!=0 evaluated at the source landmarks\n\n # dW/dq when p=0 and when p!=0 are the same and given by the\n # Jacobian of the global transform evaluated at the mean of the\n # model\n dW_dq = self._global_transform_jacobian(points)\n # dW_dq: n_points x n_global_params x n_dims\n\n # dW/db when p=0, is the Jacobian of the model\n dW_db_0 = model_jacobian\n # dW_db_0: n_points x n_weights x n_dims\n\n # dW/dp when p=0, is simply the concatenation of the previous\n # two terms\n dW_dp_0 = np.hstack((dW_dq, dW_db_0))\n # dW_dp_0: n_points x n_params x n_dims\n\n # by application of the chain rule dW_db when p!=0,\n # is the Jacobian of the global transform wrt the points times\n # the Jacobian of the model: dX(S)/db = dX/dS * dS/db\n dW_dS = self.pdm.global_transform.jacobian_points(points)\n dW_db = np.einsum('ilj, idj -> idj', dW_dS, dW_db_0)\n # dW_dS: n_points x n_dims x n_dims\n # dW_db: n_points x n_weights x n_dims\n\n # dW/dp is simply the concatenation of dX_dq with dX_db\n dW_dp = np.hstack((dW_dq, dW_db))\n # dW_dp: n_points x n_params x n_dims\n\n dW_dx = self.transform.jacobian_points(points)\n #dW_dx = np.dot(dW_dx, self.global_transform.linear_component.T)\n # dW_dx: n_points x n_dims x n_dims\n\n #TODO: Can we do this without splitting across the two dimensions?\n dW_dx_x = dW_dx[:, 0, :].flatten()[..., None]\n dW_dx_y = dW_dx[:, 1, :].flatten()[..., None]\n dW_dp_0_mat = np.reshape(dW_dp_0, (n_points * self.n_dims,\n self.n_parameters))\n dW_dx_dW_dp_0 = dW_dp_0_mat * dW_dx_x + dW_dp_0_mat * dW_dx_y\n dW_dx_dW_dp_0 = np.reshape(dW_dx_dW_dp_0,\n (n_points, self.n_parameters, self.n_dims))\n # dW_dx: n_points x n_dims x n_dims\n # dW_dp_0: n_points x n_params x n_dims\n # dW_dx_dW_dp_0: n_points x n_params x n_dims\n\n J = np.einsum('ijk, ilk -> jl', dW_dp, dW_dx_dW_dp_0)\n H = np.einsum('ijk, ilk -> jl', dW_dp, dW_dp)\n\n Jp = np.linalg.solve(H, J)\n # Jp: n_params x n_params\n\n self.from_vector_inplace(self.as_vector() + np.dot(Jp, delta))", "def delta(self, day, F_min, sigma, lmda_center):\n lmdas, flux = self.data[day]\n dev = flux - self.flux_model(lmdas, F_min, sigma, lmda_center)\n return np.sum(np.power(dev, 2))", "def delta(self, k, s):\n if not self.time_dependent_delta:\n return self._delta\n else:\n return self._delta * (6 / (np.pi**2 * self.count[k][s] **2))", "def _derW(self, w, x, y, z):\n # This may look strange, as we call the derivativeX() method to get the\n # derivative with respect to w, but that's just a quirk of 4D interpolations\n # beginning with w rather than x. 
The derivative wrt the first dimension\n # of an element of wxInterpolators is the w-derivative of the main function.\n if _isscalar(x):\n y_pos = max(min(np.searchsorted(self.y_list, y), self.y_n - 1), 1)\n z_pos = max(min(np.searchsorted(self.z_list, z), self.z_n - 1), 1)\n alpha = (y - self.y_list[y_pos - 1]) / (\n self.y_list[y_pos] - self.y_list[y_pos - 1]\n )\n beta = (z - self.z_list[z_pos - 1]) / (\n self.z_list[z_pos] - self.z_list[z_pos - 1]\n )\n dfdw = (\n (1 - alpha)\n * (1 - beta)\n * self.wxInterpolators[y_pos - 1][z_pos - 1].derivativeX(w, x)\n + (1 - alpha)\n * beta\n * self.wxInterpolators[y_pos - 1][z_pos].derivativeX(w, x)\n + alpha\n * (1 - beta)\n * self.wxInterpolators[y_pos][z_pos - 1].derivativeX(w, x)\n + alpha * beta * self.wxInterpolators[y_pos][z_pos].derivativeX(w, x)\n )\n else:\n m = len(x)\n y_pos = np.searchsorted(self.y_list, y)\n y_pos[y_pos > self.y_n - 1] = self.y_n - 1\n y_pos[y_pos < 1] = 1\n z_pos = np.searchsorted(self.z_list, z)\n z_pos[z_pos > self.z_n - 1] = self.z_n - 1\n z_pos[z_pos < 1] = 1\n dfdw = np.zeros(m) + np.nan\n for i in range(1, self.y_n):\n for j in range(1, self.z_n):\n c = np.logical_and(i == y_pos, j == z_pos)\n if np.any(c):\n alpha = (y[c] - self.y_list[i - 1]) / (\n self.y_list[i] - self.y_list[i - 1]\n )\n beta = (z[c] - self.z_list[j - 1]) / (\n self.z_list[j] - self.z_list[j - 1]\n )\n dfdw[c] = (\n (1 - alpha)\n * (1 - beta)\n * self.wxInterpolators[i - 1][j - 1].derivativeX(w[c], x[c])\n + (1 - alpha)\n * beta\n * self.wxInterpolators[i - 1][j].derivativeX(w[c], x[c])\n + alpha\n * (1 - beta)\n * self.wxInterpolators[i][j - 1].derivativeX(w[c], x[c])\n + alpha\n * beta\n * self.wxInterpolators[i][j].derivativeX(w[c], x[c])\n )\n return dfdw", "def delta(self) -> float:\n return self._delta", "def calcDeltaHiddenLayer(self, WeightedDelta):\r\n return self.prevZ*(1.0-self.prevZ)*(WeightedDelta)", "def calcDeltaHiddenLayer(self, WeightedDelta):\r\n return self.prevZ*(1.0-self.prevZ)*(WeightedDelta)", "def delta(self):\n return (self._stages[EStage.CURRENT] - self._stages[EStage.START]) \\\n / (self._stages[EStage.END] - self._stages[EStage.START])", "def _derW(self, w, x, y, z):\n if _isscalar(w):\n x_pos = max(min(np.searchsorted(self.x_list, x), self.x_n - 1), 1)\n y_pos = max(min(np.searchsorted(self.y_list, y), self.y_n - 1), 1)\n z_pos = max(min(np.searchsorted(self.z_list, z), self.z_n - 1), 1)\n alpha = (x - self.x_list[x_pos - 1]) / (\n self.x_list[x_pos] - self.x_list[x_pos - 1]\n )\n beta = (y - self.y_list[y_pos - 1]) / (\n self.y_list[y_pos] - self.y_list[y_pos - 1]\n )\n gamma = (z - self.z_list[z_pos - 1]) / (\n self.z_list[z_pos] - self.z_list[z_pos - 1]\n )\n dfdw = (\n (1 - alpha)\n * (1 - beta)\n * (1 - gamma)\n * self.wInterpolators[x_pos - 1][y_pos - 1][z_pos - 1]._der(w)\n + (1 - alpha)\n * (1 - beta)\n * gamma\n * self.wInterpolators[x_pos - 1][y_pos - 1][z_pos]._der(w)\n + (1 - alpha)\n * beta\n * (1 - gamma)\n * self.wInterpolators[x_pos - 1][y_pos][z_pos - 1]._der(w)\n + (1 - alpha)\n * beta\n * gamma\n * self.wInterpolators[x_pos - 1][y_pos][z_pos]._der(w)\n + alpha\n * (1 - beta)\n * (1 - gamma)\n * self.wInterpolators[x_pos][y_pos - 1][z_pos - 1]._der(w)\n + alpha\n * (1 - beta)\n * gamma\n * self.wInterpolators[x_pos][y_pos - 1][z_pos]._der(w)\n + alpha\n * beta\n * (1 - gamma)\n * self.wInterpolators[x_pos][y_pos][z_pos - 1]._der(w)\n + alpha\n * beta\n * gamma\n * self.wInterpolators[x_pos][y_pos][z_pos]._der(w)\n )\n else:\n m = len(x)\n x_pos = np.searchsorted(self.x_list, x)\n x_pos[x_pos 
> self.x_n - 1] = self.x_n - 1\n y_pos = np.searchsorted(self.y_list, y)\n y_pos[y_pos > self.y_n - 1] = self.y_n - 1\n y_pos[y_pos < 1] = 1\n z_pos = np.searchsorted(self.z_list, z)\n z_pos[z_pos > self.z_n - 1] = self.z_n - 1\n z_pos[z_pos < 1] = 1\n dfdw = np.zeros(m) + np.nan\n for i in range(1, self.x_n):\n for j in range(1, self.y_n):\n for k in range(1, self.z_n):\n c = np.logical_and(\n np.logical_and(i == x_pos, j == y_pos), k == z_pos\n )\n if np.any(c):\n alpha = (x[c] - self.x_list[i - 1]) / (\n self.x_list[i] - self.x_list[i - 1]\n )\n beta = (y[c] - self.y_list[j - 1]) / (\n self.y_list[j] - self.y_list[j - 1]\n )\n gamma = (z[c] - self.z_list[k - 1]) / (\n self.z_list[k] - self.z_list[k - 1]\n )\n dfdw[c] = (\n (1 - alpha)\n * (1 - beta)\n * (1 - gamma)\n * self.wInterpolators[i - 1][j - 1][k - 1]._der(w[c])\n + (1 - alpha)\n * (1 - beta)\n * gamma\n * self.wInterpolators[i - 1][j - 1][k]._der(w[c])\n + (1 - alpha)\n * beta\n * (1 - gamma)\n * self.wInterpolators[i - 1][j][k - 1]._der(w[c])\n + (1 - alpha)\n * beta\n * gamma\n * self.wInterpolators[i - 1][j][k]._der(w[c])\n + alpha\n * (1 - beta)\n * (1 - gamma)\n * self.wInterpolators[i][j - 1][k - 1]._der(w[c])\n + alpha\n * (1 - beta)\n * gamma\n * self.wInterpolators[i][j - 1][k]._der(w[c])\n + alpha\n * beta\n * (1 - gamma)\n * self.wInterpolators[i][j][k - 1]._der(w[c])\n + alpha\n * beta\n * gamma\n * self.wInterpolators[i][j][k]._der(w[c])\n )\n return dfdw", "def calculate_delta(self):\n rho_des_index, distance, data_size = self.rho_des_index, self.distance, self.data_size\n self.result[rho_des_index[0]][1] = -1\n for i in range(1, data_size):\n for j in range(0, i):\n old_i, old_j = rho_des_index[i], rho_des_index[j]\n min_pos, max_pos = min(old_j, old_i), max(old_j, old_i)\n if distance[(min_pos, max_pos)] < self.result[old_i][1]:\n self.result[old_i][1] = distance[(min_pos, max_pos)]\n self.master[old_i] = old_j\n self.result[rho_des_index[0]][1] = max(self.result[:, 1])", "def find_delta(w, bw):\n maxabs_w = np.max(np.abs(w.d)) + np.finfo(np.float32).eps\n\n if bw > 4:\n return 2**(np.ceil(np.log2(maxabs_w/(2**(bw-1)-1))))\n else:\n return 2**(np.floor(np.log2(maxabs_w/(2**(bw-1)-1))))", "def W(self):\n if not self.isVaild():\n pass\n return self.Wq() + 1.0/self.muy", "def update_delta_field(self, static, moving):\n if not self.setup_called:\n self.setup(static,moving)\n\n self.delta = (static - moving)", "def get_delta(self, index):\n return self.function[index] - self.function[index-1]", "def ou_change(self,dt,mu,L,delta):\n\n dW = self.bm_change(dt=dt,delta=delta)\n ds = L * (mu - self.Coord) * dt + dW\n return ds", "def delta(z, a, y):\n\t\treturn np.subtract(a, y) * sigmoid_derivative(z) ######## A MODIFIER", "def w_DE(self, z):\n return self.w0+self.wa*z/(1.+z)", "def bm_change(self,dt,delta):\n change = norm.rvs(loc=0,size=1,scale=delta**2*dt)\n return change", "def define_ufl_equations_diff(self):\n\n # Derivatives of velocity integration equation.\n if self.f1 != 0:\n self.df1_du = dlf.derivative(self.f1, self.displacement, self.trial_vector)\n self.df1_dv = dlf.derivative(self.f1, self.velocity, self.trial_vector)\n else:\n self.df1_du = 0\n self.df1_dv = 0\n self.df1_dp = 0 # This is always zero.\n\n # Derivatives of momentum equation.\n if self.displacement != 0:\n self.df2_du = dlf.derivative(self.f2, self.displacement, self.trial_vector)\n else:\n self.df2_du = 0\n\n if self.velocity != 0:\n self.df2_dv = dlf.derivative(self.f2, self.velocity, self.trial_vector)\n else:\n self.df2_dv = 0\n\n if 
self.pressure != 0:\n self.df2_dp = dlf.derivative(self.f2, self.pressure, self.trial_scalar)\n else:\n self.df2_dp = 0\n\n # Derivatives of incompressibility equation.\n if self.f3 != 0:\n if self.displacement != 0:\n self.df3_du = dlf.derivative(self.f3, self.displacement, self.trial_vector)\n else:\n self.df3_du = 0\n\n if self.velocity != 0:\n self.df3_dv = dlf.derivative(self.f3, self.velocity, self.trial_vector)\n else:\n self.df3_dv = 0\n\n self.df3_dp = dlf.derivative(self.f3, self.pressure, self.trial_scalar)\n else:\n self.df3_du = 0\n self.df3_dv = 0\n self.df3_dp = 0\n\n return None", "def select_delta(self, dist_post_update, current_iteration):\n if current_iteration == 1:\n delta = 0.1 * (self.clip_max - self.clip_min)\n else:\n if self.constraint == \"l2\":\n delta = np.sqrt(self.d) * self.theta * dist_post_update\n elif self.constraint == \"linf\":\n delta = self.d * self.theta * dist_post_update\n\n return delta", "def Delta(z):\n return (18*np.pi**2 - 82*cosmology.Ode(z) - 39*cosmology.Ode(z)**2) / cosmology.Om(z)", "def w_to_d(self, wx, wy):\r\n dx = (wx - self.wxmin) * self.xscale + self.dxmin\r\n dy = (wy - self.wymin) * self.yscale + self.dymin\r\n return dx, dy", "def _derW(self, w, x, y, z):\n if _isscalar(w):\n w_pos = max(min(self.wSearchFunc(self.w_list, w), self.w_n - 1), 1)\n x_pos = max(min(self.xSearchFunc(self.x_list, x), self.x_n - 1), 1)\n y_pos = max(min(self.ySearchFunc(self.y_list, y), self.y_n - 1), 1)\n z_pos = max(min(self.zSearchFunc(self.z_list, z), self.z_n - 1), 1)\n else:\n w_pos = self.wSearchFunc(self.w_list, w)\n w_pos[w_pos < 1] = 1\n w_pos[w_pos > self.w_n - 1] = self.w_n - 1\n x_pos = self.xSearchFunc(self.x_list, x)\n x_pos[x_pos < 1] = 1\n x_pos[x_pos > self.x_n - 1] = self.x_n - 1\n y_pos = self.ySearchFunc(self.y_list, y)\n y_pos[y_pos < 1] = 1\n y_pos[y_pos > self.y_n - 1] = self.y_n - 1\n z_pos = self.zSearchFunc(self.z_list, z)\n z_pos[z_pos < 1] = 1\n z_pos[z_pos > self.z_n - 1] = self.z_n - 1\n i = w_pos # for convenience\n j = x_pos\n k = y_pos\n l = z_pos\n beta = (x - self.x_list[j - 1]) / (self.x_list[j] - self.x_list[j - 1])\n gamma = (y - self.y_list[k - 1]) / (self.y_list[k] - self.y_list[k - 1])\n delta = (z - self.z_list[l - 1]) / (self.z_list[l] - self.z_list[l - 1])\n dfdw = (\n (\n (1 - beta)\n * (1 - gamma)\n * (1 - delta)\n * self.f_values[i, j - 1, k - 1, l - 1]\n + (1 - beta) * (1 - gamma) * delta * self.f_values[i, j - 1, k - 1, l]\n + (1 - beta) * gamma * (1 - delta) * self.f_values[i, j - 1, k, l - 1]\n + (1 - beta) * gamma * delta * self.f_values[i, j - 1, k, l]\n + beta * (1 - gamma) * (1 - delta) * self.f_values[i, j, k - 1, l - 1]\n + beta * (1 - gamma) * delta * self.f_values[i, j, k - 1, l]\n + beta * gamma * (1 - delta) * self.f_values[i, j, k, l - 1]\n + beta * gamma * delta * self.f_values[i, j, k, l]\n )\n - (\n (1 - beta)\n * (1 - gamma)\n * (1 - delta)\n * self.f_values[i - 1, j - 1, k - 1, l - 1]\n + (1 - beta)\n * (1 - gamma)\n * delta\n * self.f_values[i - 1, j - 1, k - 1, l]\n + (1 - beta)\n * gamma\n * (1 - delta)\n * self.f_values[i - 1, j - 1, k, l - 1]\n + (1 - beta) * gamma * delta * self.f_values[i - 1, j - 1, k, l]\n + beta\n * (1 - gamma)\n * (1 - delta)\n * self.f_values[i - 1, j, k - 1, l - 1]\n + beta * (1 - gamma) * delta * self.f_values[i - 1, j, k - 1, l]\n + beta * gamma * (1 - delta) * self.f_values[i - 1, j, k, l - 1]\n + beta * gamma * delta * self.f_values[i - 1, j, k, l]\n )\n ) / (self.w_list[i] - self.w_list[i - 1])\n return dfdw", "def delta_func(self, st):\n res0 = 
st._state['visible']['reserve'][0]\n res1 = st._state['visible']['reserve'][1]\n number = st._state['visible']['number']\n if st._state['visible']['turn'] is 0:\n delta = res0-res1\n else:\n delta = res1-res0\n return number, delta", "def calculate_delta(self, a, label, layer):\n diff = a - label\n if self.loss == 'mse':\n delta = diff * layer.activation_derivative(layer.z)\n layer.delta = delta\n elif self.loss == 'ce':\n delta = diff\n else:\n raise ValueError('delta for this loss function is not implemented')\n return delta", "def Gradient(Walker,particle):\n\n h=0.001\n dPsi = zeros(shape=shape(Walker.Re[particle]))\n for i in range(Walker.sys_dim):\n Y=Walker.Re[particle][i]\n Walker.Re[particle][i]-=h\n wfs1=wfs(Walker)\n Walker.Re[particle][i]+=2.0*h\n wfs2=wfs(Walker)\n dPsi[i] = (wfs2-wfs1)/2/h\n Walker.Re[particle][i]=Y\n\n return dPsi", "def get_delta(k, c=1.0874, s=1.0187):\n delta_max = k*k * (c + s*k)\n return 0.84 * delta_max", "def test_delta_i_output_multi(self):\n\n # Current node\n n = Node(value=0.993)\n\n # K-layer nodes\n k1 = Node(value=0.767)\n k1.delta = 0.021\n k1.weights = [0.6, 0.6]\n\n k2 = Node(value=0.767)\n k2.delta = 0.021\n k2.weights = [0.6, 0.6]\n\n # K-layer\n k = Layer(0)\n k.nodes.append(k1)\n k.nodes.append(k2)\n\n self.assertEqual(round(n._delta_i(k, 0), 6), 0.000175)", "def test_liemdframe_calc_deltaE(self):\n\n self.mdframe.calc_deltaE()\n self.assertTrue({'vdw_1', 'coul_1', 'vdw_2', 'coul_2', 'vdw_3',\n 'coul_3', 'vdw_4', 'coul_4', 'vdw_5', 'coul_5'}.issubset(set(self.mdframe.columns)))\n self.assertEqual(self.mdframe['vdw_1'].count(), 1000)", "def ke(self):\n self._obj['w'] = (self._obj['u'])**2 + (self._obj['v'])**2\n\n if len(self._obj.attrs['units']) == 4:\n vel_units = self._obj.attrs['units'][-1]\n self._obj.attrs['units'].append(f'({vel_units})^2')\n else:\n vel_units = self._obj.attrs['units'][-2]\n self._obj.attrs['units'][-1] = (f'({vel_units})^2')\n return self._obj", "def _delta(self, output, err, neuron):\n return neuron._g_prime(output) * err", "def _derW(self, w, x, y, z):\n raise NotImplementedError()", "def prop(self, delta: float) -> tuple[float, float, float, float, float]:\n mean_motion, ecc, raan, argp, nu = \\\n MathCore.coe_update(delta, self.mean_motion, self.ndot2, self.ecc, self.eccdot, self.raan,\n self.raandot, self.argp, self.argpdot, self.mean_anom)\n\n return mean_motion, ecc, raan, argp, nu", "def define_ufl_stress_work_diff(self):\n\n if hasattr(self, 'ufl_stress_work_diff'):\n return None\n\n if self.displacement != 0:\n # Derivative of stress term w.r.t. 
to displacement.\n self.ufl_stress_work_du = dlf.derivative(self.ufl_stress_work,\n self.displacement,\n self.trial_vector)\n else:\n self.ufl_stress_work_du = 0\n\n if self.velocity != 0:\n self.ufl_stress_work_dv = dlf.derivative(self.ufl_stress_work,\n self.velocity,\n self.trial_vector)\n else:\n self.ufl_stress_work_dv = 0\n\n if self.pressure != 0:\n self.ufl_stress_work_dp = dlf.derivative(self.ufl_stress_work,\n self.pressure,\n self.trial_scalar)\n else:\n self.ufl_stress_work_dp = 0\n\n return None", "def test_delta_i_output_single(self):\n\n # Current node\n n = Node(value=0.767)\n\n # K-layer node\n k1 = Node(value=0.823)\n k1.delta = 0.120\n k1.weights = [0.2, 1.0]\n\n # K-layer\n k = Layer(0)\n k.nodes.append(k1)\n\n self.assertEqual(round(n._delta_i(k, 1), 3), 0.021)", "def deltaW(N, m, h):\n return np.random.normal(0., np.sqrt(h), (N, m))\n # return levy.rvs(0., 1e-11, (N, m))+np.random.normal(0., np.sqrt(h), (N, m)) #levy distribution\n # return cauchy.rvs(0., 1e-4, (N, m)) #Cauchy distribution", "def acW(self):\n return self.fuselageLength * self.posFraction", "def derivativeW(self, *args):\n if self.n_dims >= 4:\n j = 0\n else:\n assert (\n False\n ), \"Derivative with respect to W can't be called when n_dims < 4!\"\n if self.i_dim == j:\n return np.ones_like(*args[0])\n else:\n return np.zeros_like(*args[0])", "def calcDelta(self, energy1, energy2):\n \n return math.fabs(energy2-energy1)", "def computeW(self):\n E = np.where(self.v > 0, 1, -1)\n # theshold the connections to only -1,1\n binary_weights = np.where(self.c > 0, 1, self.c)\n binary_weights = np.where(binary_weights < 0, -1, binary_weights)\n W = np.sum(binary_weights * np.dot(E.reshape(-1,1), E.reshape(1,-1))) # W = C * E * E\n self.W = W\n if np.sum(binary_weights) != 0:\n self.W = self.W / np.sum(binary_weights) # W / W*\n return self.W", "def weight_update(u_ff, u_wc, alpha, beta, w, fan_all):\r\n mult_wc = np.matmul(np.reshape(hard_sigmoid_array(u_wc), (fan_all, 1)),\r\n np.reshape(hard_sigmoid_array(u_wc), (1, fan_all)))\r\n mult_ff = np.matmul(np.reshape(hard_sigmoid_array(u_ff), (fan_all, 1)),\r\n np.reshape(hard_sigmoid_array(u_ff), (1, fan_all)))\r\n delta_w = alpha * (1 / beta) * (mult_wc - mult_ff)\r\n delta_w[np.diag_indices(fan_all)] = 0\r\n w = w + delta_w\r\n return w", "def calculate_w(dataframe_row, prev_w, prev_y, df_len):\n vector_w = prev_w + (prev_y / df_len * (dataframe_row - prev_y * prev_w))\n norm_vector_w = vector_w / np.linalg.norm(vector_w)\n return norm_vector_w.to_numpy()", "def update_weights(self):\n activation, activation_p = self.feedforward()\n # initialize delta_weights\n delta_w = np.zeros(2)\n # simultaneous calculate delta_weights\n for i, element in enumerate(self.y):\n delta_w += (activation[i]-element)*(activation_p[i])*self.X[i]\n # update weight\n self.weights -= self.alfa*delta_w", "def __call__(self, w):\n l1_term = self.alpha * np.linalg.norm(w, 1)\n l2_term = self.alpha * 0.5 * np.linalg.norm(w, 2)\n\n return self.r * l1_term + (1 - self.r) * l2_term", "def update(self, final_delta = None):\n l = len(self.derivatives)\n\n if final_delta:\n #self.derivatives[ l - 1 ] += final_delta NOTE: not supported in CodeSkulptor\n self.derivatives[ l - 1 ] = self.derivatives[ l - 1 ] + final_delta\n\n for i in range(l - 2, -1, -1):\n #self.derivatives[ i ] += self.derivatives[ i + 1 ] NOTE: not supported in CodeSkulptor\n self.derivatives[ i ] = self.derivatives[ i + 1 ] + self.derivatives[ i ]", "def calc_net_generation_wind (self):\n self.net_generation_wind = 
self.generation_wind_proposed - \\\n self.transmission_losses -\\\n self.excess_energy\n #~ print 'self.net_generation_wind',self.net_generation_wind", "def delta_e_76(lab1, lab2):\n\n l1, a1, b1 = lab1\n l2, a2, b2 = lab2\n return (l1 - l2) ** 2 + (a1 - a2) ** 2 + (b1 - b2) ** 2", "def psi_wf(self, vw, d1, d2, ns, tl):\n\t osmotic = (R*299./VW)*np.log((((vw/self.ZW)*self.ZW)/(VW))/((((vw/self.ZW)*self.ZW)/(VW))+ns))/10**6 #MPa\n\t turgor = ((vw/self.ZW) - d1)**d2#MPa\n\t return turgor+osmotic #MPa ", "def _calpara(self):\n self.up = math.exp(self.sigma*math.sqrt(self.deltatime))\n self.down = math.exp(-self.sigma*math.sqrt(self.deltatime))\n self.upprob = (math.exp((self.r-self.d)*self.deltatime)-self.down)/(self.up-self.down)", "def delta_P(P_old,P_new):\n delta = 0\n\n n = P_old.shape[0]\n\n for i in range(n):\n for j in range(n):\n delta += (P_old[i,j] - P_new[i,j])**2\n\n return (delta / 4.)**(0.5)", "def calcUpdateByRows(self, rows):\n\n delta_w, delta_hb, delta_vb = \\\n zeros((self.rbm.visibleDim, self.rbm.hiddenDim)), \\\n zeros(self.rbm.hiddenDim), zeros(self.rbm.visibleDim)\n\n for row in rows:\n dw, dhb, dvb = self.calcUpdateByRow(row)\n delta_w += dw\n delta_hb += dhb\n delta_vb += dvb\n\n delta_w /= len(rows)\n delta_hb /= len(rows)\n delta_vb /= len(rows)\n\n # !!! note that this delta is only the 'theoretical' delta\n return delta_w, delta_hb, delta_vb", "def equalize_wp_delta(waypoints, delta_wp=0.5):\n dist = np.zeros(waypoints.shape[0])\n dist_vector = np.sum((waypoints[1:]\n - waypoints[:-1])**2, axis=1)**0.5\n dist[1:] = np.cumsum(dist_vector)\n\n xa, xb = np.zeros((dist.size, 2)), np.zeros((dist.size, 2))\n for j in range(dist.size - 1):\n xa[j, :] = np.matmul(np.linalg.inv([[dist[j], 1], [dist[j + 1], 1]]),\n [waypoints[j, 0], waypoints[j+1, 0]])\n xb[j, :] = np.matmul(np.linalg.inv([[dist[j], 1], [dist[j + 1], 1]]),\n [waypoints[j, 1], waypoints[j+1, 1]])\n\n d = np.arange(dist[0], dist[-1], delta_wp)\n new_wp = np.zeros((d.size, 2))\n\n k = 0\n for i, di in enumerate(d):\n while di > dist[k]:\n k += 1\n x = xa[k, 0]*di + xa[k, 1]\n y = xb[k, 0]*di + xb[k, 1]\n new_wp[i, :] = [x, y]\n\n return new_wp[:-1]", "def diff(self, delta: int | float | pd.Timedelta) -> Stairs:\n return self - self.shift(delta)", "def diff_fn(\n mu_i: tf.Tensor,\n ddu_n_i: tf.Tensor,\n ddu_t_i: tf.Tensor,\n ) -> tf.Tensor:\n return mu_i * (4.0 / 3.0 * ddu_n_i + 1.0 / 3.0 * ddu_t_i)", "def set_wdiff(self):\n try:\n self.wdiff=self.mdiff*self.ws.coef[1]\n except:\n self.wdiff=self.mdiff", "def adapt(self, d, x):\n y = np.dot(self.w, x)\n e = d - y\n self.w += self.mu * e * x\n return y, e", "def _get_gradient(self, delta=0.01) -> (float, float, float):\n mse_before = _get_mse(deepcopy(self._controller), len(self._ts), self._dt)\n\n dp_controller = PIDController(deepcopy(self._sim), self._setpoint, self._controller.kp + delta, self._controller.ki, self._controller.kd)\n dp = _get_mse(dp_controller, len(self._ts), self._dt) - mse_before\n\n di_controller = PIDController(deepcopy(self._sim), self._setpoint, self._controller.kp, self._controller.ki + delta, self._controller.kd)\n di = _get_mse(di_controller, len(self._ts), self._dt) - mse_before\n\n dd_controller = PIDController(deepcopy(self._sim), self._setpoint, self._controller.kp, self._controller.ki, self._controller.kd + delta)\n dd = _get_mse(dd_controller, len(self._ts), self._dt) - mse_before\n\n return (dp/delta, di/delta, dd/delta)", "def _update_w(self, idx):\n self.w = ((self._w - 0.4) * (self._generations - idx)) /\\\n 
(self._generations + 0.4)", "def get_up(t_, w_):\n return t_ - tf.constant(w_)", "def KroDelta(a,b):\n \n if (a==b):\n return 1\n else:\n return 0", "def calculate(self):\n\n fx_r = self.fx + self.r\n fy_r = self.fy\n gx_r = self.gx + self.r\n gy_r = self.gy\n\n fg_dx = np.linspace(self.fx[0], gx_r[-1], self.N)\n ff_dx = np.linspace(self.fx[0], fx_r[-1], self.N)\n gg_dx = np.linspace(self.gx[0], gx_r[-1], self.N)\n\n xCorrfg = integrate.trapz(self.fy*gy_r, fg_dx)\n aCorrff = integrate.trapz(self.fy*fy_r, ff_dx)\n aCorrgg = integrate.trapz(self.gy*gy_r, gg_dx)\n\n xCorrfg_w = integrate.trapz(self.w*xCorrfg, self.r)\n aCorrff_w = integrate.trapz(self.w*aCorrff, self.r)\n aCorrgg_w = integrate.trapz(self.w*aCorrgg, self.r)\n \n return xCorrfg_w / np.sqrt(aCorrff_w * aCorrgg_w)", "def butter_lp_ord(wp, ws, deltap, deltas, fs=1):\n r = ((1/deltas)**2 - 1) / ((1/(1-deltap))**2 - 1)\n print(r)\n t = mp.tan(mp.pi*ws/fs)/mp.tan(mp.pi*wp/fs)\n print(t)\n n = mp.log(r) / (2*mp.log(t))\n return n", "def get_p_weights(self, delta, batch, batch_ixs):\n # if self.is_rnn:\n # s = s[:, -1]\n if self.training_params.prioritize:\n p_total = self.replay.tree[0].sum\n p = np.array([h.delta for h in batch]) / p_total\n w = 1. / p\n w /= np.max(w)\n for ix, h, d in zip(batch_ixs, batch, delta):\n h.delta = np.nan_to_num(np.abs(d)) # catch nans\n self.replay.update(ix)\n else:\n w = np.ones(len(batch))\n return w", "def compute_deltas(self, target):\n self.deltas[-1] = self.zetas[-1] - target\n for i in reversed(range(self.n_layers-1)):\n self.deltas[i] = np.multiply(np.dot(self.deltas[i+1], self.weights[i+1].T),\n self.activation_fdx(self.activations[i]))\n return self.deltas", "def updateW(self, trj_Sp_theta, W_0):\n def fun(x):\n global trj_Sp_theta_z\n #W_0 = [[x[0], x[1]], [x[2], x[3]], [x[4], x[5]], [x[6], x[7]]] # sin cos\n W_0 = [[x[0], x[1]],[x[2], x[3]]] # with dir\n #W_0 = x\n r_0 = self.reward_trj(trj_Sp_theta, W_0) \n return -1*r_0 \n import numpy as np\n from scipy.optimize import minimize\n \n global trj_Sp_theta_z \n trj_Sp_theta_z = trj_Sp_theta\n alpha = 0.005\n alpha = 0.1\n delta = alpha\n cons = ({'type': 'eq',\n 'fun' : lambda x: np.array([np.sum(x)-1])},\n {'type': 'ineq',\n 'fun' : lambda x: np.array([np.min(x)])}, # greater than zero\n {'type': 'ineq',\n 'fun' : lambda x: np.array([-np.abs(x[0]-x0[0])+delta])}, # greater than zero\n {'type': 'ineq',\n 'fun' : lambda x: np.array([-np.abs(x[1]-x0[1])+delta])}, # greater than zero\n {'type': 'ineq',\n 'fun' : lambda x: np.array([-np.abs(x[2]-x0[2])+delta])}, # greater than zero\n {'type': 'ineq',\n 'fun' : lambda x: np.array([-np.abs(x[3]-x0[3])+delta])}) # greater than zero\n\n #x0 = W_0\n x0 = [W_0[0][0], W_0[0][1], W_0[1][0], W_0[1][1]] # with dir\n res = minimize(fun, x0, constraints=cons)\n x = res.x\n W = [[x[0], x[1]],[x[2], x[3]]] # with dir\n return W" ]
[ "0.7014136", "0.67604864", "0.67501944", "0.6740648", "0.6670336", "0.65870315", "0.6544045", "0.6426848", "0.6362435", "0.6287381", "0.62232274", "0.6205806", "0.6161899", "0.6146763", "0.6138199", "0.6127331", "0.6109769", "0.60939455", "0.60689414", "0.6068591", "0.6068253", "0.6063533", "0.60500026", "0.60496473", "0.60162145", "0.6010421", "0.6001335", "0.5991094", "0.59758884", "0.59758884", "0.59758884", "0.59758884", "0.59694266", "0.595667", "0.59465915", "0.5938026", "0.59238124", "0.5923655", "0.59214437", "0.58999866", "0.5897762", "0.5886341", "0.5886341", "0.5882236", "0.5879578", "0.58768475", "0.5855937", "0.5846813", "0.58316725", "0.5824389", "0.5801482", "0.5781898", "0.57760394", "0.5755953", "0.57469636", "0.57463086", "0.57447743", "0.5743687", "0.5740998", "0.57308245", "0.5728683", "0.5711692", "0.57084006", "0.56998324", "0.5696935", "0.56879914", "0.56802535", "0.567846", "0.56624913", "0.56611097", "0.565975", "0.5649975", "0.56494653", "0.56324154", "0.5629486", "0.56290096", "0.5628914", "0.56225437", "0.5601193", "0.55979383", "0.5595541", "0.55884147", "0.55872273", "0.5565695", "0.55654067", "0.55618775", "0.5555242", "0.5554495", "0.55508816", "0.5549449", "0.5542557", "0.55418", "0.5532093", "0.5529588", "0.5522384", "0.55171293", "0.5514769", "0.55111194", "0.55107653", "0.54986644", "0.5496654" ]
0.0
-1
once you have "Delta_w", it makes $ w < w + Delta_w
def updateweigths():
    for i_lay in range(1, len(layers)):
        layers[i_lay]["weigths"] += layers[i_lay]["Delta_w"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def optimise(w, w_delta):\n return w.assign(w - w_delta)", "def update_before(self, x: int, y: float, w: float) -> None:\n old_value = 0\n if x < len(self.x):\n old_value = self.y[x]\n self.update(x + 1, y, w)\n while len(self.x) < x:\n self.x.append(len(self.x))\n self.y.append(y)\n self.w.append(w)\n if self.w[x] <= w:\n self.x[x] = x\n self.y[x] = y\n self.w[x] = w\n pl = x - 1\n while pl >= 0 and (self.y[pl] == 0 or (self.w[pl] <= w and self.y[pl] == old_value)):\n self.y[pl] = y\n self.w[pl] = w\n pl -= 1", "def step(self, delta_l11, delta_l12, delta_l13, delta_l21, delta_l22, delta_l23):\n self.l11 += delta_l11; self.l12 += delta_l12; self.l13 += delta_l13\n self.l21 += delta_l11; self.l22 += delta_l12; self.l23 += delta_l13\n self.l21 += delta_l21; self.l22 += delta_l22; self.l23 += delta_l23\n # check that all tendon lenghts are within limit\n self.l11 = self.l1min if self.l11 < self.l1min else self.l11\n self.l12 = self.l1min if self.l12 < self.l1min else self.l12\n self.l13 = self.l1min if self.l13 < self.l1min else self.l13\n self.l11 = self.l1max if self.l11 > self.l1max else self.l11\n self.l12 = self.l1max if self.l12 > self.l1max else self.l12\n self.l13 = self.l1max if self.l13 > self.l1max else self.l13\n self.l21 = self.l2min if self.l21 < self.l2min else self.l21\n self.l22 = self.l2min if self.l22 < self.l2min else self.l22\n self.l23 = self.l2min if self.l23 < self.l2min else self.l23\n self.l21 = self.l2max if self.l21 > self.l2max else self.l21\n self.l22 = self.l2max if self.l22 > self.l2max else self.l22\n self.l23 = self.l2max if self.l23 > self.l2max else self.l23\n old_tip_vec = self.tip_vec2 # used for potential reward\n self.update_variables()\n new_tip_vec = self.tip_vec2 # used for potential reward\n reward = self.r_static\n return reward", "def downwashGradW(self):\n A = self.r / (self.r**2 + self.mTV**2)\n B = 0.4876 / (sqrt(self.r**2 + 0.6319 + self.mTV**2))\n C = 1 + (self.r**2 / (self.r**2 + 0.7915 + 5.0734 * self.mTV**2))**0.3113\n D = 1 - sqrt(self.mTV**2 / (1 + self.mTV**2))\n return self.Kepsilon * (A * B + C * D) * self.clAlphaW / (pi * self.aspectRatioW)", "def update(self, x: int, y: float, w: float) -> None:\n while len(self.x) < x - 1:\n self.x.append(len(self.x))\n self.y.append(y)\n self.w.append(w)\n if self.w[x - 1] <= w:\n self.x[x - 1] = x\n self.y[x - 1] = y\n self.w[x - 1] = w", "def backward_pass(self, delta):\r\n self.d_x = np.dot(delta, self.w.T)\r\n self.d_b = np.matmul(np.ones((1, delta.shape[0])), delta)\r\n self.d_w = np.dot(self.x.T, delta)\r\n return self.d_x", "def w_win(self, diff, draw_margin):\n x = diff - draw_margin\n v = self.v_win(diff, draw_margin)\n w = v * (v + x)\n if 0 < w < 1:\n return w\n raise _floating_point_error(self)", "def _update_w(self, idx):\n self.w = ((self._w - 0.4) * (self._generations - idx)) /\\\n (self._generations + 0.4)", "def new_w(w, d):\n\n if w.sum() > 0:\n next_w = w.copy()\n next_w[next_w > 0] -= 1\n return next_w\n else:\n if d[0] == 1:\n return np.array([51,0,0])\n elif d[1] == 1:\n return np.array([0,51,0])\n else:\n return np.array([0,0,51])", "def delta(self):\n return (self.upper-self.lower) / float(self.num_cells)", "def check_offset(self):\n\n for d in range(self.n_dmps):\n if abs(self.y0[d] - self.goal[d]) < 1e-4:\n self.goal[d] += 1e-4", "def delta(self) -> None:", "def epsilon_delta(self):", "def KroDelta(a,b):\n \n if (a==b):\n return 1\n else:\n return 0", "def delta_w(w_k, x, t, learning_rate):\n return -learning_rate * np.mean(gradient(w_k, x, t))", "def expose(self, 
w):\n # Compute the weighted sum of the firing inputs\n s = self.strength[list(w.offset)].sum()\n if self.training:\n return s >= self.H\n else:\n return s >= self.H*self.G", "def backward(self, delta_W_next):\n delta = delta_W_next * self._act.a_prime(self._z)\n delta_W = np.dot(delta, self._W.T)\n grad_w = np.dot(self._X.T, delta)\n grad_b = np.array(([np.sum(delta, axis=0)]))\n return grad_w, grad_b, delta_W", "def ccw(a, b, c):\n return (c.y - a.y) * (b.x - a.x) > (b.y - a.y) * (c.x - a.x)", "def equalize_wp_delta(waypoints, delta_wp=0.5):\n dist = np.zeros(waypoints.shape[0])\n dist_vector = np.sum((waypoints[1:]\n - waypoints[:-1])**2, axis=1)**0.5\n dist[1:] = np.cumsum(dist_vector)\n\n xa, xb = np.zeros((dist.size, 2)), np.zeros((dist.size, 2))\n for j in range(dist.size - 1):\n xa[j, :] = np.matmul(np.linalg.inv([[dist[j], 1], [dist[j + 1], 1]]),\n [waypoints[j, 0], waypoints[j+1, 0]])\n xb[j, :] = np.matmul(np.linalg.inv([[dist[j], 1], [dist[j + 1], 1]]),\n [waypoints[j, 1], waypoints[j+1, 1]])\n\n d = np.arange(dist[0], dist[-1], delta_wp)\n new_wp = np.zeros((d.size, 2))\n\n k = 0\n for i, di in enumerate(d):\n while di > dist[k]:\n k += 1\n x = xa[k, 0]*di + xa[k, 1]\n y = xb[k, 0]*di + xb[k, 1]\n new_wp[i, :] = [x, y]\n\n return new_wp[:-1]", "def find_delta(w, bw):\n maxabs_w = np.max(np.abs(w.d)) + np.finfo(np.float32).eps\n\n if bw > 4:\n return 2**(np.ceil(np.log2(maxabs_w/(2**(bw-1)-1))))\n else:\n return 2**(np.floor(np.log2(maxabs_w/(2**(bw-1)-1))))", "def weight_update(u_ff, u_wc, alpha, beta, w, fan_all):\r\n mult_wc = np.matmul(np.reshape(hard_sigmoid_array(u_wc), (fan_all, 1)),\r\n np.reshape(hard_sigmoid_array(u_wc), (1, fan_all)))\r\n mult_ff = np.matmul(np.reshape(hard_sigmoid_array(u_ff), (fan_all, 1)),\r\n np.reshape(hard_sigmoid_array(u_ff), (1, fan_all)))\r\n delta_w = alpha * (1 / beta) * (mult_wc - mult_ff)\r\n delta_w[np.diag_indices(fan_all)] = 0\r\n w = w + delta_w\r\n return w", "def diffEquation(self): \n self.posn_x += self.velocity_x * time_scaling \n self.velocity_y = self.velocity_y + GRAVITY # a crude equation incorporating gravity. \n self.posn_y += self.velocity_y * time_scaling \n canvas_1.create_oval( self.posn_x, self.posn_y, self.posn_x + self.ball_width, \n self.posn_y + self.ball_height, fill= self.color) \n self.detectWallCollision() # Has the ball collided with any container wall? 
", "def update_recurrent_weights_step(self):\n \n # update weights: hebbian term\n self.delta_Wee=self.learn_rate*(self.rr[0:self.N_e]-self.input_mean)*\\\n (self.rr[0:self.N_e].T-self.input_mean)\n \n self.W_ee+=self.dt*self.delta_Wee\n\n # update weights: normalize to fixed mean of incoming and outgoing weights\n self.W_ee-=(self.W_ee.mean(axis=1)-self.W_av_star)[:,np.newaxis]\n self.W_ee-=(self.W_ee.mean(axis=0)-self.W_av_star)[np.newaxis,:]\n \n # clip weights \n self.W_ee=np.clip(self.W_ee,0,self.W_max_ee)\n \n # update excitatory weights in the big weight matrix\n self.W[:self.N_e,:self.N_e]=self.W_ee", "def get_up(t_, w_):\n return t_ - tf.constant(w_)", "def _update_weights(self, alpha, delta):\n res = []\n for j, weight in enumerate(self._weights):\n self._weights[j] = weight + (alpha * delta * self._g_prime(self._in_j))\n #print(\"Prev weight: {} New weight: {}\".format(weight, self._weights[j]))\n res.append(self._weights[j] - weight)\n return res[0]", "def computeW(self):\n E = np.where(self.v > 0, 1, -1)\n # theshold the connections to only -1,1\n binary_weights = np.where(self.c > 0, 1, self.c)\n binary_weights = np.where(binary_weights < 0, -1, binary_weights)\n W = np.sum(binary_weights * np.dot(E.reshape(-1,1), E.reshape(1,-1))) # W = C * E * E\n self.W = W\n if np.sum(binary_weights) != 0:\n self.W = self.W / np.sum(binary_weights) # W / W*\n return self.W", "def updateW(self, trj_Sp_theta, W_0):\n def fun(x):\n global trj_Sp_theta_z\n #W_0 = [[x[0], x[1]], [x[2], x[3]], [x[4], x[5]], [x[6], x[7]]] # sin cos\n W_0 = [[x[0], x[1]],[x[2], x[3]]] # with dir\n #W_0 = x\n r_0 = self.reward_trj(trj_Sp_theta, W_0) \n return -1*r_0 \n import numpy as np\n from scipy.optimize import minimize\n \n global trj_Sp_theta_z \n trj_Sp_theta_z = trj_Sp_theta\n alpha = 0.005\n alpha = 0.1\n delta = alpha\n cons = ({'type': 'eq',\n 'fun' : lambda x: np.array([np.sum(x)-1])},\n {'type': 'ineq',\n 'fun' : lambda x: np.array([np.min(x)])}, # greater than zero\n {'type': 'ineq',\n 'fun' : lambda x: np.array([-np.abs(x[0]-x0[0])+delta])}, # greater than zero\n {'type': 'ineq',\n 'fun' : lambda x: np.array([-np.abs(x[1]-x0[1])+delta])}, # greater than zero\n {'type': 'ineq',\n 'fun' : lambda x: np.array([-np.abs(x[2]-x0[2])+delta])}, # greater than zero\n {'type': 'ineq',\n 'fun' : lambda x: np.array([-np.abs(x[3]-x0[3])+delta])}) # greater than zero\n\n #x0 = W_0\n x0 = [W_0[0][0], W_0[0][1], W_0[1][0], W_0[1][1]] # with dir\n res = minimize(fun, x0, constraints=cons)\n x = res.x\n W = [[x[0], x[1]],[x[2], x[3]]] # with dir\n return W", "def sweep25W(self):\n return 28.8", "def test_simplify_delta_of_two_ranges(free_alg):\n\n dr = free_alg\n p = dr.names\n tensor = dr.sum(KroneckerDelta(p.i, p.alpha) * p.v)\n assert tensor.n_terms == 1\n assert tensor.simplify_deltas() == 0\n assert tensor.simplify() == 0", "def unsafe(self): \n return self.distmin < self.distmax*0.5", "def _derW(self, w, x, y, z):\n if _isscalar(w):\n w_pos = max(min(self.wSearchFunc(self.w_list, w), self.w_n - 1), 1)\n x_pos = max(min(self.xSearchFunc(self.x_list, x), self.x_n - 1), 1)\n y_pos = max(min(self.ySearchFunc(self.y_list, y), self.y_n - 1), 1)\n z_pos = max(min(self.zSearchFunc(self.z_list, z), self.z_n - 1), 1)\n else:\n w_pos = self.wSearchFunc(self.w_list, w)\n w_pos[w_pos < 1] = 1\n w_pos[w_pos > self.w_n - 1] = self.w_n - 1\n x_pos = self.xSearchFunc(self.x_list, x)\n x_pos[x_pos < 1] = 1\n x_pos[x_pos > self.x_n - 1] = self.x_n - 1\n y_pos = self.ySearchFunc(self.y_list, y)\n y_pos[y_pos < 1] = 1\n y_pos[y_pos 
> self.y_n - 1] = self.y_n - 1\n z_pos = self.zSearchFunc(self.z_list, z)\n z_pos[z_pos < 1] = 1\n z_pos[z_pos > self.z_n - 1] = self.z_n - 1\n i = w_pos # for convenience\n j = x_pos\n k = y_pos\n l = z_pos\n beta = (x - self.x_list[j - 1]) / (self.x_list[j] - self.x_list[j - 1])\n gamma = (y - self.y_list[k - 1]) / (self.y_list[k] - self.y_list[k - 1])\n delta = (z - self.z_list[l - 1]) / (self.z_list[l] - self.z_list[l - 1])\n dfdw = (\n (\n (1 - beta)\n * (1 - gamma)\n * (1 - delta)\n * self.f_values[i, j - 1, k - 1, l - 1]\n + (1 - beta) * (1 - gamma) * delta * self.f_values[i, j - 1, k - 1, l]\n + (1 - beta) * gamma * (1 - delta) * self.f_values[i, j - 1, k, l - 1]\n + (1 - beta) * gamma * delta * self.f_values[i, j - 1, k, l]\n + beta * (1 - gamma) * (1 - delta) * self.f_values[i, j, k - 1, l - 1]\n + beta * (1 - gamma) * delta * self.f_values[i, j, k - 1, l]\n + beta * gamma * (1 - delta) * self.f_values[i, j, k, l - 1]\n + beta * gamma * delta * self.f_values[i, j, k, l]\n )\n - (\n (1 - beta)\n * (1 - gamma)\n * (1 - delta)\n * self.f_values[i - 1, j - 1, k - 1, l - 1]\n + (1 - beta)\n * (1 - gamma)\n * delta\n * self.f_values[i - 1, j - 1, k - 1, l]\n + (1 - beta)\n * gamma\n * (1 - delta)\n * self.f_values[i - 1, j - 1, k, l - 1]\n + (1 - beta) * gamma * delta * self.f_values[i - 1, j - 1, k, l]\n + beta\n * (1 - gamma)\n * (1 - delta)\n * self.f_values[i - 1, j, k - 1, l - 1]\n + beta * (1 - gamma) * delta * self.f_values[i - 1, j, k - 1, l]\n + beta * gamma * (1 - delta) * self.f_values[i - 1, j, k, l - 1]\n + beta * gamma * delta * self.f_values[i - 1, j, k, l]\n )\n ) / (self.w_list[i] - self.w_list[i - 1])\n return dfdw", "def eq(w, x):\n return (-w[1]*x - w[0]) / w[2]", "def test_W_end(self):\t\t\n self.assertAlmostEqual(attempt.W[-1], 9.494852380803035)", "def increase_left_boundary(self):\n self.L = self.L - 1.0\n self.Ne = self.Ne + 1", "def compute_ul_ur(self, dt):\n\n g = self.grid\n # compute the piecewise linear slopes\n # here we define the range cells considered, including 1 ghost cell\n # on each side\n ib = g.ng-1\n ie = g.ng + g.N\n\n u = g.u\n \n # initialize empty arrays\n dc = np.zeros((g.N+2*g.ng))\n dl = np.zeros((g.N+2*g.ng))\n dr = np.zeros((g.N+2*g.ng))\n \n dc[ib:ie+1] = 0.5*(u[ib+1:ie+2] - u[ib-1:ie ])\n dl[ib:ie+1] = u[ib+1:ie+2] - u[ib :ie+1]\n dr[ib:ie+1] = u[ib :ie+1] - u[ib-1:ie ]\n\n # minmod()\n # fabs is the absolute value for real valued inputs\n # where does np.where(condition,if true keep this value, else this)\n # d1 = 2.0*np.where(np.fabs(dl) < np.fabs(dr), dl, dr)\n d1 = np.zeros((len(dl)))\n for i in range(len(dl)):\n if np.fabs(dl[i]) < np.fabs(dr[i]):\n d1[i] = dl[i]\n else:\n d1[i] = dr[i]\n d1 = 2.0*d1\n \n d2 = np.zeros((len(dc)))\n for i in range(len(dc)):\n if np.fabs(dc[i]) < np.fabs(d1[i]):\n d2[i] = dc[i]\n else:\n d2[i] = d1[i] \n \n # d2 = np.where(np.fabs(dc) < np.fabs(d1), dc, d1)\n ldeltau = np.zeros((len(d2)))\n for i in range(len(d2)):\n if dl[i]*dr[i] > 0.0:\n ldeltau[i] = d2[i]\n else:\n ldeltau[i] = 0.0\n # ldeltau = np.where(dl*dr > 0.0, d2, 0.0)\n\n # interface states. 
\n # Note from hydro_examples:\n # note that there are 1 more interfaces than zones\n ul = np.zeros((g.N+2*g.ng))\n ur = np.zeros((g.N+2*g.ng))\n\n ur[ib:ie+2] = u[ib:ie+2] - \\\n 0.5*(1.0 + u[ib:ie+2]*dt/self.grid.dx)*ldeltau[ib:ie+2]\n\n ul[ib+1:ie+2] = u[ib:ie+1] + \\\n 0.5*(1.0 - u[ib:ie+1]*dt/self.grid.dx)*ldeltau[ib:ie+1]\n\n return ul, ur", "def compose_after_from_vector_inplace(self, delta):\n model_jacobian = self.pdm.model.jacobian\n points = self.pdm.model.mean.points\n n_points = self.pdm.model.mean.n_points\n\n # compute:\n # -> dW/dp when p=0\n # -> dW/dp when p!=0\n # -> dW/dx when p!=0 evaluated at the source landmarks\n\n # dW/dp when p=0 and when p!=0 are the same and simply given by\n # the Jacobian of the model\n dW_dp_0 = model_jacobian\n dW_dp = dW_dp_0\n # dW_dp_0: n_points x n_params x n_dims\n # dW_dp: n_points x n_params x n_dims\n\n dW_dx = self.transform.jacobian_points(points)\n # dW_dx: n_points x n_dims x n_dims\n\n #TODO: Can we do this without splitting across the two dimensions?\n dW_dx_x = dW_dx[:, 0, :].flatten()[..., None]\n dW_dx_y = dW_dx[:, 1, :].flatten()[..., None]\n dW_dp_0_mat = np.reshape(dW_dp_0, (n_points * self.n_dims,\n self.n_parameters))\n dW_dx_dW_dp_0 = dW_dp_0_mat * dW_dx_x + dW_dp_0_mat * dW_dx_y\n dW_dx_dW_dp_0 = np.reshape(dW_dx_dW_dp_0,\n (n_points, self.n_parameters, self.n_dims))\n # dW_dx: n_points x n_dims x n_dims\n # dW_dp_0: n_points x n_params x n_dims\n # dW_dx_dW_dp_0: n_points x n_params x n_dims\n\n J = np.einsum('ijk, ilk -> jl', dW_dp, dW_dx_dW_dp_0)\n H = np.einsum('ijk, ilk -> jl', dW_dp, dW_dp)\n\n Jp = np.linalg.solve(H, J)\n # Jp: n_params x n_params\n\n self.from_vector_inplace(self.as_vector() + np.dot(Jp, delta))\n return self", "def test_warping_distance(self):\n t = np.linspace(0, 1, 1000)\n w1 = FDataGrid([t**5], t)\n w2 = FDataGrid([t**3], t)\n\n d = warping_distance(w1, w2)\n np.testing.assert_allclose(d, np.arccos(np.sqrt(15) / 4), atol=1e-3)\n\n d = warping_distance(w2, w2)\n np.testing.assert_allclose(d, 0, atol=2e-2)", "def bm_change(self,dt,delta):\n change = norm.rvs(loc=0,size=1,scale=delta**2*dt)\n return change", "def adjust(variable, target, d=20):\n if variable>d+target:\n variable -= d\n elif variable<target-d:\n variable += d\n else:\n variable = target\n return variable", "def update(w, g, alpha = 0.03):\n return w + alpha * g", "def compute_subgradient(w, data):\n x, y = data\n return -x if w @ x < y else x", "def write(self, x, w):\n h = np.logical_xor(x, self.A).sum(axis=1)\n y = np.where(h < self.d)\n self.C[y] += -1 + 2*w", "def __le__(self, other):\n return self.x ** 2 + self.y ** 2 <= other.x ** 2 + other.y ** 2", "def W(self):\n if not self.isVaild():\n pass\n return self.Wq() + 1.0/self.muy", "def getDelta(self,u,w,v=None):\r\n if v==None :\r\n return self._deltaDot[u,w]\r\n elif self._sigma[u,v]==0 or self._sigma[u,w]==0 or self._sigma[w,v]==0:\r\n return 0.0\r\n elif (self._d[u,v]==self._d[u,w]+self._d[w,v]):\r\n return 1.0 * self._sigma[u,w]*self._sigma[w,v]/self._sigma[u,v]\r\n else:\r\n return 0.0", "def apply_gravity(pos, vels):\n vels = compare(0, 1, pos, vels)\n vels = compare(0, 2, pos, vels)\n vels = compare(0, 3, pos, vels)\n vels = compare(1, 2, pos, vels)\n vels = compare(1, 3, pos, vels)\n vels = compare(2, 3, pos, vels)\n return vels", "def _adj(w):\r\n return (w[:d * d] - w[d * d:]).reshape([d, d])", "def _encoder_diff(ref_count, new_count, forward=True):\n if forward and ref_count > new_count:\n return (Drive.MAX_DIST - ref_count) - (Drive.MIN_DIST - new_count)\n 
elif not forward and new_count > ref_count:\n return (Drive.MIN_DIST - new_count) - (Drive.MAX_DIST - ref_count)\n else:\n return new_count - ref_count", "def lev(w1, w2):\n\n if len(w1) < len(w2):\n # check if length of word1 is smaller than word2.\n # if so, call function and switch parameters\n return lev(w2, w1)\n elif len(w1) == 0:\n # if the length of word1 equals 0, that means that\n # the Lev' distance is the length of word2\n return len(w2)\n elif len(w2) == 0:\n # if the length of word2 equals 0, that means that\n # the Lev' distance is the length of word1\n return len(w1)\n elif w1 == w2:\n # check if words are simply the same\n return 0\n\n # thanks to the check above, we can assume that w2 is the longest word\n # we use this information to determine the range of 'previous'\n previous = range(len(w2) + 1)\n\n # DEBUG\n # print(previous)\n\n # iterate over the characters of the first word\n for a, char1 in enumerate(w1):\n # DEBUG\n # print(\"i -> \" + str(a))\n # print(\"char1 -> \" + str(char1))\n\n current = [a + 1]\n\n # iterate over the characters of the second word\n for b, char2 in enumerate(w2):\n # DEBUG\n # print(\"j -> \" + str(b))\n # print(\"\\tchar2 -> \" + str(char2))\n\n inserts = previous[b + 1] + 1\n deletions = current[b] + 1\n subs = previous[b] + (char1 != char2)\n\n # DEBUG\n # print(str(char1 != char2))\n # print(\"INSERTS -> \" + str(inserts))\n # print(\"DELS -> \" + str(deletions))\n # print(\"SUBS -> \" + str(subs))\n\n current.append(min(inserts, deletions, subs))\n\n # DEBUG\n # print(\"CURRENT -> \" + str(current))\n previous = current\n\n return previous[-1]", "def sweep50W(self):\n return 25.9", "def calc_tolerance(wt):\n return 1 - wt", "def calculate_w(dataframe_row, prev_w, prev_y, df_len):\n vector_w = prev_w + (prev_y / df_len * (dataframe_row - prev_y * prev_w))\n norm_vector_w = vector_w / np.linalg.norm(vector_w)\n return norm_vector_w.to_numpy()", "def increase_right_boundary(self):\n self.R = self.R + 1.0\n self.Ne = self.Ne + 1", "def vwf(self, vw, ev, gp, psi_l, lai, dt):\n\t return min(vw - self.qwf(self.vw, self.th, self.gp, self.psi_l, self.LAI, dt)*dt/10.**6, self.ZW)", "def backward_pass(self, delta):\n\n a = config['learning_rate']\n y = config['momentum_gamma']\n m = config['momentum']\n l = config['L2_penalty']\n\n # print(\"shape of delta incoming: \", delta.shape, \"shape of x: \", self.x.shape)\n self.d_x = delta.T @ self.x\n # print(\"SHAPE OF GRADIENT: \", self.d_x.shape)\n\n # gradient momentum\n self.w_inc = (a * self.d_x.T) + (y * self.d_v) - l * self.w\n \n # saving \n if m:\n self.d_v = self.w_inc\n else:\n self.d_v = np.zeros(self.w.shape)\n\n # backprop for bias weights\n x_0 = np.ones([len(delta), 1])\n\n self.d_b = delta.T @ x_0\n\n # print(\"shape of BIAS GRAD: \", self.d_b.shape)\n\n self.d_w = delta @ self.w.T\n # print(\"shape of w.T: \", self.w.T.shape, \"shape of RETURN delta: \", self.d_w.shape)\n #print(self.w.shape)\n return self.d_w", "def set_delta_equal(self, coord: str, c: str, d: str, e: str, f: str) -> None:\n a = self.blank()\n a[self.index(coord, c)] += 1.0\n a[self.index(coord, d)] -= 1.0\n a[self.index(coord, e)] -= 1.0\n a[self.index(coord, f)] += 1.0\n self.add_constraint(a, 0.0)", "def update_weights_positive(self):\n eta = self.config.eta\n self.w_xh += eta * (self.x.T @ self.h)\n self.w_th += eta * (self.t.T @ self.h)\n self.w_ho += eta * (self.h.T @ self.o)\n self.w_hz += eta * (self.h.T @ self.z)", "def delta(self):\r\n return self.nd1()", "def update_weights_negative(self):\n eta = 
self.config.eta\n self.w_xh -= eta * (self.x.T @ self.h)\n self.w_th -= eta * (self.t.T @ self.h)\n self.w_ho -= eta * (self.h.T @ self.o) \n self.w_hz -= eta * (self.h.T @ self.z)", "def delta(self):\r\n return 1 - xl.Refractive_Index_Re(self.compound, self.energy, self.density)", "def __ge__(self, other):\n return self.x ** 2 + self.y ** 2 >= other.x ** 2 + other.y ** 2", "def check_cflcushion(delt=0.1, cfl_cushion_upper=0.5, cfl_cushion_lower=0.1, code_dt_max=0.1, nstep=100):\n \n # Define some characteristic delta t's as log10()\n vec_cfl_dt_discrete = [-1., -2., -3., -3., -3., -3., -2., -3., -1., -1] \n vec_code_dt = [delt]; changes_in_delt = []\n print(0.1/0.22)\n print(0.1, 0.1/0.22*0.5)\n \n # Construct a continues vector of time steps\n vec_cfl_dt = []\n for i in range(len(vec_cfl_dt_discrete)-1):\n vec_cfl_dt += list(vec_cfl_dt_discrete[i] + np.array(range(nstep))/nstep*(vec_cfl_dt_discrete[i+1]-vec_cfl_dt_discrete[i]))\n vec_cfl_dt = 10**np.array(vec_cfl_dt) \n vec_step = range(len(vec_cfl_dt))\n \n # Mimic the CFL decrease condition\n for i, cfl_dt in enumerate(vec_cfl_dt):\n if (vec_code_dt[-1] > cfl_dt*cfl_cushion_upper):\n print(10**((np.log10(cfl_cushion_upper)+np.log10(cfl_cushion_lower))/2))\n vec_code_dt.append(cfl_dt*10**((np.log10(cfl_cushion_upper)+np.log10(cfl_cushion_lower))/2))\n changes_in_delt.append(i)\n print()\n print(f\"DECREASE! Because {vec_code_dt[-2]:6.2e} > {cfl_dt*cfl_cushion_upper:6.2e}\")\n print(f\" {cfl_dt*cfl_cushion_upper:6.2e} = cfl_dt*cfl_cushion_upper\")\n print(f\" {cfl_dt:6.2e} = cfl_dt\")\n print(f\" {vec_code_dt[-2]:6.2e} = code_dt\") \n print(f\" ==> code_dt = {vec_code_dt[-1]}\")\n elif (vec_code_dt[-1] < np.min([cfl_dt*cfl_cushion_lower, code_dt_max])):\n vec_code_dt.append(np.min([cfl_dt*10**((np.log10(cfl_cushion_upper)+np.log10(cfl_cushion_lower))/2), code_dt_max]))\n changes_in_delt.append(i)\n print()\n print(f\"INCREASE! 
Because {vec_code_dt[-2]:6.2e} < {np.min([cfl_dt*cfl_cushion_lower, code_dt_max]):6.2e}\")\n print(f\" {cfl_dt*cfl_cushion_lower:6.2e} = cfl_dt*cfl_cushion/delt_adjust\")\n print(f\" {cfl_dt:6.2e} = cfl_dt\")\n print(f\" {vec_code_dt[-2]:6.2e} = code_dt\") \n print(f\" ==> code_dt = {vec_code_dt[-1]}\")\n else:\n vec_code_dt.append(vec_code_dt[-1])\n \n # Create a figure\n fig = plt.figure(figsize=(18, 9)); fig.set_tight_layout(False)\n grid_specifications = gridspec.GridSpec(1,1)\n grid_specifications.update(top=0.98, left=0.05, right=0.95, bottom=0.06, wspace=0.35, hspace=0.45)\n ax = plt.subplot(grid_specifications[0])\n \n # Plot dt(istep)\n ax.plot(vec_step, vec_cfl_dt, color='black', label='CFL dt')\n ax.plot(vec_step, vec_cfl_dt*cfl_cushion_upper, color='black', alpha=0.5, label='CFL dt*CFL cushion upper')\n ax.plot(vec_step, vec_cfl_dt*cfl_cushion_lower, color='black', alpha=0.2, label='CFL dt*CFL cushion lower')\n ax.plot(vec_step, vec_code_dt[1:], color='maroon', label='code dt')\n \n # Highlight the changes \n if False:\n for change in changes_in_delt:\n ax.axvline(x=change, color='maroon', alpha=0.5, zorder=1)\n \n # Show figure\n ax.set_yscale('log')\n ax.autoscale()\n ax.legend(labelspacing=0.0, handlelength=1, shadow=True)\n plt.show()\n return", "def _hill_diff(self, position):\n if position < 0:\n return 2 * position + 1\n else:\n return (1/math.sqrt(1 + 5 * position ** 2)\n - 5 * position ** 2 * (1 + 5 * position ** 2)**-1.5)", "def distance(v, w):\n\treturn magnitude(vector_subtract(v, w))", "def local_extrema_seuil(sweep, seuil1, seuil2, span) :\n\n #temporary elements\n temp_min = 0\n temp_min_arg = -1\n temp_max = 0\n temp_max_arg = -1\n\n #This holds the result\n up_i = 0\n down_i = 0\n up = array([])\n arg_up = array([])\n down = array([])\n arg_down = array([])\n #init the writing bolean\n min_write = True\n max_write = True\n sweep_size = size(sweep)\n\n for i in range(sweep_size) :\n value = sweep[i]\n #check if we are below the threshold, if yes, next point\n if abs(value) < seuil1 :\n max_write = True\n min_write = True\n if temp_max_arg != -1 :\n #Reshape the array\n s_up = array(shape(up))\n s_up[0] = s_up[0] + 1\n s_up = tuple(s_up)\n up = resize(up,s_up)\n arg_up = resize(arg_up,s_up)\n #Assign values\n up[up_i] = temp_max\n arg_up[up_i] = temp_max_arg\n up_i = up_i + 1\n temp_max = 0\n temp_max_arg = -1\n\n if temp_min_arg != -1 :\n #Reshape the array\n s_down = array(shape(down))\n s_down[0] = s_down[0] + 1\n s_down = tuple(s_down)\n down = resize(down,s_down)\n arg_down = resize(arg_down,s_down)\n #Assign values\n down[down_i] = temp_min\n arg_down[down_i] = temp_min_arg\n down_i = down_i + 1\n temp_min = 0\n temp_min_arg = -1\n\n continue\n\n\n #if we are in beetween the two threshold\n if abs(value) > seuil1 and abs(value) < seuil2 :\n if value < temp_min and min_write :\n temp_min = value\n temp_min_arg = i\n if value > temp_max and max_write:\n temp_max = value\n temp_max_arg = i\n\n #if we are above the threshold\n if abs(value) > seuil2 :\n #Make sure than min and max cannot be accessed before going back below seuil1\n if value < - seuil2 :\n min_write = False\n if(temp_min_arg + span > i) :\n temp_min = 0\n temp_min_arg = -1\n if value > seuil2 :\n max_write = False\n if(temp_max_arg + span > i) :\n temp_max = 0\n temp_max_arg = -1\n\n return [down, arg_down, up, arg_up]", "def cmp(space, w_x, w_y):\n return space.cmp(w_x, w_y)", "def w_update(self, x, y, pred_class, alpha=0.0001):\n w_new = self.w + alpha * (y - pred_class) * np.append(x, 1)\n 
self.w = w_new", "def update_weights(self):\n activation, activation_p = self.feedforward()\n # initialize delta_weights\n delta_w = np.zeros(2)\n # simultaneous calculate delta_weights\n for i, element in enumerate(self.y):\n delta_w += (activation[i]-element)*(activation_p[i])*self.X[i]\n # update weight\n self.weights -= self.alfa*delta_w", "def test_delta_val6(self):\n d = Delta(\"+50-25=%\")\n self.assertEqual(d.cmp(0, 1), False)\n self.assertEqual(d.cmp(8, 4), True)\n self.assertEqual(d.cmp(8, 6), True)\n self.assertEqual(d.cmp(6, 8), False)\n self.assertEqual(d.cmp(6, 9), True)", "def distance_between_wheels():", "def get_transitions(self, w):\n return np.array([((i, j), self.dij(j, i), np.abs(e1 - e2), 0)\n for j, e1 in enumerate(self.ev)\n for i, e2 in enumerate(self.ev)\n if np.isclose(e1 - e2, w)], dtype=DTYPE_JUMP)", "def _add_grad_wFilter(self, coords:torch.tensor, trust:torch.tensor,\n update:torch.tensor, fltr:torch.tensor ) -> torch.tensor:\n # view as to avoid deprecated point wise semantics\n trust = trust[fltr]\n update = update[fltr]\n dat = coords[fltr]\n step = trust.reshape(-1,1,1)*update.view_as(dat)\n \n coords.data[fltr] += step\n return trust #* cutoff_multiplier", "def extra(d):\n return min(d.l * d.w, d.w * d.h, d.h * d.l)", "def ou_change(self,dt,mu,L,delta):\n\n dW = self.bm_change(dt=dt,delta=delta)\n ds = L * (mu - self.Coord) * dt + dW\n return ds", "def cool(self):\n self.t = self.t - 1", "def buildWPriorTerm(self):\r\n\r\n # self.w_prior.shape == (minibatch size,)\r\n self.w_prior = 0.5*T.sum(1 + T.log(self.qwgy_var) - self.qwgy_mu**2-self.qwgy_var, axis=1)\r\n\r\n self.w_prior_modif = - T.maximum(self.hyper['treshold_w_prior'], -self.w_prior)", "def updateW(self, trj_Sp_theta, W_0):\n def fun(x):\n global trj_Sp_theta_z\n\n W_0 = x\n r_0 = self.reward_trj(trj_Sp_theta, W_0)\n return -1*r_0 \n import numpy as np\n from scipy.optimize import minimize\n \n global trj_Sp_theta_z \n trj_Sp_theta_z = trj_Sp_theta\n alpha = 0.2\n delta = alpha\n cons = ({'type': 'eq',\n 'fun' : lambda x: np.array([np.sum(x)-1])},\n {'type': 'ineq',\n 'fun' : lambda x: np.array([np.min(x)])},\n {'type': 'ineq',\n 'fun' : lambda x: np.array([np.abs(np.sum(x-x0))+delta])})\n\n x0 = W_0\n res = minimize(fun, x0, constraints=cons)\n\n x = res.x\n\n W = x/(np.sum(x)) # changed\n return W", "def w_lin_update(u, Lin_lhs, Lin_rhs):\n w_lin_next = Lin_lhs.dot(u) \n violation_indices = w_lin_next - Lin_rhs > 0\n w_lin_next[violation_indices] = Lin_rhs[violation_indices]\n return w_lin_next", "def ccw(A, B, C):\n return (B.x - A.x) * (C.y - A.y) > (B.y - A.y) * (C.x - A.x)", "def isDominated(wvalues1, wvalues2):\n not_equal = False\n for self_wvalue, other_wvalue in zip(wvalues1, wvalues2):\n print(\"self_wvalue: \"+str(self_wvalue))\n print(\"other_wvalue: \"+str(other_wvalue))\n if self_wvalue > other_wvalue:\n return False\n elif self_wvalue < other_wvalue:\n not_equal = True\n return not_equal", "def select_delta(self, dist_post_update, current_iteration):\n if current_iteration == 1:\n delta = 0.1 * (self.clip_max - self.clip_min)\n else:\n if self.constraint == \"l2\":\n delta = np.sqrt(self.d) * self.theta * dist_post_update\n elif self.constraint == \"linf\":\n delta = self.d * self.theta * dist_post_update\n\n return delta", "def _derW(self, w, x, y, z):\n if _isscalar(w):\n x_pos = max(min(np.searchsorted(self.x_list, x), self.x_n - 1), 1)\n y_pos = max(min(np.searchsorted(self.y_list, y), self.y_n - 1), 1)\n z_pos = max(min(np.searchsorted(self.z_list, z), self.z_n - 1), 1)\n 
alpha = (x - self.x_list[x_pos - 1]) / (\n self.x_list[x_pos] - self.x_list[x_pos - 1]\n )\n beta = (y - self.y_list[y_pos - 1]) / (\n self.y_list[y_pos] - self.y_list[y_pos - 1]\n )\n gamma = (z - self.z_list[z_pos - 1]) / (\n self.z_list[z_pos] - self.z_list[z_pos - 1]\n )\n dfdw = (\n (1 - alpha)\n * (1 - beta)\n * (1 - gamma)\n * self.wInterpolators[x_pos - 1][y_pos - 1][z_pos - 1]._der(w)\n + (1 - alpha)\n * (1 - beta)\n * gamma\n * self.wInterpolators[x_pos - 1][y_pos - 1][z_pos]._der(w)\n + (1 - alpha)\n * beta\n * (1 - gamma)\n * self.wInterpolators[x_pos - 1][y_pos][z_pos - 1]._der(w)\n + (1 - alpha)\n * beta\n * gamma\n * self.wInterpolators[x_pos - 1][y_pos][z_pos]._der(w)\n + alpha\n * (1 - beta)\n * (1 - gamma)\n * self.wInterpolators[x_pos][y_pos - 1][z_pos - 1]._der(w)\n + alpha\n * (1 - beta)\n * gamma\n * self.wInterpolators[x_pos][y_pos - 1][z_pos]._der(w)\n + alpha\n * beta\n * (1 - gamma)\n * self.wInterpolators[x_pos][y_pos][z_pos - 1]._der(w)\n + alpha\n * beta\n * gamma\n * self.wInterpolators[x_pos][y_pos][z_pos]._der(w)\n )\n else:\n m = len(x)\n x_pos = np.searchsorted(self.x_list, x)\n x_pos[x_pos > self.x_n - 1] = self.x_n - 1\n y_pos = np.searchsorted(self.y_list, y)\n y_pos[y_pos > self.y_n - 1] = self.y_n - 1\n y_pos[y_pos < 1] = 1\n z_pos = np.searchsorted(self.z_list, z)\n z_pos[z_pos > self.z_n - 1] = self.z_n - 1\n z_pos[z_pos < 1] = 1\n dfdw = np.zeros(m) + np.nan\n for i in range(1, self.x_n):\n for j in range(1, self.y_n):\n for k in range(1, self.z_n):\n c = np.logical_and(\n np.logical_and(i == x_pos, j == y_pos), k == z_pos\n )\n if np.any(c):\n alpha = (x[c] - self.x_list[i - 1]) / (\n self.x_list[i] - self.x_list[i - 1]\n )\n beta = (y[c] - self.y_list[j - 1]) / (\n self.y_list[j] - self.y_list[j - 1]\n )\n gamma = (z[c] - self.z_list[k - 1]) / (\n self.z_list[k] - self.z_list[k - 1]\n )\n dfdw[c] = (\n (1 - alpha)\n * (1 - beta)\n * (1 - gamma)\n * self.wInterpolators[i - 1][j - 1][k - 1]._der(w[c])\n + (1 - alpha)\n * (1 - beta)\n * gamma\n * self.wInterpolators[i - 1][j - 1][k]._der(w[c])\n + (1 - alpha)\n * beta\n * (1 - gamma)\n * self.wInterpolators[i - 1][j][k - 1]._der(w[c])\n + (1 - alpha)\n * beta\n * gamma\n * self.wInterpolators[i - 1][j][k]._der(w[c])\n + alpha\n * (1 - beta)\n * (1 - gamma)\n * self.wInterpolators[i][j - 1][k - 1]._der(w[c])\n + alpha\n * (1 - beta)\n * gamma\n * self.wInterpolators[i][j - 1][k]._der(w[c])\n + alpha\n * beta\n * (1 - gamma)\n * self.wInterpolators[i][j][k - 1]._der(w[c])\n + alpha\n * beta\n * gamma\n * self.wInterpolators[i][j][k]._der(w[c])\n )\n return dfdw", "def at_pos(self, pos):\n return pos-self.deadband < self.wm() < pos+self.deadband", "def grad(self, w):\n l1_grad = self.r * np.sign(w)\n l2_grad = np.asarray(1 - self.r) * w \n\n gradient_penalty = self.alpha * (l1_grad + l2_grad)\n\n # Insert 0 for bias term.\n return np.insert(gradient_penalty, 0, 0, axis=0)", "def __call__(self, **kwargs):\n x = kwargs[\"x_new\"]\n return (sum(x)<self.maxT)", "def sidebounce(self):\r\n self.dx=-self.dx", "def _derW(self, w, x, y, z):\n # This may look strange, as we call the derivativeX() method to get the\n # derivative with respect to w, but that's just a quirk of 4D interpolations\n # beginning with w rather than x. 
The derivative wrt the first dimension\n # of an element of wxInterpolators is the w-derivative of the main function.\n if _isscalar(x):\n y_pos = max(min(np.searchsorted(self.y_list, y), self.y_n - 1), 1)\n z_pos = max(min(np.searchsorted(self.z_list, z), self.z_n - 1), 1)\n alpha = (y - self.y_list[y_pos - 1]) / (\n self.y_list[y_pos] - self.y_list[y_pos - 1]\n )\n beta = (z - self.z_list[z_pos - 1]) / (\n self.z_list[z_pos] - self.z_list[z_pos - 1]\n )\n dfdw = (\n (1 - alpha)\n * (1 - beta)\n * self.wxInterpolators[y_pos - 1][z_pos - 1].derivativeX(w, x)\n + (1 - alpha)\n * beta\n * self.wxInterpolators[y_pos - 1][z_pos].derivativeX(w, x)\n + alpha\n * (1 - beta)\n * self.wxInterpolators[y_pos][z_pos - 1].derivativeX(w, x)\n + alpha * beta * self.wxInterpolators[y_pos][z_pos].derivativeX(w, x)\n )\n else:\n m = len(x)\n y_pos = np.searchsorted(self.y_list, y)\n y_pos[y_pos > self.y_n - 1] = self.y_n - 1\n y_pos[y_pos < 1] = 1\n z_pos = np.searchsorted(self.z_list, z)\n z_pos[z_pos > self.z_n - 1] = self.z_n - 1\n z_pos[z_pos < 1] = 1\n dfdw = np.zeros(m) + np.nan\n for i in range(1, self.y_n):\n for j in range(1, self.z_n):\n c = np.logical_and(i == y_pos, j == z_pos)\n if np.any(c):\n alpha = (y[c] - self.y_list[i - 1]) / (\n self.y_list[i] - self.y_list[i - 1]\n )\n beta = (z[c] - self.z_list[j - 1]) / (\n self.z_list[j] - self.z_list[j - 1]\n )\n dfdw[c] = (\n (1 - alpha)\n * (1 - beta)\n * self.wxInterpolators[i - 1][j - 1].derivativeX(w[c], x[c])\n + (1 - alpha)\n * beta\n * self.wxInterpolators[i - 1][j].derivativeX(w[c], x[c])\n + alpha\n * (1 - beta)\n * self.wxInterpolators[i][j - 1].derivativeX(w[c], x[c])\n + alpha\n * beta\n * self.wxInterpolators[i][j].derivativeX(w[c], x[c])\n )\n return dfdw", "def spread_dye(self, dt=0.1):\n #advection operator\n #This moves quite well now\n du_dt = -np.add.reduce(self.u*np.array(np.gradient(self.dye)))\n #diffusion \n du_dt += ndimage.laplace(self.dye)/100\n self.dye += du_dt*dt\n #Prevent negative density\n #self.dye = np.maximum(self.dye,0)\n self.dye *= self.dye_total/np.sum(self.dye)", "def space_increase(MBR1, MBR2):\n xmin = min(MBR1['xmin'], MBR2['xmin'])\n ymin = min(MBR1['ymin'], MBR2['ymin'])\n xmax = max(MBR1['xmax'], MBR2['xmax'])\n ymax = max(MBR1['ymax'], MBR2['ymax'])\n return 1.0 * ((xmax - xmin) * (ymax - ymin) - (MBR1['xmax'] - MBR1['xmin']) * (MBR1['ymax'] - MBR1['ymin']))", "def calc_generation_wind_proposed (self):\n if self.comp_specs['proposed capacity'] != UNKNOWN:\n self.load_offset_proposed = \\\n self.comp_specs['proposed capacity']\n self.generation_wind_proposed = \\\n self.comp_specs['proposed generation']\n\n if self.generation_wind_proposed == UNKNOWN:\n self.generation_wind_proposed = self.load_offset_proposed *\\\n float(self.comp_specs\\\n ['capacity factor'])*\\\n constants.hours_per_year\n\n return\n\n self.load_offset_proposed = 0\n\n offset = self.average_load*\\\n (self.comp_specs['percent generation to offset'] / 100.0)\n #~ print self.forecast.generation['generation hydro'].sum()\n\n # removed on purpose\n #~ hydro = \\\n #~ self.forecast.generation['generation hydro'].fillna(0).sum()\n #~ if hydro > 0:\n #~ offset *= 2\n\n # existing very variable RE\n existing_RE = \\\n int(float(self.cd['wind capacity'])) + \\\n int(float(self.cd['solar capacity']))\n\n if existing_RE < (round(offset/25) * 25): # ???\n #~ print \"True\"\n self.load_offset_proposed = round(offset/25) * 25 - existing_RE\n\n\n\n # not needed for now\n #~ self.total_wind_generation = self.generation_load_proposed + \\\n #~ 
int(self.comp_specs['wind capacity'])\n\n self.generation_wind_proposed = self.load_offset_proposed * \\\n float(self.comp_specs['capacity factor'])*\\\n constants.hours_per_year\n #~ print 'self.load_offset_proposed',self.load_offset_proposed\n #~ print 'self.generation_wind_proposed',self.generation_wind_proposed", "def qwf(self, vw, ev, gp, psi_l, lai, dt):\n\t\t#if the amount of water in storage is less than amount that will be absorbed by plant in timestep dt, then what's left will be absorbed \n\t qw = (self.gwf(self.psi_wf(self.vw,self.d1, self.d1, self.ns, self.tl), self.H, self.J)*(self.psi_wf(self.vw, self.d1, self.d1, self.ns, self.tl) - (ev*(1. - self.F_CAP))/(lai*gp) - psi_l)*lai)\n\t if self.vw == 0:\n\t return 0.\n\t elif self.vw*10**6 <= qw*dt:\n\t return (self.vw*10**6/dt)\n\t else:\n\t return qw", "def delta(z, a, y):\n return (a-y)", "def gated_weighting(g, w_content, w_previous):\n\n return g * w_content + (1-g) * w_previous", "def __call__(self, w):\n l1_term = self.alpha * np.linalg.norm(w, 1)\n l2_term = self.alpha * 0.5 * np.linalg.norm(w, 2)\n\n return self.r * l1_term + (1 - self.r) * l2_term", "def has_increased_significantly(old, new, sig_fig=10**(-4)):\n return(new > old and np.log10(1.-old/new) > -sig_fig)", "def test_delta_plus(self):\n d = Delta(\"+50\")\n self.assertEqual(d.cmp(0, 50), False)\n self.assertEqual(d.cmp(0, 51), True)\n self.assertEqual(d.cmp(10, 5), False)\n d = Delta(\"+50=\")\n self.assertEqual(d.cmp(0, 50), True)\n d = Delta(\"+50%\")\n self.assertEqual(d.cmp(10, 25), True)\n self.assertEqual(d.cmp(25, 10), False)", "def test_W_start(self):\t\t\n self.assertAlmostEqual(attempt.W[0], 12)", "def _w_diff_dcm(self, otherframe):\n dcm2diff = self.dcm(otherframe)\n diffed = dcm2diff.diff(dynamicsymbols._t)\n angvelmat = diffed * dcm2diff.T\n w1 = trigsimp(expand(angvelmat[7]), recursive=True)\n w2 = trigsimp(expand(angvelmat[2]), recursive=True)\n w3 = trigsimp(expand(angvelmat[3]), recursive=True)\n return -Vector([(Matrix([w1, w2, w3]), self)])", "def _update_windTriangle(self):\n _awa_ = lambda awa: self.vb * np.sin(awa / 180.0 * np.pi) - self.tws * np.sin(\n (self.twa - awa) / 180.0 * np.pi\n )\n self.awa = fsolve(_awa_, self.twa)[0]\n self.aws = np.sqrt(\n (self.tws * np.sin(self.twa / 180.0 * np.pi)) ** 2\n + (self.tws * np.cos(self.twa / 180.0 * np.pi) + self.vb) ** 2\n )", "def _hill_diff_diff(self, position):\n if position < 0:\n return 2\n else:\n return position * ((75 * (position ** 2)/((1 + 5 * position**2)**2.5)) - 5/((1 + 5 * position ** 2)**2.5)) \\\n - 10 * position/((1 + 5 * position ** 2)**1.5)" ]
[ "0.665439", "0.6648047", "0.5889416", "0.5889381", "0.588397", "0.5883008", "0.57802653", "0.5774275", "0.57511485", "0.57244766", "0.5699618", "0.56226575", "0.5588297", "0.5588012", "0.55859", "0.5545932", "0.55233616", "0.5523173", "0.55197215", "0.5486571", "0.5483729", "0.54824954", "0.548049", "0.54625696", "0.54572564", "0.5389813", "0.53887796", "0.53739196", "0.5372225", "0.5358828", "0.53587633", "0.5339671", "0.53389704", "0.5331089", "0.5330108", "0.5318232", "0.5288102", "0.5284484", "0.5281534", "0.5278078", "0.5274105", "0.52702147", "0.5264579", "0.52571833", "0.5254491", "0.5244817", "0.5232679", "0.52237517", "0.52136874", "0.52118057", "0.5208946", "0.5208233", "0.52076995", "0.5206713", "0.5206151", "0.52033454", "0.52022195", "0.51961875", "0.5186505", "0.5184788", "0.51819617", "0.5168918", "0.5168248", "0.5162998", "0.51613146", "0.51521647", "0.51516145", "0.5145155", "0.5144353", "0.5143969", "0.51284146", "0.5122759", "0.51201755", "0.51165277", "0.51151246", "0.51125836", "0.5111685", "0.5111334", "0.51093197", "0.5106605", "0.51035315", "0.50990826", "0.5092266", "0.5090875", "0.5089801", "0.50837374", "0.5080321", "0.50720793", "0.5071262", "0.50645274", "0.50614744", "0.5060394", "0.50499177", "0.5049761", "0.50477904", "0.503972", "0.50353086", "0.5034326", "0.5033398", "0.50329655" ]
0.52951324
36
it gets the list of weights
def getweigths():
    ls = []
    for i_lay in range(1, len(layers)):
        ls.append(layers[i_lay]["weigths"])
    return ls
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def weights(self) -> List[float]:", "def get_weights(self):", "def show_rel_wt(list_obj):\r\n total = sum_list(list_obj)\r\n wt_list = []\r\n \r\n for num in list_obj:\r\n weight = int((num / total) * 100)\r\n wt_list.append(f\"{weight}%\")\r\n \r\n return wt_list", "def get_weights(self):\n if self.W_tickers is not None:\n return [w for w in [self.W_fwd_conc, self.W_bwd_conc] + list(self.W_tickers.values())\n if w is not None]\n else:\n return [w for w in [self.W_fwd_conc, self.W_bwd_conc] if w is not None]", "def get_weights(self):\n if self.W_tickers is not None:\n return [w for w in [self.W_fwd_conc, self.W_bwd_conc] + list(self.W_tickers.values())\n if w is not None]\n else:\n return [w for w in [self.W_fwd_conc, self.W_bwd_conc] if w is not None]", "def get_weights(self):\n if self.W_tickers is not None:\n return [w for w in [self.W_fwd_conc, self.W_bwd_conc] + list(self.W_tickers.values())\n if w is not None]\n else:\n return [w for w in [self.W_fwd_conc, self.W_bwd_conc] if w is not None]", "def get_weights(self):\n if self.W_tickers is not None:\n return [w for w in [self.W_fwd_conc, self.W_bwd_conc, self.W_red_rec] + list(self.W_tickers.values())\n if w is not None]\n else:\n return [w for w in [self.W_fwd_conc, self.W_bwd_conc, self.W_red_rec] if w is not None]", "def updateweigths():\n for i_lay in range(1, len(layers)):\n layers[i_lay][\"weigths\"] += layers[i_lay][\"Delta_w\"]", "def get_weights(self):\n return self.weights\n #print(W)", "def hwt(data):\n sz = len(data)\n i = 0\n res1 = []\n res2 = []\n while i < sz:\n s0 = data[i]\n s1 = data[i+1]\n res1.append((s0+s1)/2.)\n res2.append((s0-s1)/2.)\n i += 2\n return (res1,res2)", "def get_weights(self):\n return [self.w, self.b]", "def abbott_steam():\n per_klb = 20 # dollars per klb of steam\n kwh_eq = to_kwh(1) # kwh equivalent of steam\n per_kwh = per_klb / kwh_eq\n return per_kwh", "def get_weight_list(self) -> List[float]:\n return self._weight_list", "def get_weights(self):\n return [self.W]", "def get_weights(self):\n return [self.W]", "def get_Delta_weigths():\n ls = []\n for i_lay in range(1, len(layers)):\n ls.append(layers[i_lay][\"Delta_w\"])\n return ls", "def get_list_powers(self):\r\n s = self.query('LIST:POW?')\r\n if s == None: return None\r\n a = []\r\n n = 0\r\n for x in s.split(','):\r\n try:\r\n a.append(float(x.strip()))\r\n except:\r\n print('ERROR get_list_powers(): non-float in list ', n, x)\r\n n += 1\r\n return a", "def get_list_powers(self):\r\n s = self.query('SOUR1:LIST:POW?')\r\n if s == None: return None\r\n a = []\r\n n = 0\r\n for x in s.split(','):\r\n try:\r\n a.append(float(x.strip()))\r\n except:\r\n print('ERROR get_list_powers(): non-float in list ', n, x)\r\n n += 1\r\n return a", "def get_list_powers(self):\r\n s = self.query('SOUR1:LIST:POW?')\r\n if s == None: return None\r\n a = []\r\n n = 0\r\n for x in s.split(','):\r\n try:\r\n a.append(float(x.strip()))\r\n except:\r\n print('ERROR get_list_powers(): non-float in list ', n, x)\r\n n += 1\r\n return a", "def getByWeight(list, w):\n itemId = 0\n partialWeight = list[0][1]\n while partialWeight < w:\n itemId += 1\n partialWeight += list[itemId][1]\n return list[itemId]", "def getFWHM(antenna, freq):\n diameter = getDiameter(antenna)\n lam = 299792458.0 / (freq * 1e9)\n fwhmo = lam / math.pi * 180.0 * 60.0\n fwhm = 1.22 * fwhmo / diameter\n return fwhm", "def get_list_powers(self):\r\n _debug('simq03b_api.get_list_powers')\r\n \r\n s = self.query('SOUR:LIST:POW?')\r\n if s == None: return None\r\n a = []\r\n n = 0\r\n for x in 
s.split(','):\r\n try:\r\n a.append(float(x.strip()))\r\n except:\r\n print('ERROR get_list_powers(): non-float in list ', n, x)\r\n n += 1\r\n return a", "def get_weights(self):\n return []", "def abbott_elec():\n per_kwh = 0.08 # [$/kWh]\n return per_kwh", "def power_list():", "def get_weight(ew1, ew2):\n dw = flu.delta_epiweeks(ew1, ew2)\n yr = 52.2\n hl1, hl2, bw = yr, 1, 4\n a = 0.05\n #b = (np.cos(2 * np.pi * (dw / yr)) + 1) / 2\n b = np.exp(-((min(dw % yr, yr - dw % yr) / bw) ** 2))\n c = 2 ** -(dw / hl1)\n d = 1 - 2 ** -(dw / hl2)\n return (a + (1 - a) * b) * c * d", "def weight(self):", "def weights(self):\r\n\t\treturn None", "def animal_weights(self):\n herb_weights = []\n carn_weights = []\n for cell in self.land_cells.values():\n for herb in cell.herbivores:\n herb_weights.append(herb.weight)\n for carn in cell.carnivores:\n carn_weights.append(carn.weight)\n\n if not herb_weights:\n return [carn_weights]\n elif not carn_weights:\n return [herb_weights]\n else:\n return [herb_weights, carn_weights]", "def wavelength_ex(hdulist):\n wave = hdulist[1].data['loglam']\n wave = 10**wave\n\n return wave", "def get_weights(self):\r\n return self.weights", "def get_weights(self):\r\n return self.weights", "def get_wattage(self):\n summary = \" \".join(self.get_summary().split())\n pattern = '\\$.... .. .*? .*? .*? .*? .*? . (.*?) .*? . . . .*?'\n watts = float(re.findall(pattern,summary).pop()) \n return watts", "def getChickWeights(self):\n\t\treturn self.chickWeight", "def get_weights(self):\n return self.w", "def get_weights(self):\n return self.w", "def get_weights(self):\n return self.w", "def get_weights(self):\n return self.w", "def heart_weight_oz(self):\r\n grams_to_oz = self.organ_weight_grams*.035\r\n return (float(grams_to_oz))", "def return_weights(self):\n w0 = self.comparator.weight.data.numpy()\n b0 = self.comparator.bias.data.numpy()\n\n w1 = self.matcher.weight.data.numpy()\n b1 = self.matcher.bias.data.numpy()\n\n w2 = self.head.weight.data.numpy()\n b2 = self.head.bias.data.numpy()\n\n return w0, b0, w1, b1, w2, b2", "def brain_weight_oz(self):\r\n return Heart.heart_weight_oz(self) # Used method from Heart Class\r", "def get_weights(self, index):\n weight = self._weights[index]\n if isinstance(weight, dict):\n return list(weight.items())\n else:\n return [(Term(\"t\"), weight)]", "def getWeights(self):\n return self.W1, self.W2", "def getWeight(self) -> float:\n ...", "def GetHWEP(trrecord, samplelists=[], uselength=True):\n if len(samplelists)==0: samplelists.append(None)\n pvals = []\n for sl in samplelists:\n allele_freqs = trrecord.GetAlleleFreqs(samplelist=sl, uselength=uselength)\n genotype_counts = trrecord.GetGenotypeCounts(samplelist=sl, uselength=uselength)\n pvals.append(utils.GetHardyWeinbergBinomialTest(allele_freqs, genotype_counts))\n return pvals", "def _get_cookie_weights(self) -> list:\n cookie_weights = []\n for cookie_type in self.cookie_data:\n cookie_weights.append(cookie_type['weight'])\n\n return cookie_weights", "def get_weight(self):\n pass", "def get_weight(self):\n pass", "def get_dwi(data, weight=\"400\"):\n wt_dict = {'10':3, '100':4, '400':5, '800':6, '2000':7}\n di = wt_dict[weight]\n return [patient[di] for i, patient in enumerate(data) if i in good_patients]", "def get_weights(self):\n return self.weights", "def get_weights(self):\n return self.weights", "def weights(self):\n return [x.numpy() for x in self.core.w]", "def _welch_ingredients(x):\n\n numerator = x.var(ddof=1) / x.size\n denominator = np.power(x.var(ddof=1) / x.size, 
2) / (x.size - 1)\n return [numerator, denominator]", "def WHD(q, p, h, weights):\n \n num = 0.0\n for j in range(q, p):\n num += weights[j]\n den = 0.0\n for j in range(1, h):\n den += weights[j]\n return 0 if num == 0 else num/den", "def __get_train_weights(train_data: List[TrainSample]) -> np.array:\n train_weights = []\n for sample in train_data:\n num_answers = sample.exam_episode.get_all_answers({sample.player}, sys.maxsize)\n train_weights.append(1 / len(num_answers))\n return np.array(train_weights)", "def calculate_weights():\n weights = {}\n\n\n # estimate run time of step 1 (fast sweep)\n f_range = sweeper_script.settings['stop'] - sweeper_script.settings['start']\n N_samples = sweeper_script.settings['samplecount']\n df = f_range / N_samples\n\n t = N_samples / df\n\n weights['quick scan'] = t\n\n # estimate run time of step 2 (high res sweep)\n df = self.settings['high_res_df']\n N_samples = self.settings['high_res_N']\n\n t = N_samples / df\n\n weights['high res scan'] = t\n\n\n total_time = sum([v for k, v in weights.iteritems()])\n\n weights = {k: v/total_time for k, v in weights.iteritems()}\n\n print('weights',weights)\n\n return weights", "def hwt2(data):\n sz = len(data)\n i = 0\n res1 = []\n res2 = []\n while i < sz:\n r0 = data[i][0]\n s0 = data[i][1]\n r1 = data[i+1][0]\n s1 = data[i+1][1]\n res1.append((r0, (s0+s1)/2.))\n res2.append((r0, (s0-s1)/2.))\n i += 2\n return (res1,res2)", "def get_weights(self):\n return self.__weights", "def weights(cae, p):\n weights = []\n for i, j in pairwise(p):\n closed_open = len([e for e in cae if i <= e < j])\n weights.append(closed_open)\n return [value % 2 for value in weights]", "def calculateWeights(self):\n return self.distances #En lo que encontramos una funcion que represente", "def calc_specialist_weights(numsamps):\n weights = 1.0/numsamps\n weights[np.isinf(weights)] = 0.0\n return np.max(numsamps)*weights", "def waveparameterh(L):\r\n return 8.13 - ((250 - 0.7 * L) / 125) ** 3", "def ohms(self):\n # Rwb = Rwiper + Rtotal * (counts / 256)\n # Rwa = Rwiper + Rtotal * ((256 - counts) / 256)\n g = 0\n rtotal=0.0\n reach=[]\n for chan in self.get_channel_list(self.nchans):\n self.rwa[chan] = float( 256 - self.vals[chan] ) / 256.0\n self.rwb[chan] = float( self.vals[chan] ) / 256.0\n self.rwa[chan] *= self.Rtotal\n self.rwb[chan] *= self.Rtotal \n self.rwa[chan] += self.Rwiper\n self.rwb[chan] += self.Rwiper", "def getWeightsForStem(self,stem):\n pass", "def findHighWeightFeatures(self, label):\n featuresWeights = []\n\n \"*** YOUR CODE HERE ***\"\n\n return featuresWeights", "def get_sample_weights(self):\n target_to_weight = {}\n for target, count in self.class_count.items():\n target_to_weight[target] = self.total / count\n\n sample_weights = []\n for _, target in self.imgs:\n sample_weights.append(target_to_weight[target])\n\n return sample_weights", "def s2w(sents,i,f,freq):\n return [w for w,_,_ in sents[i]]", "def dealHand():\n import random\n import string\n \n vowels = 'aeiou'\n constant = 'bcdfghjklmnpqrstvwxyz'\n \n maxint = max(list(map(len, wordlist)))\n \n n = random.randint(5, maxint) \n \n # 1/3 vowls\n n_vowl = n // 3\n n_constant = n - n//3\n \n get_vowl = random.choices(vowels, k = n_vowl)\n get_constant = random.choices(constant, k = n_constant)\n \n strings = ''.join(get_vowl + get_constant)\n \n hand = getFreqDict(strings)\n \n return hand", "def init_weights(no_input, no_hidden, no_output):\r\n weight_lst = []\r\n no_of_weights = (no_input * no_hidden) + (no_hidden * no_output)\r\n\r\n for weight in 
range(no_of_weights):\r\n weight = 0.01 * random.random()\r\n weight_lst.append(weight)\r\n\r\n return weight_lst", "def get_thb_values(self):\n return (\n float(self.data[2]) / 10, # temp\n int(self.data[3]), # hum\n float(self.data[4]) / 10, # dew\n float(self.data[5]) / 10, # baro\n int(self.data[6]), # forecast\n float(self.data[7]) / 10, # pressure at sealevel\n )", "def getWeights(self, gameState, actton):\n\t\treturn {'successorScore': 1.0}", "def find_worth_playlists(self, part_worths, song_lists):\r\n print(\"\\n\\n*************************\\n\\\r\n Checking Worth of playlist\\n*************************\")\r\n worth_list = []\r\n # index = 0\r\n for song_list in song_lists:\r\n worth = self.find_worth_playlist(part_worths, song_list)\r\n self.plot_test(worth)\r\n worth_list.append(worth[\"worth\"].mean()) #TODO: mean or sum?\r\n # index += 1\r\n print(\"\\nMost valuable playlist is playlist number: \", worth_list.index(max(worth_list))+1)\r\n return worth_list", "def get_list_powers(self):\r\n return self.ps", "def _generate_weights(self):\n weights = []\n for i in range(1, len(self.layers) - 1):\n weights.append(2 * np.random.random(\n (self.layers[i - 1] + 1, self.layers[i] + 1)) - 1)\n weights.append(2 * np.random.random(\n (self.layers[i] + 1, self.layers[i + 1])) - 1)\n return weights", "def _generate_weights(self):\n weights = [random.uniform(0, 1) for x in range(self.num_weights)]\n return self._normalize_weights(weights)", "def weight(self):\n return self._hx711.get_weight()", "def weights(err):\n w = np.power(err, -2)\n w/= np.sum(w)\n return w", "def get_ensemble_weights(self, num_models):\n if self._weight_scheme == \"average\":\n return [1.0 / float(num_models)] * int(num_models)\n # TODO can also directly process weights, like \"0.1,0.1\"\n raise NotImplementedError(\"This weight scheme is not implemented: {}.\"\n .format(self._weight_scheme))", "def pre_flop_strength(hand):\n highs = {}\n highs[4] = [\n \"AA\", \"AKs\", \"AQs\", \"AJs\", \"ATs\", \"AKo\", \"KK\", \"KQs\", \"KJs\", \"AQo\",\n \"QQ\", \"QJs\", \"JJ\", \"TT\"\n ]\n highs[3] = [\n \"A5s\", \"A4s\", \"A3s\", \"KTs\", \"KQo\", \"QTs\", \"AJo\", \"JTs\", \"T9s\", \"99\",\n \"98s\", \"88\", \"87s\", \"77\", \"66\"\n ]\n highs[2] = [\n \"A9s\", \"A8s\", \"A7s\", \"A6s\", \"A2s\", \"K9s\", \"K8s\", \"Q9s\", \"KJo\", \"QJo\",\n \"J9s\", \"ATo\", \"KTo\", \"QTo\", \"JTo\", \"T8s\", \"A9o\", \"J9o\", \"T9o\", \"97s\",\n \"98o\", \"86s\", \"76s\", \"75s\", \"65s\", \"55\", \"44\", \"33\", \"22\"\n ]\n highs[1] = [\n \"K7s\", \"K6s\", \"K5s\", \"K4s\", \"K3s\", \"Q8s\", \"Q7s\", \"Q6s\", \"Q5s\", \"Q4s\",\n \"J8s\", \"J7s\", \"J6s\", \"J5s\", \"T7s\", \"T6s\", \"K9o\", \"Q9o\", \"96s\", \"A8o\",\n \"K8o\", \"Q8o\", \"J8o\", \"T8o\", \"85s\", \"A7o\", \"K7o\", \"Q7o\", \"T7o\", \"97o\",\n \"87o\", \"74s\", \"A6o\", \"K6o\", \"86o\", \"76o\", \"64s\", \"63s\", \"A5o\", \"75o\",\n \"65o\", \"54s\", \"53s\", \"A4o\", \"43s\", \"A3o\"\n ]\n card0, card1 = hand\n if card0[0] == card1[0]:\n pair = \"\".join([card0[0], card1[0]])\n elif card0[1] == card1[1]:\n pair = \"\".join([card0[0], card1[0], \"s\"])\n else:\n pair = \"\".join([card0[0], card1[0], \"o\"])\n for strenght in highs:\n if pair in highs[strenght]:\n return strenght\n return 0", "def GetWavelengths (self) :\n\t\treturn self.run(\"GetWavelengths\")", "def get_weights(model) -> Weights:\n return [val.cpu().numpy() for _, val in model.state_dict().items()]", "def GetHet(trrecord, samplelists=[], uselength=True):\n if len(samplelists) == 0: 
samplelists.append(None)\n hetvals = []\n for sl in samplelists:\n allele_freqs = trrecord.GetAlleleFreqs(samplelist=sl, uselength=uselength)\n hetvals.append(utils.GetHeterozygosity(allele_freqs))\n return hetvals", "def to_tlwh(self):\n ret = self.mean[:4].copy()\n ret[2] *= ret[3]\n ret[:2] -= ret[2:] / 2\n return ret", "def fwordweights(self):\n return self.prefix + 'wordweights.txt'", "def weights(self) -> Dict[str, DLTypes.WeightType]:\n return self._weights", "def get_meta_weights(self):\n var_list = self.get_transfer_loss_weights(name=self.transfer_loss_name)\n var_list += self.get_transfer_loss_weights(name=self.new_loss_name)\n proto_config = self.config.protonet_config\n transfer_config = self.config.transfer_config\n if proto_config.cosine_softmax_tau:\n var_list += [self._tau_b]\n\n if proto_config.protos_phi:\n var_list += [self._w_p1]\n\n if transfer_config.train_wclass_a:\n var_list += [self.w_class_a]\n if not proto_config.cosine_softmax:\n var_list += [self.b_class_a]\n return var_list", "def Influence(Weight):\n N = len(Weight)\n Expected = [[1/N]]*N\n Out = []\n for i in range(1, N):\n Out.append(Weight[i]/Expected[i])\n return(Out)", "def waveband(self):\n return self.get(\"waveband\", default=\"\", decode=True).split(\"#\")", "def getWeights(self, gameState, action):\n # return {'successorScore': 1.0}\n if self.isOffensive:\n return self.getOffensiveWeights(gameState, action)\n else:\n return self.getDefensiveWeights(gameState, action)", "def get_cw_freq(self):\n return self.get_frequency(self.synth)", "def _convert_rh2w(self):\n sat_vapor = 6.11 * (10.0 ** ((7.5 * self.data['Temperature_C']) /\n (237.7 + self.data['Temperature_C'])))\n\n sat_w = 621.97 * (sat_vapor / (self.data['Pressure'] - sat_vapor))\n\n self.data['Mixing_Ratio'] = (\n self.data['Relative_Humidity'] / 100.0) * sat_w", "def converttolbs( self, wkg ):\n if self.debug == 1:\n print \"wkg\",wkg\n wlb = 0\n if ( wkg > 0 ):\n wlb = wkg * 2.20462\n if self.debug == 1:\n print \"wlb\",wlb\n return wlb\n else:\n return 0", "def get_weights(mc_par, spectral_par):\n r = rate(\"PowerLaw\",\n mc_par['emin'], mc_par['emax'],\n spectral_par, mc_par['cone'], mc_par['area_sim'])\n\n w = weight(\"PowerLaw\",\n mc_par['emin'], mc_par['emax'],\n mc_par['sp_idx'], r,\n mc_par['sim_ev'], spectral_par)\n return w", "def sweep50W(self):\n return 25.9", "def get_gains(self):\n return tuple([lib.is_SetHWGainFactor(self.hcam,0x8000+i,0)/100 for i in range(4)])", "def weight_distribution(self):\n all_scores = []\n for zettel in self.lemma_tokens:\n scores = []\n for word in zettel:\n cur_tf_idf = self.tf_idf_scores[word[0]] / 3 #range: 0-3+\n if word[1] == 'NG':\n word_list = re.split(\" \", word[0])\n cur_word_score = 0\n i = 0\n for new_word in word_list:\n cur_word_score += self.word_scores[new_word]\n i += 1\n cur_word_score = cur_word_score / i / 2 #range: 0-2+\n else:\n cur_word_score = self.word_scores[word[0]] / 2 #range: 0-2+\n cur_keyword_score = self.keyword_scores[word[0]] / 4 #0-4+\n cur_text_rank = self.text_ranks[word[0]] / 10 #range: 0-12+\n cur_pos_score = self.pos_scores[word[0]]\n cur_area_score = self.z_area_scores[word[0]]\n cur_total_score = ((cur_tf_idf * self.score_weights[0]) + (cur_word_score * self.score_weights[1]) +\n (cur_keyword_score * self.score_weights[2]) + (cur_text_rank * self.score_weights[3]) +\n (cur_pos_score * self.score_weights[4]) + (cur_area_score * self.score_weights[5])) / 6\n scores.append(cur_total_score)\n all_scores.append(scores)\n return all_scores", "def 
_weight_blackman(r,l):\n w = 0.42 + 0.5*np.cos(2*pi*r/l) + 0.08*np.cos(4*pi*r/l) # fase lag-> sign change\n w[np.absolute(r)>l/2.]=0\n return w", "def get_weights(self):\n return self._weight", "def bragg_law(self, d_list, wavelength):\r\n new_twotheta = []\r\n for d in d_list:\r\n new_twotheta.append(2*math.degrees(np.arcsin(wavelength/(2*d))))\r\n return new_twotheta", "def halvesies(numbers):\n halves = []\n for number in numbers:\n halves.append(number / 2)\n return halves" ]
[ "0.6625915", "0.62661403", "0.6248362", "0.6228295", "0.6228295", "0.6228295", "0.6188413", "0.61738515", "0.6153207", "0.6063258", "0.5998551", "0.5988195", "0.59823006", "0.59580696", "0.59580696", "0.59547997", "0.59507", "0.5947943", "0.5947943", "0.5919658", "0.58991927", "0.58605516", "0.5852971", "0.5817886", "0.5795634", "0.5790727", "0.57895565", "0.578825", "0.5769492", "0.57573575", "0.5736708", "0.5736708", "0.57324857", "0.5682334", "0.5679195", "0.5679195", "0.5679195", "0.5679195", "0.56732917", "0.567305", "0.5664927", "0.5649527", "0.564573", "0.564563", "0.5643003", "0.5603962", "0.5598853", "0.5598853", "0.55832905", "0.5576041", "0.5576041", "0.5575604", "0.5574894", "0.5568424", "0.5568204", "0.55599695", "0.5541256", "0.5524414", "0.5510403", "0.5510014", "0.55021214", "0.5498582", "0.5495033", "0.5490775", "0.5479448", "0.54776824", "0.54725575", "0.5465294", "0.54639304", "0.5459682", "0.5453443", "0.54527575", "0.5450829", "0.54457176", "0.5439841", "0.54301053", "0.5421594", "0.5421356", "0.5421291", "0.54208004", "0.5417405", "0.54103726", "0.53964484", "0.53804815", "0.53739583", "0.53633046", "0.53620046", "0.53599995", "0.5356921", "0.5352696", "0.5351429", "0.53472257", "0.5344464", "0.5343282", "0.5338273", "0.5334919", "0.53336513", "0.5332953", "0.53309387", "0.532186" ]
0.78056127
0
it gets the list of "Delta_w"
def get_Delta_weigths():
    ls = []
    for i_lay in range(1, len(layers)):
        ls.append(layers[i_lay]["Delta_w"])
    return ls
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getdelta(self):\n\t\tmyhmag.initializehelmholtz()\n\t\tabar = 13.714285714285715\n\t\tzbar = abar/2.0\n\t\tself.data[\"delta\"] = np.zeros(len(self.data[\"rho\"]))\n\t\tfor i in range(len(self.data[\"rho\"])):\n\t\t\tadgradred,hydrograd,my_nu,my_alpha,self.data[\"delta\"][i],my_gamma1,my_cp,my_cph,my_c_s,failtrig = myhmag.gethelmgrads(self.data[\"T\"][i], self.data[\"rho\"][i], 0.,abar,zbar,True)", "def updateweigths():\n for i_lay in range(1, len(layers)):\n layers[i_lay][\"weigths\"] += layers[i_lay][\"Delta_w\"]", "def getweigths():\n ls = []\n for i_lay in range(1, len(layers)):\n ls.append(layers[i_lay][\"weigths\"])\n return ls", "def _get_current_delta(model):\n delta = []\n _delta = []\n for i in MILPSolver.prob.nn.layers:\n (s, e) = vmodel.get_var_indices(i.depth, 'delta')\n d = model._vars[s:e]\n _d = np.asarray(model.cbGetNodeRel(d))\n delta.append(d)\n _delta.append(_d)\n\n return delta, _delta", "def _w_diff_dcm(self, otherframe):\n dcm2diff = self.dcm(otherframe)\n diffed = dcm2diff.diff(dynamicsymbols._t)\n angvelmat = diffed * dcm2diff.T\n w1 = trigsimp(expand(angvelmat[7]), recursive=True)\n w2 = trigsimp(expand(angvelmat[2]), recursive=True)\n w3 = trigsimp(expand(angvelmat[3]), recursive=True)\n return -Vector([(Matrix([w1, w2, w3]), self)])", "def calculate_delta_weights(self, out_tensors: List[Tensor], in_tensors: List[Tensor]) -> List[Tensor]:\n return None", "def getOscDelta():\n tmp_channels = GetAllSelCh(True)\n delta_store = {}\n for ch in tmp_channels:\n if isTubeChannel(ch) and GetOscType(ch):\n name = GetChName(ch).lower()\n color_name = getChannelColor(name)\n osc_chase = GetOscChase(ch)\n\n if osc_chase and color_name not in delta_store:\n for x in range(ch + 1, 512):\n if isTubeChannel(x) and GetOscType(x):\n namex = GetChName(x).lower()\n color_name = getChannelColor(namex)\n osc_chasex = GetOscChase(x)\n chase_value = abs(osc_chase - osc_chasex)\n delta_store[color_name] = chase_value\n if \"first\" not in delta_store:\n delta_store[\"first\"] = chase_value\n break\n\n if len(delta_store) == 4:\n break\n return delta_store", "def delta(self):\n return self.get_dim_attribute('delta')", "def delta(self) -> None:", "def delta(self):\r\n return self.nd1()", "def deltas(self):\n return self._deltas", "def get_w(self):\n raise NotImplementedError", "def test_delta_layer_iterator(self):\n\n\t\tdelta_iter = self.watcher.make_delta_layer_iterator(base_model=self.model, model=self.model)\n\t\n\t\tfor ww_layer in delta_iter:\n\t\t\t\n\t\t\tprint(ww_layer.layer_id, ww_layer.name)\n\t\t\tself.assertEquals(1, len(ww_layer.Wmats))\n\t\t\tW = ww_layer.Wmats[0]\n\t\t\t\n\t\t\tlayer_norm = np.linalg.norm(W)\n\t\t\tlayer_sum = np.sum(W)\n\n\t\t\tself.assertAlmostEqual(0.0, layer_norm)\n\t\t\tself.assertAlmostEqual(0.0, layer_sum)\n\n\t\treturn", "def get_transitions(self, w):\n return np.array([((i, j), self.dij(j, i), np.abs(e1 - e2), 0)\n for j, e1 in enumerate(self.ev)\n for i, e2 in enumerate(self.ev)\n if np.isclose(e1 - e2, w)], dtype=DTYPE_JUMP)", "def deltai(self,i):\n\t\tif self.deltas[i]!=[]:\n\t\t\treturn self.deltas[i]\n\t\telse:\n\t\t\th=[]\n\t\t\tzgi = [self[i][j][0] for j in range(len(self[i]))]\n\t\t\tzgim1 = [self[i-1][j][0] for j in range(len(self[i-1]))]\n\n\t\t\tfor z in zgim1:\n\t\t\t\tif zgi.count(z)>0:\n\t\t\t\t\th+=[[z,self[i][zgi.index(z)][1]-self[i-1][zgim1.index(z)][1]]]\n\t\t\t\telse:\n\t\t\t\t\th+=[[z,-self[i-1][zgim1.index(z)][1]]]\n\t\t\tfor z in zgi:\n\t\t\t\tif 
zgim1.count(z)==0:\n\t\t\t\t\th+=[[z,self[i][zgi.index(z)][1]]]\n\t\t\tself.deltas[i]=h\n\t\t\treturn h", "def compute_velocities(self):\n Ddemo_trajs = []\n\n for demo_traj in self._demo_trajs:\n d_traj = np.diff(demo_traj, axis=0)/self._dt\n #append last element to adjust the length\n d_traj = np.hstack([d_traj, d_traj[-1]])\n #add it to the list\n Ddemo_trajs.append(d_traj)", "def extract_wind(source,la,lo,lats,lons,wd,ws):\r\n lat = source[la]\r\n lon = source[lo]\r\n wdir = []\r\n wspd = [] \r\n for coor in zip(lon,lat): \r\n in_lon = coor[0]\r\n in_lat = coor[1]\r\n # since lons are 0 thru 360, convert to -180 thru 180\r\n converted_lons = lons - ( lons.astype(np.int32) / 180) * 360\r\n # get cell of facility\r\n lat_idx = geo_idx(in_lat, lats)\r\n lon_idx = geo_idx(in_lon, converted_lons)\r\n #extract winddirection and wind speed from that cell\r\n d = wd[:,lat_idx,lon_idx][0]\r\n wdir.append(d)\r\n s = ws[:,lat_idx,lon_idx][0]\r\n wspd.append(s)\r\n \r\n return wdir,wspd", "def get_delta_arrays(self):\n delta_as = []\n delta_ds = []\n for sites, deltas in zip(self.nu, self.delta):\n delta_as.extend([deltas[i] for i, nu_s in enumerate(sites) if nu_s < 0])\n delta_ds.extend([deltas[i] for i, nu_s in enumerate(sites) if nu_s > 0])\n return delta_as, delta_ds", "def w_to_d(self, wx, wy):\r\n dx = (wx - self.wxmin) * self.xscale + self.dxmin\r\n dy = (wy - self.wymin) * self.yscale + self.dymin\r\n return dx, dy", "def _get_deltas(event):\n delta_x = round(event.deltaX())\n delta_y = round(event.deltaY())\n delta_z = round(event.deltaZ())\n return delta_x, delta_y, delta_z", "def get_weights(self):\n return [self.W]", "def get_weights(self):\n return [self.W]", "def _update_weights(self, alpha, delta):\n res = []\n for j, weight in enumerate(self._weights):\n self._weights[j] = weight + (alpha * delta * self._g_prime(self._in_j))\n #print(\"Prev weight: {} New weight: {}\".format(weight, self._weights[j]))\n res.append(self._weights[j] - weight)\n return res[0]", "def worddist(self):\n #return (self.n_z_t + self.beta) / (self.n_z[:, numpy.newaxis] + self.V * self.beta), len(self.n_z)\n return (self.n_z_t + self.n_w_d + self.beta) / (self.n_z[:, numpy.newaxis] + self.n_w[:, numpy.newaxis] + self.V * self.beta), len(self.n_z)", "def extract_delta_Q_skewness(batch,index,start_cycle,end_cycle):\n from scipy.stats import skew\n X= []\n for ind in index:\n cell_no = list(batch.keys())[ind]\n Qd_100 = batch[cell_no]['cycles'][str(end_cycle-1)]['Qdlin']\n Qd_10 = batch[cell_no]['cycles'][str(start_cycle-1)]['Qdlin']\n delta = Qd_100-Qd_10\n # delta_rv_mean = delta - np.average(delta)\n # temp = np.average(np.power(delta_rv_mean,3)) / np.power(np.sum(np.power(delta_rv_mean,2)),1.5)\n # Note: Supplementary formular is wrong\n temp = skew(delta)\n skewness = log(abs(temp),10)\n X.append(skewness)\n X = np.reshape(X,(-1,1))\n return X\n pass", "def getDelta(self,u,w,v=None):\r\n if v==None :\r\n return self._deltaDot[u,w]\r\n elif self._sigma[u,v]==0 or self._sigma[u,w]==0 or self._sigma[w,v]==0:\r\n return 0.0\r\n elif (self._d[u,v]==self._d[u,w]+self._d[w,v]):\r\n return 1.0 * self._sigma[u,w]*self._sigma[w,v]/self._sigma[u,v]\r\n else:\r\n return 0.0", "def calc_walked_distance(self, window_size=0):\n walked_distance = []\n walked_distance_window = []\n for i in range(len(self)):\n vel = self.get_absolute_velocity(i)\n if vel is None: vel = 0\n walked_distance.append(\n vel + (walked_distance[i-1] if i>0 else 0)\n )\n walked_distance_window.append(\n walked_distance[i] - 
(walked_distance[i-window_size] if i>window_size else 0)\n )\n return walked_distance, walked_distance_window", "def get_lw_to_sw_array(self):\n if self.lw_to_sw_array is None:\n lw_to_sw_array = self.basis.get_dO_I_ddelta_alpha(self.sw_survey.geo,self.sw_survey.get_dO_I_ddelta_bar_array())\n else:\n lw_to_sw_array = self.lw_to_sw_array\n return lw_to_sw_array", "def get_weights(self):\n return [self.w, self.b]", "def get_resul(self):\n return {'W': self.W}", "def get_resul(self):\n return {'W': self.W}", "def get_resul(self):\n return {'W': self.W}", "def get_resul(self):\n return {'W': self.W}", "def CC_wdw(self):\n # Setup param\n loc = 'TSdata'\n if 'single' == self.newParam['survey_type']:\n TS_len = dt.utilities.DB_pd_data_load(self.Database, loc).shape[0]\n elif 'multiple' == self.newParam['survey_type']:\n TS_group = dt.utilities.DB_group_names(self.Database, group_name = loc)[0]\n TS_len = dt.utilities.DB_pd_data_load(self.Database, loc+'/'+TS_group).shape[0]\n\n param = self.newParam\n\n # Assign TS processing length to end_wdws if given\n if param['end_wdws']:\n TS_sig_len = param['end_wdws']\n else:\n TS_sig_len = TS_len\n\n ERROR_MESSAGE = 'The length of a TS signal to be processed is', TS_sig_len, \\\n 'which is < end of the last window'\n\n # Calculate wdwPos for overlapping windows of ww_ol if wdwPos is False\n if param['wdwPos'][0] is False:\n # Error checks\n if TS_sig_len < self.newParam['ww'][0]:\n raise Warning(ERROR_MESSAGE)\n\n wdwStep = np.floor(param['ww'][0] *\n (100 - param['ww_ol']) / 100)\n\n if self.verbose: print('* Length fo TSdata', TS_len)\n\n max_wdwPos = TS_sig_len - param['ww'][0] + 1\n wdwStarts = np.arange(0 + param['sta_wdws'], max_wdwPos, wdwStep).astype(int)\n\n if self.verbose: print('* The step in window potions is %s sample points' % wdwStep)\n if self.verbose: print('* The max window postions is %s sample points'% max_wdwPos)\n\n param['wdwPos'] = [ [wdw_start, wdw_start + param['ww'][0]] for\n wdw_start in wdwStarts ]\n\n # Only update wdwPos structure if not already done so\n elif np.array(param['wdwPos'][0]).shape == ():\n param['wdwPos'] = [ [wdw_start, wdw_start + ww] for wdw_start,ww in\n zip(param['wdwPos'], param['ww'])]\n\n self.newParam['wdwPos'] = param['wdwPos']", "def _window(self, get_lims=False):\n\t\timg_h, img_w = self.od_model.img_shape\n\t\th_llim = 0\n\t\tw_llim = img_w // 3\n\t\th_ulim = img_h - (img_h // 4)\n\t\tw_ulim = 1- wllim\n\n\t\tif get_lims:\n\t\t\treturn (h_llim, h_ulim), (w_llim, w_ulim)\n\n\t\twindow = slice(h_llim, h_ulim), slice(w_llim, w_ulim)\n\t\treturn window", "def calculateWeights(self):\n return self.distances #En lo que encontramos una funcion que represente", "def extract_delta_Q_2V(batch,index,start_cycle,end_cycle):\n X= []\n for ind in index:\n cell_no = list(batch.keys())[ind]\n Qd_100 = batch[cell_no]['cycles'][str(end_cycle-1)]['Qdlin']\n Qd_10 = batch[cell_no]['cycles'][str(start_cycle-1)]['Qdlin']\n delta = Qd_100-Qd_10\n delta_q_2v = log(abs(delta[-1]),10)\n X.append(delta_q_2v)\n X = np.reshape(X,(-1,1))\n return X\n pass", "def get_weights(self):\n if self.W_tickers is not None:\n return [w for w in [self.W_fwd_conc, self.W_bwd_conc] + list(self.W_tickers.values())\n if w is not None]\n else:\n return [w for w in [self.W_fwd_conc, self.W_bwd_conc] if w is not None]", "def get_weights(self):\n if self.W_tickers is not None:\n return [w for w in [self.W_fwd_conc, self.W_bwd_conc] + list(self.W_tickers.values())\n if w is not None]\n else:\n return [w for w in [self.W_fwd_conc, 
self.W_bwd_conc] if w is not None]", "def get_weights(self):\n if self.W_tickers is not None:\n return [w for w in [self.W_fwd_conc, self.W_bwd_conc] + list(self.W_tickers.values())\n if w is not None]\n else:\n return [w for w in [self.W_fwd_conc, self.W_bwd_conc] if w is not None]", "def _mpo_get_d(self, W):\n din = W.shape[3]\n dout = W.shape[1]\n return dout, din", "def get_w(self):\n return self.w", "def get_weights(self):\n if self.W_tickers is not None:\n return [w for w in [self.W_fwd_conc, self.W_bwd_conc, self.W_red_rec] + list(self.W_tickers.values())\n if w is not None]\n else:\n return [w for w in [self.W_fwd_conc, self.W_bwd_conc, self.W_red_rec] if w is not None]", "def get_pos_dtdt(self) -> WAVector:\n pass", "def _get_delta(self, out_patterns, expecteds=None, weightLLs=None):\n assert len(out_patterns) == len(self.layers), \\\n \"length of out_patterns must match number of layers\"\n\n if expecteds is None:\n expecteds = [None] * len(self.layers)\n\n if weightLLs is None:\n weightLLs = [False] * len(self.layers)\n\n assert len(expecteds) == len(self.layers), \\\n \"length of expected must match number of layers\"\n\n return [layer._get_delta(out_pattern, expected, weightLL)\n for (layer, out_pattern, expected, weightLL)\n in zip(self.layers, out_patterns, expecteds, weightLLs)]", "def watershedlist():\n opts = watersheds_db()\n return [(opts[opt]['name'] + ' (' + opts[opt]['delineation'] + ')', opt) for opt in opts]", "def gen_delta(self):\n delta = self.delta.gen_delta(self.mask.good_pix, self.mask.bad_pix,\n self.params.nside, self.params.npix)\n return delta", "def get_offsets():\n \n offsets = dict()\n offsets['leiptr'] = [0.0, -0.005, 'left']\n offsets['gjoll'] = [0.15, -0.002, 'left']\n offsets['gd1'] = [0.15, -0.002, 'left']\n offsets['phlegethon'] = [0.0, 0.005, 'center']\n offsets['ylgr'] = [0.15, -0.002, 'left']\n offsets['wambelong'] = [0.0, -0.005, 'left']\n offsets['fimbulthul'] = [0.15, -0.002, 'left']\n offsets['ophiuchus'] = [0.0, -0.005, 'center']\n offsets['elqui'] = [0.15, -0.002, 'left']\n offsets['svol'] = [0.0, -0.004, 'right']\n offsets['ravi'] = [-0.1, 0.002, 'right']\n offsets['sylgr'] = [0.15, -0.002, 'left']\n offsets['jhelum'] = [0.15, -0.002, 'left']\n offsets['indus'] = [0.15, -0.002, 'left']\n offsets['phoenix'] = [0.0, -0.004, 'right']\n offsets['slidr'] = [0.15, 0.002, 'left']\n offsets['atlas'] = [0.1, -0.003, 'left']\n offsets['aliqa_uma'] = [0.15, -0.003, 'left']\n offsets['turbio'] = [-0.15, 0.00, 'right']\n offsets['turranburra'] = [-0.0, -0.003, 'right']\n offsets['fjorm'] = [0.0, -0.004, 'right']\n offsets['triangulum'] = [0.2, -0.005, 'center']\n offsets['willka_yaku'] = [-0.2, 0.005, 'center']\n \n return offsets", "def get_deltaF_spec(self):\n ind = np.where(self.spec_id == 1)\n spec_len = []\n deltaF_ordered = np.empty(shape=(np.size(self.avail_spec),), dtype=np.ndarray)\n for (i,j) in enumerate(self.avail_spec):\n ind = np.where(self.spec_id == j)\n deltaF_ordered[i]= self.spec[:,4][ind]\n\n return deltaF_ordered", "def ou_change(self,dt,mu,L,delta):\n\n dW = self.bm_change(dt=dt,delta=delta)\n ds = L * (mu - self.Coord) * dt + dW\n return ds", "def get_params(self):\r\n return (self.w[:-1], self.w[-1])", "def get_deletes_list(self, w):\n\n deletes = []\n queue = [w]\n for d in range(self.max_edit_distance):\n temp_queue = []\n for word in queue:\n if len(word) > 1:\n for c in range(len(word)): # character index\n word_minus_c = word[:c] + word[c + 1:]\n if word_minus_c not in deletes:\n deletes.append(word_minus_c)\n if 
word_minus_c not in temp_queue:\n temp_queue.append(word_minus_c)\n queue = temp_queue\n\n return deletes", "def get_deletes_list(self, w):\n\n deletes = []\n queue = [w]\n for d in range(self.max_edit_distance):\n temp_queue = []\n for word in queue:\n if len(word) > 1:\n for c in range(len(word)): # character index\n word_minus_c = word[:c] + word[c + 1:]\n if word_minus_c not in deletes:\n deletes.append(word_minus_c)\n if word_minus_c not in temp_queue:\n temp_queue.append(word_minus_c)\n queue = temp_queue\n\n return deletes", "def uw(self):\n return sm.unitvec(self.w)", "def getWeights(self):\n return self.W1, self.W2", "def w_DE(self, z):\n return self.w0+self.wa*z/(1.+z)", "def get_relative_wires(boards, direction):\n\t\n\tb2c = dict(boards)\n\t\n\tout = []\n\t\n\tfor board, coord in boards:\n\t\tout.append((coord, b2c[board.follow_wire(direction)] - coord))\n\t\n\treturn out", "def get_delta_events(self, win_events_model, delta_days, model_event_id):\n delta_days = int(delta_days)\n ystd = datetime.date.today() - datetime.timedelta(days=1)\n ystd_2 = datetime.date.today() - datetime.timedelta(days=delta_days)\n ystd_number = win_events_model.objects.filter(event_date=ystd).filter(event_id=model_event_id)\n ystd_2_number = win_events_model.objects.filter(event_date=ystd_2).filter(event_id=model_event_id)\n return len(ystd_number) - len(ystd_2_number)", "def distances(self):", "def build_W(points):\n return None", "def _deltas(self):\n istat = self.init\n lstat = self.stats\n uptime = self._uptime()\n delta = float(uptime) - float(self.uptime)\n self.uptime = uptime\n \n for dev in lstat.keys():\n if not istat.has_key(dev):\n del lstat[dev]\n continue\n idev = istat[dev]\n ldev = lstat[dev]\n\n for key,value in ldev.items():\n if re.search(r'(^major\\Z|^minor\\Z)',key):\n continue\n \n if not idev.has_key(key):\n print \"Different keys in statistics\"\n sys.exit(1)\n if not str(value).isdigit and \\\n not str(ldev[key]).isdigit(): \n print \"value of key is not a number\"\n sys.exit(1)\n \n if ldev[key] == idev[key]:\n ldev[key] = self._sprintf('%.2f', 0)\n elif int(delta) > 0:\n ldev[key] = self._sprintf('%.2f',float((ldev[key] - idev[key]) / delta))\n else:\n\t ldev[key] = self._sprintf('%.2f', float(ldev[key] - idev[key]))\n idev[key] = value\n return idev", "def initW_ard(self, alpha=None):\n if alpha is None:\n alpha = self.initAlpha()\n W = [ s.zeros((self.D[m],self.K)) for m in range(self.M) ]\n for m in range(self.M):\n for k in range(self.K):\n W[m][:,k] = norm.rvs(loc=0, scale=1/s.sqrt(alpha[m][k]), size=self.D[m])\n return W,alpha", "def find_delta(w, bw):\n maxabs_w = np.max(np.abs(w.d)) + np.finfo(np.float32).eps\n\n if bw > 4:\n return 2**(np.ceil(np.log2(maxabs_w/(2**(bw-1)-1))))\n else:\n return 2**(np.floor(np.log2(maxabs_w/(2**(bw-1)-1))))", "def create_w_tbl(index: int, entries: int) -> List[float]:\n min_w = StackupTestHelper.index_to_min_width_fn(index)\n return list(map(lambda x: min_w*x, range(1, 4 * entries + 1, 4)))", "def w(self):\n return self._data[3]", "def subtract(wb, wl):\n return set(wb) - set(wl)", "def get_dcm(self):\n control_list = []\n for control in self.__control_list:\n if (control[0] != 'control'):\n control_list.append(control)\n return control_list", "def get_mvts(self, plateau):\n if self.type == \"p\": #Pion\n if self.color == \"w\":\n diags = [[self.x-1, self.y+1],[self.x+1, self.y+1]] #Mouvements possibles de diagonales\n faces = [[self.x, self.y+1]] #Mouvements possibles de face\n if not self.moved: #Si le pion n'a pas encore bougé de 
la partie\n faces.append([self.x, self.y+2])\n else:\n diags = [[self.x-1, self.y-1], [self.x+1, self.y-1]]\n faces = [[self.x, self.y-1]] #Mouvements possibles de \n if not self.moved:\n faces.append([self.x, self.y-2])\n pos = [] #Position de déplacement validées\n for d in diags:\n if verif_case(d[0], d[1]): #Si la case est sur le plateau \n pion = plateau.get_pion(d[0],d[1])\n if pion != None and pion.color != self.color: #Si il y a un pion ennemi\n pos.append(d)\n for f in faces: \n if verif_case(f[0],f[1]):\n pion = plateau.get_pion(f[0], f[1])\n if pion == None: #Si il n'y a pas de pion\n pos.append(f)\n return pos\n elif self.type == \"t\": #Tour\n pos = []\n dir = [[1,0],[-1,0],[0,1],[0,-1]] #4 directions possibles\n for d in dir:\n x,y = self.x+d[0],self.y+d[1] #Projection de position\n while verif_case(x,y): #Tant que (x, y) est sur le plateau\n pion = plateau.get_pion(x, y)\n if pion != None: #Si il y a un pion\n if pion.color != self.color: #Si il n'est pas allié\n pos.append([x,y])\n break\n pos.append([x,y])\n x += d[0]\n y += d[1]\n return pos\n elif self.type == \"c\": #Cavalier\n l = [-2,-1,1,2]\n mvts = [[x,y] for x in l for y in l if abs(x)!=abs(y)]\n pos = []\n for m in mvts:\n x = self.x + m[0]\n y = self.y + m[1]\n if verif_case(x,y):\n pion = plateau.get_pion(x, y)\n if pion == None or pion.color != self.color:\n pos.append([x, y])\n return pos\n elif self.type == \"f\": #Fou\n dir = [[1,1],[-1,1],[-1,-1],[1,-1]]\n pos = []\n for d in dir:\n x,y = self.x+d[0],self.y+d[1]\n while verif_case(x,y):\n pion = plateau.get_pion(x, y)\n if pion != None:\n if pion.color != self.color:\n pos.append([x,y])\n break\n pos.append([x,y])\n x += d[0]\n y += d[1]\n return pos\n elif self.type == \"k\": #Roi\n mvts = [[1,0],[-1,1],[0,-1],[-1,-1],[-1,0],[-1,1],[0,1],[1,1]] #4 mouvements possibles\n pos = []\n for m in mvts:\n x = self.x + m[0]\n y = self.y + m[1]\n if verif_case(x, y):\n pion = plateau.get_pion(x, y)\n if pion == None or pion.color != self.color:\n pos.append([self.x + m[0], self.y + m[1]])\n return pos\n elif self.type == \"q\": #Dame\n pos = []\n dir = [[1,0],[1,-1],[0,-1],[-1,-1],[-1,0],[-1,1],[0,1],[1,1]]\n for d in dir:\n x,y = self.x+d[0],self.y+d[1]\n while verif_case(x,y):\n pion = plateau.get_pion(x, y)\n if pion != None:\n if pion.color != joueur:\n pos.append([x,y])\n break\n pos.append([x,y])\n x += d[0]\n y += d[1]\n return pos", "def test_delta_layer_iterator_with_filters(self):\n\n\t\tdelta_iter = self.watcher.make_delta_layer_iterator(base_model=self.model, model=self.model, filters=[17])\n\t\n\t\tnum_layers = 0\n\t\tfor ww_layer in delta_iter:\n\t\t\t\n\t\t\tprint(ww_layer.layer_id, ww_layer.name)\n\t\t\tself.assertEquals(1, len(ww_layer.Wmats))\n\t\t\tW = ww_layer.Wmats[0]\n\t\t\t\n\t\t\tlayer_norm = np.linalg.norm(W)\n\t\t\tlayer_sum = np.sum(W)\n\n\t\t\tself.assertAlmostEqual(0.0, layer_norm)\n\t\t\tself.assertAlmostEqual(0.0, layer_sum)\n\t\t\tnum_layers += 1\n\t\t\t\n\t\tself.assertEqual(1, num_layers)\n\n\n\t\treturn", "def get_hardwired_speed_weights(self):\n \n phase_shift=self.speed_phase_shift\n \n # row 1 has the weights of speed cells to grid cell 1\n self.W_speed_east=np.zeros_like(self.W_ee) \n self.W_speed_west=np.zeros_like(self.W_ee) \n self.W_speed_north=np.zeros_like(self.W_ee) \n self.W_speed_south=np.zeros_like(self.W_ee) \n\n if self.use_eight_directions is True:\n self.W_speed_north_east=np.zeros_like(self.W_ee) \n self.W_speed_north_west=np.zeros_like(self.W_ee) \n self.W_speed_south_east=np.zeros_like(self.W_ee) \n 
self.W_speed_south_west=np.zeros_like(self.W_ee) \n\n\n for phase_idx,phase in enumerate(self.gp.phases):\n shifted_north_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(np.pi/2.),self.gp.phases)\n shifted_south_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(-np.pi/2.),self.gp.phases)\n shifted_east_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(0),self.gp.phases)\n shifted_west_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(-np.pi),self.gp.phases)\n\n self.W_speed_north[phase_idx,:]=self.W_ee[shifted_north_phase_idx,:]\n self.W_speed_south[phase_idx,:]=self.W_ee[shifted_south_phase_idx,:]\n self.W_speed_east[phase_idx,:]=self.W_ee[shifted_east_phase_idx,:]\n self.W_speed_west[phase_idx,:]=self.W_ee[shifted_west_phase_idx,:] \n \n if self.use_eight_directions is True:\n shifted_north_east_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(np.pi/4),self.gp.phases)\n shifted_north_west_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(np.pi*3/4),self.gp.phases)\n shifted_south_east_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(-np.pi/4),self.gp.phases)\n shifted_south_west_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(-np.pi*3/4),self.gp.phases)\n \n self.W_speed_north_east[phase_idx,:]=self.W_ee[shifted_north_east_phase_idx,:]\n self.W_speed_north_west[phase_idx,:]=self.W_ee[shifted_north_west_phase_idx,:]\n self.W_speed_south_east[phase_idx,:]=self.W_ee[shifted_south_east_phase_idx,:]\n self.W_speed_south_west[phase_idx,:]=self.W_ee[shifted_south_west_phase_idx,:]", "def w(self):\n return self._w", "def getMotionsDictionaryList(self, selLegs,steps=100):\n digBewegungen={}\n for i, wert in selLegs.items():\n start= self.legScale[int(i)].get()\n ziel = float(wert)\n digBewegungen[i] = self.xSteps(start,ziel,steps=steps)\n return digBewegungen", "def _derW(self, w, x, y, z):\n if _isscalar(w):\n w_pos = max(min(self.wSearchFunc(self.w_list, w), self.w_n - 1), 1)\n x_pos = max(min(self.xSearchFunc(self.x_list, x), self.x_n - 1), 1)\n y_pos = max(min(self.ySearchFunc(self.y_list, y), self.y_n - 1), 1)\n z_pos = max(min(self.zSearchFunc(self.z_list, z), self.z_n - 1), 1)\n else:\n w_pos = self.wSearchFunc(self.w_list, w)\n w_pos[w_pos < 1] = 1\n w_pos[w_pos > self.w_n - 1] = self.w_n - 1\n x_pos = self.xSearchFunc(self.x_list, x)\n x_pos[x_pos < 1] = 1\n x_pos[x_pos > self.x_n - 1] = self.x_n - 1\n y_pos = self.ySearchFunc(self.y_list, y)\n y_pos[y_pos < 1] = 1\n y_pos[y_pos > self.y_n - 1] = self.y_n - 1\n z_pos = self.zSearchFunc(self.z_list, z)\n z_pos[z_pos < 1] = 1\n z_pos[z_pos > self.z_n - 1] = self.z_n - 1\n i = w_pos # for convenience\n j = x_pos\n k = y_pos\n l = z_pos\n beta = (x - self.x_list[j - 1]) / (self.x_list[j] - self.x_list[j - 1])\n gamma = (y - self.y_list[k - 1]) / (self.y_list[k] - self.y_list[k - 1])\n delta = (z - self.z_list[l - 1]) / (self.z_list[l] - self.z_list[l - 1])\n dfdw = (\n (\n (1 - beta)\n * (1 - gamma)\n * (1 - delta)\n * self.f_values[i, j - 1, k - 1, l - 1]\n + (1 - beta) * (1 - gamma) * delta * self.f_values[i, j - 1, k - 1, l]\n + (1 - beta) * gamma * (1 - delta) * self.f_values[i, j - 1, k, l - 1]\n + (1 - beta) * gamma * delta * self.f_values[i, j - 1, k, l]\n + beta * (1 - gamma) * (1 - delta) * self.f_values[i, j, k - 1, l - 1]\n + beta * (1 - gamma) * delta * self.f_values[i, j, k - 1, l]\n + beta * gamma * (1 - delta) * self.f_values[i, j, k, l - 1]\n + beta * gamma * delta * self.f_values[i, j, k, l]\n )\n - (\n (1 - beta)\n * (1 - gamma)\n * (1 - delta)\n * self.f_values[i - 1, j - 1, k - 1, l - 1]\n + (1 - 
beta)\n * (1 - gamma)\n * delta\n * self.f_values[i - 1, j - 1, k - 1, l]\n + (1 - beta)\n * gamma\n * (1 - delta)\n * self.f_values[i - 1, j - 1, k, l - 1]\n + (1 - beta) * gamma * delta * self.f_values[i - 1, j - 1, k, l]\n + beta\n * (1 - gamma)\n * (1 - delta)\n * self.f_values[i - 1, j, k - 1, l - 1]\n + beta * (1 - gamma) * delta * self.f_values[i - 1, j, k - 1, l]\n + beta * gamma * (1 - delta) * self.f_values[i - 1, j, k, l - 1]\n + beta * gamma * delta * self.f_values[i - 1, j, k, l]\n )\n ) / (self.w_list[i] - self.w_list[i - 1])\n return dfdw", "def weights(self) -> List[float]:", "def get_wind_values(self):\n return (\n int(self.data[2]), # dir\n float(self.data[3]) / 10, # gust\n float(self.data[4]) / 10, # avg\n float(self.data[5]) / 10, # chill\n )", "def update_velocities(self, wx, wy):\r\n self.wx = wx\r\n self.wy = wy", "def getMovement(embeddings,\n\t\t\t\tseries):\n\tmovements = []\n\tfor word1, word2 in zip(series[:-1], series[1:]):\n\t\tmovements.append(embeddings[word2] - embeddings[word1])\n\treturn movements", "def _derW(self, w, x, y, z):\n # This may look strange, as we call the derivativeX() method to get the\n # derivative with respect to w, but that's just a quirk of 4D interpolations\n # beginning with w rather than x. The derivative wrt the first dimension\n # of an element of wxInterpolators is the w-derivative of the main function.\n if _isscalar(x):\n y_pos = max(min(np.searchsorted(self.y_list, y), self.y_n - 1), 1)\n z_pos = max(min(np.searchsorted(self.z_list, z), self.z_n - 1), 1)\n alpha = (y - self.y_list[y_pos - 1]) / (\n self.y_list[y_pos] - self.y_list[y_pos - 1]\n )\n beta = (z - self.z_list[z_pos - 1]) / (\n self.z_list[z_pos] - self.z_list[z_pos - 1]\n )\n dfdw = (\n (1 - alpha)\n * (1 - beta)\n * self.wxInterpolators[y_pos - 1][z_pos - 1].derivativeX(w, x)\n + (1 - alpha)\n * beta\n * self.wxInterpolators[y_pos - 1][z_pos].derivativeX(w, x)\n + alpha\n * (1 - beta)\n * self.wxInterpolators[y_pos][z_pos - 1].derivativeX(w, x)\n + alpha * beta * self.wxInterpolators[y_pos][z_pos].derivativeX(w, x)\n )\n else:\n m = len(x)\n y_pos = np.searchsorted(self.y_list, y)\n y_pos[y_pos > self.y_n - 1] = self.y_n - 1\n y_pos[y_pos < 1] = 1\n z_pos = np.searchsorted(self.z_list, z)\n z_pos[z_pos > self.z_n - 1] = self.z_n - 1\n z_pos[z_pos < 1] = 1\n dfdw = np.zeros(m) + np.nan\n for i in range(1, self.y_n):\n for j in range(1, self.z_n):\n c = np.logical_and(i == y_pos, j == z_pos)\n if np.any(c):\n alpha = (y[c] - self.y_list[i - 1]) / (\n self.y_list[i] - self.y_list[i - 1]\n )\n beta = (z[c] - self.z_list[j - 1]) / (\n self.z_list[j] - self.z_list[j - 1]\n )\n dfdw[c] = (\n (1 - alpha)\n * (1 - beta)\n * self.wxInterpolators[i - 1][j - 1].derivativeX(w[c], x[c])\n + (1 - alpha)\n * beta\n * self.wxInterpolators[i - 1][j].derivativeX(w[c], x[c])\n + alpha\n * (1 - beta)\n * self.wxInterpolators[i][j - 1].derivativeX(w[c], x[c])\n + alpha\n * beta\n * self.wxInterpolators[i][j].derivativeX(w[c], x[c])\n )\n return dfdw", "def test_liemdframe_calc_deltaE(self):\n\n self.mdframe.calc_deltaE()\n self.assertTrue({'vdw_1', 'coul_1', 'vdw_2', 'coul_2', 'vdw_3',\n 'coul_3', 'vdw_4', 'coul_4', 'vdw_5', 'coul_5'}.issubset(set(self.mdframe.columns)))\n self.assertEqual(self.mdframe['vdw_1'].count(), 1000)", "def acW(self):\n return self.fuselageLength * self.posFraction", "def readWaveformNames(self):\n self.sendMessage('WLIST:SIZE?')\n ansr=self.readMessage()\n msg=[]\n for i in xrange (1,int(ansr)+1):\n msg.append('WLIST:NAME? 
'+str(i))\n self.sendMessage(msg)\n wnames = self.readMessage()\n names=re.findall('\".*?\"',wnames)\n strippednames=[]\n for name in names:\n strippednames.append(name.rstrip('\"').lstrip('\"'))\n return strippednames", "def get_wm_ws_Gx_bot(self):\n # BASICALLY SETS self.Gm1_bot, self.dGm1_dS_bot, self.Gt1_bot, self.dGt1_dS_bot \n z_u_r = self.grid_dict['z_u_r']\n z_u_w = self.grid_dict['z_u_w']\n [Ly,N] = self.b.shape\n #---> j-loop\n for j in range(Ly): \n self.kbl[j] = N # initialize search\n #-> end j-loop\n\n #--> k-loop\n for k in range(N-1,0,-1):\n k_w = k\n k_r = k-1\n # --> j loop \n for j in range(Ly):\n if z_u_r[j,k_r] - z_u_w[j,0] > self.hbbl[j]:\n self.kbl[j] = k_w\n\n #--> end k\n # --> end j\n\n\n '''\n Compute nondimenisonal shape function coefficeints Gx() by\n matching values and vertical derivatives of interior mixing\n coefficients at hbbl (sigma=1)\n '''\n\n self.Gm1_bot = np.zeros([Ly])\n self.dGm1_dS_bot = np.zeros([Ly])\n self.Gt1_bot = np.zeros([Ly])\n self.dGt1_dS_bot = np.zeros([Ly]) \n self.Av_bl_bot = np.zeros([Ly])\n self.dAv_bl_bot = np.zeros([Ly]) \n self.cff_up_bot = np.zeros([Ly])\n self.cff_dn_bot = np.zeros([Ly])\n\n\n\n\n\n self.wm_bot = np.zeros([Ly])\n self.ws_bot = np.zeros([Ly]) \n\n # CALCULATE ustar for the bottom based on bototm velocities\n \n \n \n # CALCULATE r_D\n self.r_D = TTTW_func.get_r_D(self.u,self.v,self.Zob,self.grid_dict) \n u = self.u\n v_upts = TTTW_func.v2u(self.v)\n \n ubar = np.mean(u,axis=1)\n vbar = np.mean(v_upts,axis=1)\n\n # --> j loop\n for j in range(Ly):\n # turbulent velocity sclaes with buoyancy effects neglected\n if self.CD_SWITCH:\n # DEPTH AVERAGED APPROACH\n uref = u[j,0]\n vref = v_upts[j,0]\n ustar2 = self.C_D * (uref**2 + vref**2)\n else:\n ustar2 = self.r_D[j] * np.sqrt(u[j,0]**2 + v_upts[j,0]**2)\n wm = self.vonKar * np.sqrt(ustar2)\n ws = wm\n\n self.wm_bot[j] = wm\n self.ws_bot[j] = ws\n \n k_w = self.kbl[j] \n z_bl = z_u_w[j,0] + self.hbbl[j]\n\n if z_bl < z_u_w[j,k_w-1]:\n k_w = k_w-1\n\n cff = 1. 
/ (z_u_w[j,k_w] - z_u_w[j,k_w-1])\n cff_up = cff * (z_bl - z_u_w[j,k_w])\n cff_dn = cff * (z_u_w[j,k_w] - z_bl)\n \n Av_bl = cff_up * self.Kv_old[j,k_w] + cff_dn * self.Kv_old[j,k_w-1]\n dAv_bl = cff * ( self.Kv_old[j,k_w] - self.Kv_old[j,k_w-1])\n self.Av_bl_bot[j] = Av_bl\n self.dAv_bl_bot[j] = dAv_bl\n\n\n self.Gm1_bot[j] = Av_bl / (self.hbbl[j] * wm + self.eps)\n self.dGm1_dS_bot[j] = np.min([0,-dAv_bl/(ws+self.eps)])\n\n At_bl = cff_up * self.Kt_old[j,k_w] + cff_dn * self.Kt_old[j,k_w-1]\n dAt_bl = cff * ( self.Kt_old[j,k_w] - self.Kt_old[j,k_w-1])\n self.Gt1_bot[j] = At_bl / (self.hbbl[j] * ws + self.eps)\n self.dGt1_dS_bot[j] = np.min([0,-dAt_bl/(ws+self.eps)])", "def dimensions():", "def list():\n return [Dock.OMNI, Dock.LEFT, Dock.RIGHT]", "def get_weights(self):\n return self.w", "def get_weights(self):\n return self.w", "def get_weights(self):\n return self.w", "def get_weights(self):\n return self.w", "def extract_delta_Q_kurtosis(batch,index,start_cycle,end_cycle):\n from scipy.stats import kurtosis\n X= []\n for ind in index:\n cell_no = list(batch.keys())[ind]\n Qd_100 = batch[cell_no]['cycles'][str(end_cycle-1)]['Qdlin']\n Qd_10 = batch[cell_no]['cycles'][str(start_cycle-1)]['Qdlin']\n delta = Qd_100-Qd_10\n # delta_rv_mean = delta - np.average(delta)\n # temp = np.average(np.power(delta_rv_mean,4)) / np.power(np.average(np.power(delta_rv_mean,2)),2)\n temp = kurtosis(delta,fisher=False)\n kurt = log(abs(temp),10)\n X.append(kurt)\n X = np.reshape(X,(-1,1))\n return X\n pass", "def calc_walked_distance_with_direction(self, window_size=0):\n walked_distance = []\n walked_distance_window = []\n\n for i in range(len(self)):\n vel = self.get_velocity_with_direction(i)\n if vel is None: vel = 0\n\n walked_distance.append( vel + (walked_distance[i-1] if i>0 else 0) )\n walked_distance_window.append( walked_distance[i] - (walked_distance[i-window_size] if i>window_size else 0) )\n \n return walked_distance, walked_distance_window", "def _adj(w):\r\n return (w[:d * d] - w[d * d:]).reshape([d, d])", "def delta(self):\r\n return 1 - xl.Refractive_Index_Re(self.compound, self.energy, self.density)", "def derivativeW(self, w, x, y, z):\n wa = np.asarray(w)\n xa = np.asarray(x)\n ya = np.asarray(y)\n za = np.asarray(z)\n return (\n self._derW(wa.flatten(), xa.flatten(), ya.flatten(), za.flatten())\n ).reshape(wa.shape)", "def define_windows(w, data):\n data_w1 = data[0:w, :]\n data_w2 = data[w:w * 2, :]\n data_w3 = data[w * 2:w * 3, :]\n data_w4 = data[w * 3:w * 4, :]\n data_w5 = data[w * 4:w * 5, :]\n data_w6 = data[w * 5:, :]\n\n return data_w1, data_w2, data_w3, data_w4, data_w5, data_w6", "def dwindow(window):\r\n \r\n h=window\r\n nh=len(h)\r\n lh=(nh-1)/2\r\n stepheight=(h[0]+h[-1])/2.\r\n ramp=float((h[-1]-h[0]))/nh\r\n h2=np.zeros(nh+2)\r\n h2[1:nh+1]=h-stepheight-ramp*np.arange(start=-lh,stop=lh+1,step=1)\r\n \r\n dwin=(h2[2:nh+2]-h2[0:nh])/2.+ramp\r\n dwin[0]=dwin[0]+stepheight\r\n dwin[-1]=dwin[-1]-stepheight\r\n \r\n return dwin", "def weights(self):\n return [x.numpy() for x in self.core.w]", "def _process_weno_nn_delta_layer(\n self,\n delta: Sequence[types.FlowFieldVal],\n ) -> types.FlowFieldVal:\n\n delta = tf.nest.map_structure(tf.abs, delta)\n\n # Small positive number to avoid division by zero. 
Set close to machine\n # precision.\n epsilon = np.finfo(types.NP_DTYPE).resolution\n max_delta_0 = tf.math.reduce_max(delta[0])\n max_delta_1 = tf.math.reduce_max(delta[1])\n max_val = tf.maximum(\n tf.maximum(max_delta_0, epsilon), tf.maximum(max_delta_1, epsilon)\n )\n if isinstance(delta[0], list):\n # Transpose the order of list of lists (get Z along outer axis and\n # features along inner axis).\n delta = [[row[i] for row in delta] for i in range(len(delta[0]))]\n delta = [tf.stack(delta_z, axis=2) for delta_z in delta]\n else: # For flow field as 3D tensor, get features along last axis\n delta = tf.stack(delta, axis=-1)\n delta = tf.nest.map_structure(lambda delta_z: delta_z / max_val, delta)\n # Add a column of ones to account for the bias layer.\n delta = tf.nest.map_structure(\n lambda delta_z: tf.concat( # pylint: disable=g-long-lambda\n [delta_z, tf.expand_dims(tf.ones_like(delta_z[..., 0]), axis=-1)],\n axis=-1,\n ),\n delta,\n )\n return delta", "def welding_parameters(self):\n ret_params = []\n\n _p = copy.deepcopy(self.job)\n\n if _p is None:\n return [WeldingState() for i in range(len(self.path.points))]\n\n for i in range(len(self.path.points)):\n rm = self.current_bead.get_modification(i, False)\n pm = self.current_bead.get_modification(i, True)\n\n if pm is not None:\n _p = self.welding_state_add(_p, pm.welding_parameters)\n\n if rm is not None:\n _p = self.welding_state_add(_p, rm.welding_parameters)\n\n ret_params.append(copy.deepcopy(_p))\n\n return ret_params", "def moments(self):", "def backward(self, delta_W_next):\n delta = delta_W_next * self._act.a_prime(self._z)\n delta_W = np.dot(delta, self._W.T)\n grad_w = np.dot(self._X.T, delta)\n grad_b = np.array(([np.sum(delta, axis=0)]))\n return grad_w, grad_b, delta_W" ]
[ "0.6032407", "0.59571075", "0.57902676", "0.5779536", "0.5758633", "0.57385635", "0.56800616", "0.5667207", "0.5647853", "0.56092405", "0.5599284", "0.5591161", "0.55750877", "0.557497", "0.557401", "0.5565431", "0.55429536", "0.552723", "0.54701483", "0.54661304", "0.54582804", "0.54582804", "0.5440319", "0.5429842", "0.540978", "0.53968453", "0.53782725", "0.53717625", "0.53514", "0.5349907", "0.5349907", "0.5349907", "0.5349907", "0.5343058", "0.53303516", "0.53144985", "0.53010815", "0.5260503", "0.5260503", "0.5260503", "0.5259182", "0.5246777", "0.52353257", "0.52268803", "0.5224726", "0.52239823", "0.52163833", "0.52126205", "0.5207928", "0.52046824", "0.5199509", "0.5193531", "0.5193531", "0.518634", "0.5179299", "0.5165871", "0.51593155", "0.51452214", "0.51365304", "0.5135398", "0.51328546", "0.5131232", "0.51109314", "0.51098794", "0.51067555", "0.5105465", "0.5097164", "0.50947547", "0.5089268", "0.5087471", "0.50761384", "0.50732785", "0.50674546", "0.50623494", "0.5053483", "0.50470024", "0.50434315", "0.5036431", "0.5029456", "0.5024611", "0.5015141", "0.5013718", "0.50027734", "0.49876624", "0.49871212", "0.49871212", "0.49871212", "0.49871212", "0.4983424", "0.4978728", "0.49752867", "0.4972445", "0.49685422", "0.4968414", "0.4961398", "0.49587613", "0.4955514", "0.49541318", "0.49529552", "0.49523792" ]
0.79851145
0
it performs the cyclic mode of training
def train_cyclic(inputs, outputs, eta=0.55, maxit=1000, momentum=0.1, plot=False):
    global ERROR
    ERROR.clear()
    min_error = 100
    ins_outs = list(zip(inputs, outputs))
    counter = 0
    while counter <= maxit:
        counter += 1
        shuffle(ins_outs)
        for pair in ins_outs:
            i, o = pair
            error2(i, o)
            ERROR.append(layers[-1]["error2"].item())
            try:
                if ERROR[-1] < min_error:
                    min_error = ERROR[-1]
                    optimal_w = getweigths()
                    min_error_counter = counter
                    print(f"Minimum error found = {min_error}, at counter = {min_error_counter}", end="\r")
            except:
                pass
            backpropagate(eta, momentum)
            updateweigths()
    setweigths(optimal_w)
    print(f"\vMinimum error reached at the {min_error_counter}st cycle")
    if plot:
        plt.plot(np.arange(len(ERROR)), ERROR, "b*-")
        plt.xlabel("Number of cycles")
        plt.ylabel("Sum of quadratic errors")
        plt.title("CYCLIC MODE\nERROR vs CYCLES")
        plt.grid()
        plt.show()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train():\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self)->None:", "def train(self, training_steps=10):", "def train(self):\n return", "def trainNet():", "def train(self):\n\t\traise NotImplementedError", "def train(self):\n raise NotImplementedError", "def train_step(self):\n pass", "def TrainOneStep(self):\n pass", "def train(self, ):\n raise NotImplementedError", "def train(self, mode: bool = True):\n if self.nn_module.training != mode:\n self.nn_module.train(mode)", "def fit_recurrent(self, x, y):\n # print('Stage 1')\n x_ = self.scaler_s1.fit_transform(x)\n\n self.basemodel.fit(x_, y)\n self.training_hit_probability = self._hitprobability(x_, y)\n\n # Learn the hit probability\n self.hitproba = HitProbability()\n self.hitproba.fit(x_, self.training_hit_probability)\n\n # Learn high confidence for all classes\n hm_y, auto_gamma = self._adjust_gamma(self.training_hit_probability)\n self.joint_class_hc = HC_LR()\n self.joint_class_hc.fit(x_, hm_y)\n\n # hm_subtypes = []\n # proba_subtypes = []\n\n # while np.mean(y_) > 0.01:\n # for label in np.unique(y):\n\n hm_1hot = []\n hm_1hot.append(self._one_hot(self.training_hit_probability, y)[0])\n y_ = y.copy()\n\n self.recurrent_base = []\n self.recurrent_hpc = []\n for ii in range(self.recurrent_modes):\n print('Stage 1 iter: ' + str(ii))\n #self.recurrent_base.append(BaseSvc())\n\n if np.sum(y_) > 2:\n self.basemodel = BaseSvc()\n hm_y, proba_tmp = self._fit_mode(x_, y_)\n hm_candidate = self._one_hot(proba_tmp, y_)[1]\n else:\n hm_candidate = np.zeros_like(y_)\n\n self.recurrent_base.append(self.basemodel)\n\n #if np.sum(hm_candidate) >= 2:\n hm_1hot.append(hm_candidate)\n\n # remove the selected subgroup from the target list\n y_[hm_1hot[-1] == 1] = 0\n\n # make the default base model the first\n self.basemodel = self.recurrent_base[0]\n\n print('Stage 2')\n # Stage 2\n # hm_1hot = hm_subtypes\n # train stage2\n self.confidencemodel.fit(x_, hm_1hot)", "def eval(self):\n self.train(mode=False)", "def evaluate(params,dataloader):\n MIN_DEPTH = 1e-3\n MAX_DEPTH = 80\n num_gpus = 1\n pred_depth_scale_factor = 1\n checkpoint_path = './log_diretory/mono_depth2-102000/model-97060'#'./log_diretory/kitti_resnet_MS2_nbn_1epoch_pose_fix/model-189107'\n\n gt_path = './utils/gt/eigen_zhou'\n eval_stereo = False\n\n with tf.Graph().as_default(), tf.device('/cpu:0'):\n\n dataloader = MonodepthDataloader(dataloader.data_path, dataloader.filenames_file, params, dataloader.dataset,\n dataloader.mode)\n reference = dataloader.reference_image_batch\n param = dataloader.param_path_batch\n\n\n # split for each gpu\n reference_splits = tf.split(reference, num_gpus,0)\n param_splits = tf.split(param,num_gpus,0)\n\n\n\n reuse_variables = None\n\n with tf.variable_scope(tf.get_variable_scope()):\n for i in range(num_gpus):\n with tf.device('/gpu:%d' % i):\n with tf.name_scope('%d' % i) as scope:\n print(i)\n model = MonodepthModel(params, dataloader.mode, reference_splits[i],None,None,None,param_splits[i],\n #param_path=param_path_splits[i],\n reuse_variables=reuse_variables, model_index=i)\n\n\n\n config = tf.ConfigProto(allow_soft_placement=True) # allow_soft_placement는 명시된 device없을 때 자동으로 잡아준다.\n sess = tf.Session(config=config)\n # Saver\n train_saver = tf.train.Saver()\n\n # Init\n sess.run(tf.global_variables_initializer())\n sess.run(tf.local_variables_initializer())\n coordinator = tf.train.Coordinator() ## coordinator=조정자, threads 관리해주는 
함수\n threads = tf.train.start_queue_runners(sess=sess, coord=coordinator)\n\n # Restore\n print(\"Restore\")\n\n if checkpoint_path != '':\n print('----------------------------------------------')\n print(checkpoint_path)\n print('\\n')\n print(checkpoint_path.split(\".\")[0])\n print('----------------------------------------------')\n train_saver.restore(sess, checkpoint_path)\n print(\"Restore OK\")\n with tf.variable_scope(tf.get_variable_scope()):\n for i in range(num_gpus):\n with tf.device('/gpu:%d' % i):\n with tf.name_scope('%d' % i) as scope:\n bn_updates_op = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope)\n num_test_samples = count_text_lines(dataloader.filenames_file)\n pred_disps = []\n print('Start')\n for step in range(num_test_samples):\n pred_disp = sess.run(model.disp_reference_est[0])\n\n pred_disp = pred_disp.squeeze()\n pred_disp,_ = disp_to_depth(pred_disp)\n\n # print(pred_disp.shape)\n # plt.imshow(pred_disp)\n # plt.show()\n pred_disp = np.expand_dims(pred_disp,0)\n\n pred_disps.append(pred_disp)\n\n pred_disps = np.concatenate(pred_disps)\n print(pred_disps.shape)\n gt_path = gt_path+ '/gt_depths.npz'\n gt_depths = np.load(gt_path, fix_imports=True, encoding='latin1')[\"data\"]\n print(gt_depths[0].shape)\n\n print(\"-> Evaluating\")\n disable_median_scaling=False\n if eval_stereo:\n print(\" Stereo evaluation - \"\n \"disabling median scaling, scaling by {}\".format(STEREO_SCALE_FACTOR))\n disable_median_scaling = True\n pred_depth_scale_factor = STEREO_SCALE_FACTOR\n else:\n print(\" Mono evaluation - using median scaling\")\n\n errors = []\n ratios = []\n\n for i in range(pred_disps.shape[0]):\n\n gt_depth = gt_depths[i]\n gt_height, gt_width = gt_depth.shape[:2]\n\n pred_disp = pred_disps[i]\n pred_disp = cv2.resize(pred_disp, (gt_width, gt_height))\n pred_depth = 1 / pred_disp\n print(pred_depth[0,0])\n\n\n\n\n mask = np.logical_and(gt_depth > MIN_DEPTH, gt_depth < MAX_DEPTH)\n\n crop = np.array([0.40810811 * gt_height, 0.99189189 * gt_height,\n 0.03594771 * gt_width, 0.96405229 * gt_width]).astype(np.int32)\n\n crop_mask = np.zeros(mask.shape)\n crop_mask[crop[0]:crop[1], crop[2]:crop[3]] = 1\n mask = np.logical_and(mask, crop_mask)\n\n print(mask)\n #if i ==pred_disps.shape[0]-3:\n # plt.imshow(pred_depth / 100) # pred_depth[mask]/100)\n # plt.show()\n # plt.imshow(np.where(mask,pred_depth,np.zeros_like(pred_depth))/100)#pred_depth[mask]/100)\n # plt.show()\n # plt.imshow(np.where(mask,gt_depth,np.zeros_like(gt_depth))/100)\n # plt.show()\n\n print(\"pred_depth[mask]\", pred_depth[mask])\n print(\"gt_depth[mask]\", gt_depth[mask])\n pred_depth = pred_depth[mask]\n gt_depth = gt_depth[mask]\n\n pred_depth *= pred_depth_scale_factor\n if not disable_median_scaling:\n print('?')\n ratio = np.median(gt_depth) / np.median(pred_depth)\n ratios.append(ratio)\n pred_depth *= ratio\n\n pred_depth[pred_depth < MIN_DEPTH] = MIN_DEPTH\n pred_depth[pred_depth > MAX_DEPTH] = MAX_DEPTH\n print(\"pred_depth={}\".format(pred_depth))\n print(\"pred_depth < MIN_DEPTH\",pred_depth < MIN_DEPTH)\n print(\" pred_depth[pred_depth < MIN_DEPTH] \", pred_depth[pred_depth < MIN_DEPTH] )\n print(\"pred_depth > MAX_DEPTH\",pred_depth > MAX_DEPTH)\n print(\"pred_depth[pred_depth > MAX_DEPTH]\",pred_depth[pred_depth > MAX_DEPTH])\n print(\"pred_depth_shape={}\".format(pred_depth.shape))\n print(\"gt_depth_shape={}\".format(gt_depth.shape))\n\n errors.append(compute_errors(gt_depth, pred_depth))\n\n if not disable_median_scaling:\n ratios = np.array(ratios)\n med = np.median(ratios)\n 
print(\" Scaling ratios | med: {:0.3f} | std: {:0.3f}\".format(med, np.std(ratios / med)))\n\n mean_errors = np.array(errors).mean(0)\n\n print(\"\\n \" + (\"{:>8} | \" * 7).format(\"abs_rel\", \"sq_rel\", \"rmse\", \"rmse_log\", \"a1\", \"a2\", \"a3\"))\n print((\"&{: 8.3f} \" * 7).format(*mean_errors.tolist()) + \"\\\\\\\\\")\n print(\"\\n-> Done!\")", "def train():\n # YOUR TRAINING CODE GOES HERE", "def train(self):\n self.training = True", "def train( self, trainingData, trainingLabels, validationData, validationLabels ):\n\n self.features = trainingData[0].keys() # could be useful later\n # DO NOT ZERO OUT YOUR WEIGHTS BEFORE STARTING TRAINING, OR\n # THE AUTOGRADER WILL LIKELY DEDUCT POINTS.\n for iteration in range(self.max_iterations):\n #pdb.set_trace() # esto es un break point para que puedas comprobar el formato de los datos\n print (\"Starting iteration \", iteration, \"...\")\n for i in range(len(trainingData)):#training data\n max = -10000000\n for j in range(len(self.weights)):\n prod = np.dot(self.weights[j], trainingData[i]) #este sería x0 (en la primera vuelta) (xj)\n if (prod > max):\n max=prod #en max guardamos la distancia a la instancia que más cerca está de la que estamos recorriendo\n indclase=j #guardas el índice de la clase a la que predices que pertenece\n\n if(indclase != trainingLabels[i]):\n # recalcular pesos\n self.weights[trainingLabels[i]].__radd__(trainingData[i]) #honek jarraian egiten du gehiketa pisu guztientzat\n #pdb.set_trace() # esto es un break point para que puedas comprobar el formato de los datos\n self.weights[indclase].__sub__(trainingData[i]) #honek jarraian egiten du kenketa pisu guztientzat\n\n\n\n\n\n ########################################################################################\n # 1. i es el indice de un ejemplo (un item, f(x) de un ejemplo) del conjunto de entrenamiento.\n # 2. Asi pues, en cada vuelta de este loop se trata un solo ejemplo\n # por cada ejemplo calculareis el producto punto (dotProduct) w*item\n # NOTAS: Recordad que cada ejemplo viene representado por varios rasgos (o features), es decir, es un vector de rasgos, tantos como nos marca el atributo self.features.\n # Asi cada ejemplo es de dimension 1 filas y self.features).\n # La dimension del vector w tambien es self.features, es decir, habra tantos pesos en w_rasgo dentro de w como rasgos haya en cada item de ejemplo\n # Recordad tambien que es una clasificacion multiclase en este caso. 
Hay tantas clases como nos marca el atributo self.legalLabels\n #########################################################################################", "def train(self):\n self.mode = \"train\"\n self.online_net.train()", "def train(self):\n self.mode = \"train\"\n self.online_net.train()", "def train(self, mode: bool = True) -> None:\n super().train(mode=mode)\n if mode:\n self.mean_module = None\n self.covar_module = None\n self.likelihood = None\n self.task_covar_module = None", "def cycle_consistency_loss(self, reconstructed_x, reconstructed_y, x, y, loss_mode=2, ):\n if loss_mode == 1:\n forward_loss = tf.reduce_mean(tf.abs(reconstructed_x - x))\n backward_loss = tf.reduce_mean(tf.abs(reconstructed_y - y))\n elif loss_mode == 2:\n forward_loss = tf.reduce_mean(tf.square(reconstructed_x - x))\n backward_loss = tf.reduce_mean(tf.square(reconstructed_y - y))\n elif loss_mode == 3:\n forward_loss = tf.reduce_mean(tf.losses.huber_loss(x, reconstructed_x, weights=5, delta=0.2))\n backward_loss = tf.reduce_mean(tf.losses.huber_loss(y, reconstructed_y, weights=5, delta=0.2))\n elif loss_mode == 0:\n print 'cycle softmax'\n forward_loss_map = tf.square(reconstructed_x - x)\n backward_loss_map = tf.square(reconstructed_y - y)\n batchsize = forward_loss_map.get_shape()[0].value\n cycle_softmax_coef = 0.75\n\n reshaped_forward_loss_map = tf.reshape(forward_loss_map, shape=[batchsize, -1])\n forward_softmax_weight = tf.nn.softmax(reshaped_forward_loss_map*cycle_softmax_coef, dim=1)\n forward_loss = tf.reduce_sum(forward_softmax_weight * reshaped_forward_loss_map)\n\n reshaped_backward_loss_map = tf.reshape(backward_loss_map, shape=[batchsize, -1])\n backward_softmax_weight = tf.nn.softmax(reshaped_backward_loss_map*cycle_softmax_coef, dim=1)\n backward_loss = tf.reduce_sum(backward_softmax_weight * reshaped_backward_loss_map)\n\n else:\n print 'Unknown cycle loss mode'\n exit(0)\n loss = self.lambda1 * forward_loss + self.lambda2 * backward_loss\n return self.lambda1 * forward_loss, self.lambda2 * backward_loss, loss", "def train_one_epoch(self):\n print('Training......')\n\n # set mode train\n self.network.train()\n\n # prepare data\n train_loss = 0\n transform = transforms.Compose([Rescale(params.rescale_size),\n RandomCrop(params.image_size),\n RandomHorizontalFlip(),\n ToTensor()\n ])\n\n\n\n dataset = Cityscapes(params.dataset_root, mode='train', transforms = transform)\n\n train_loader = DataLoader(dataset,\n batch_size=params.train_batch,\n shuffle=params.shuffle,\n num_workers=params.dataloader_workers)\n \n train_size = 1896\n if train_size % self.params.train_batch != 0:\n total_batch = train_size // self.params.train_batch + 1\n else:\n total_batch = train_size // self.params.train_batch\n recal = 0\n precision = 0\n F_one = 0\n IOU = 0\n accuracy_new = 0 \n # train through dataset\n for batch_idx, batch in enumerate(train_loader):\n self.pb.click(batch_idx, total_batch)\n image, label = batch['image'], batch['label']\n image_cuda, label_cuda = image.cuda(), label.cuda()\n\n # checkpoint split\n if self.params.should_split:\n image_cuda.requires_grad_()\n out = checkpoint_sequential(self.network, self.params.split, image_cuda)\n else:\n out = self.network(image_cuda)\n\n\n loss = self.loss_fn(out, label_cuda)\n \n #display_image(out, label_cuda)\n TP, FP, TN, FN = confusion(out, label_cuda)\n recal = recal+TP\n precision = precision+FP\n F_one = F_one + TN\n IOU = IOU+ FN \n accuracy_final = accuracy(out, label_cuda)\n accuracy_new = accuracy_new + accuracy_final\n\n # optimize\n 
self.opt.zero_grad()\n loss.backward()\n self.opt.step()\n\n # accumulate\n train_loss += loss.item()\n\n # record first loss\n if self.train_loss == []:\n self.train_loss.append(train_loss)\n self.summary_writer.add_scalar('loss/train_loss', train_loss, 0)\n \n print(\"\\t\")\n print(recal/total_batch, precision/ total_batch, F_one/ total_batch, IOU/ total_batch)\n print(accuracy_new/total_batch)\n \n self.pb.close()\n train_loss /= total_batch\n self.train_loss.append(train_loss)\n\n # add to summary\n self.summary_writer.add_scalar('loss/train_loss', train_loss, self.epoch)", "def train(self, mode=True):\n super(Encoder, self).train(mode)\n self.apply(freeze_batchnorm)", "def test(self):\n self.training = False", "def train(self):\n loss_func = torch.nn.MSELoss()\n training_done = False\n total_loss_array = []\n while not training_done:\n # sample a timestep before the cutoff for cross_validation\n rand_timestep_within_sched = np.random.randint(len(self.X_train_naive))\n input_nn = self.X_train_naive[rand_timestep_within_sched]\n\n # iterate over pairwise comparisons\n if torch.cuda.is_available():\n input_nn = Variable(torch.Tensor(np.asarray(input_nn).reshape(1, 242)).cuda())\n truth_nn = input_nn.clone()\n else:\n input_nn = Variable(torch.Tensor(np.asarray(input_nn).reshape(1, 242)))\n truth_nn = Variable(torch.Tensor(np.asarray(input_nn).reshape(1, 242)))\n\n self.opt.zero_grad()\n output = self.model.forward(input_nn)\n\n loss = loss_func(output, truth_nn)\n\n loss.backward()\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), 0.5)\n self.opt.step()\n\n total_loss_array.append(loss.item())\n\n total_iterations = len(total_loss_array)\n\n if total_iterations % 1000 == 999:\n print('current timestep:', total_iterations, 'avg loss for last 500: ', np.mean(total_loss_array[-500:]))\n torch.save({'nn_state_dict': self.model.state_dict()},\n '/home/rohanpaleja/PycharmProjects/bayesian_prolo/scheduling_env/additions_for_HRI/models/Autoencoder' + str(self.num_schedules) + '.tar')\n\n if total_iterations > 2000000:\n training_done = True\n torch.save({'nn_state_dict': self.model.state_dict()},\n '/home/rohanpaleja/PycharmProjects/bayesian_prolo/scheduling_env/additions_for_HRI/models/Autoencoder' + str(self.num_schedules) + '.tar')", "def train(self):\n raise NotImplementedError()", "def train_loop_pre(self, current_step):\r\n pass", "def train(self, mode: bool = True):\n T = super().train(mode=mode)\n if mode:\n self.graph_construction()\n return T", "def train(self):\n self._stop_gradient = False", "def train(x_train, y_train, x_valid, y_valid, config):\n train_acc = []\n valid_acc = []\n train_loss = []\n valid_loss = []\n best_model = None\n NUM_EPOCH = config['epochs']\n EARLY_STOP = config['early_stop']\n EARLY_STOP_EPOCH = config['early_stop_epoch']\n BATCH_SIZE = config['batch_size']\n model = NeuralNetwork(config=config)\n loss = float('inf')\n best_loss = float('inf')\n best_accuracy = 0\n patience = 0\n\n\n\n for i in range (NUM_EPOCH):\n\n x_train, y_train = shuffle(x_train, y_train)\n x_train = np.asarray(x_train)\n y_train = np.asarray(y_train)\n\n for j in range (0, len(x_train), BATCH_SIZE):\n start = j\n end = j + BATCH_SIZE\n if (end > len(x_train)):\n end = len(x_train)\n\n x = x_train[start:end]\n y = y_train[start:end]\n\n model.forward(x, y) \n model.backward()\n\n train_epoch_loss = model.forward(x_train, y_train)\n \n train_predict = np.zeros_like(model.y)\n train_predict[np.arange(len(model.y)), model.y.argmax(1)] = 1\n\n train_accuracy = sum([1 if 
all(train_predict[i] == y_train[i]) else 0 for i in range(len(y_train))])/len(y_train)\n\n train_loss.append(train_epoch_loss)\n train_acc.append(train_accuracy)\n \n valid_epoch_loss = model.forward(x_valid, y_valid)\n valid_predict = np.zeros_like(model.y)\n valid_predict[np.arange(len(model.y)), model.y.argmax(1)] = 1\n\n valid_accuracy = sum([1 if all(valid_predict[i] == y_valid[i]) else 0 for i in range(len(y_valid))])/len(y_valid)\n\n valid_loss.append(valid_epoch_loss)\n valid_acc.append(valid_accuracy)\n\n\n print(\"Epoch:\", i, \"Train Accuracy|Loss:\", train_accuracy,\"| \", train_epoch_loss, \"~|~ Valid: \", valid_accuracy, \" | \", valid_epoch_loss)\n if EARLY_STOP:\n if valid_epoch_loss > best_loss and patience >= EARLY_STOP_EPOCH:\n return train_acc, valid_acc, train_loss, valid_loss, best_model\n elif valid_epoch_loss > best_loss and patience < EARLY_STOP_EPOCH:\n patience += 1\n else:\n patience = 0\n if valid_epoch_loss < best_loss:\n best_loss = valid_epoch_loss\n best_accuracy = valid_accuracy\n best_model = copy.deepcopy(model)\n\n loss = valid_epoch_loss\n\n \n best_model = model \n return train_acc, valid_acc, train_loss, valid_loss, best_model", "def __init__(self, args,logger):\n self.args = args\n self.controller_step = 0\n self.cuda = args.cuda\n self.epoch = 0\n self.shared_step = 0\n self.start_epoch = 0\n self.logger=logger\n self.baseline=None\n\n \"\"\"Load dataset\"\"\" \n self.load_dataset()\n if args.mode=='train':\n self.train_data_loader.restart()\n\n\n if args.use_tensorboard:\n self.tb = TensorBoard(args.model_dir)\n else:\n self.tb = None\n self.build_model()\n\n if self.args.load_path:\n self.load_model()\n\n shared_optimizer = _get_optimizer(self.args.shared_optim)\n controller_optimizer = _get_optimizer(self.args.controller_optim)\n\n self.shared_optim = shared_optimizer(\n self.shared.parameters(),\n lr=self.shared_lr,)\n\n self.controller_optim = controller_optimizer(\n self.controller.parameters(),\n lr=self.args.controller_lr)\n\n self.ce = nn.CrossEntropyLoss()\n if self.args.loss=='MulticlassDiceLoss':\n self.model_loss=MulticlassDiceLoss()\n else:\n self.model_loss=DiceLoss()\n self.time=time.time()\n self.dag_file=open(self.args.model_dir+'/'+self.args.mode+'_dag.log','a')\n\n cnn_type_index={}\n for i,action in enumerate(self.args.shared_cnn_types):\n cnn_type_index[action]=i\n if self.args.use_ref:\n self.ref_arch_num=[]\n ip=[]\n action=[]\n for i,block in enumerate(self.args.ref_arch):\n ip.append(block[0])\n action.append(cnn_type_index[block[1]])\n\n for i in range(len(ip)/2):\n self.ref_arch_num.append([ip[i],ip[i+1],action[i],action[i+1]])\n \n self.ref_arch_num=np.array(self.ref_arch_num)\n self.ref_arch_num=self.ref_arch_num.reshape(1,2*len(self.ref_arch_num))\n\n \"\"\"\n for i,block in enumerate(self.args.ref_arch):\n self.ref_arch_num.append([block[0],cnn_type_index[block[1]]])\n self.ref_arch_num=np.array(self.ref_arch_num)\n self.ref_arch_num=self.ref_arch_num.reshape(1,2*len(self.ref_arch_num))\n \"\"\"", "def train2(self):\n for epoch in range(self.epochs):\n print \"epoch: \", epoch\n self.train(self.D)\n self.alpha -= 0.002 # decrease the learning rate\n self.min_alpha = model.alpha # fix the learning rate, no decay", "def train():\n\n ### DO NOT CHANGE SEEDS!\n # Set the random seeds for reproducibility\n np.random.seed(42)\n\n ## Prepare all functions\n # Get number of units in each hidden layer specified in the string such as 100,100\n if FLAGS.dnn_hidden_units:\n dnn_hidden_units = 
FLAGS.dnn_hidden_units.split(\",\")\n dnn_hidden_units = [int(dnn_hidden_unit_) for dnn_hidden_unit_ in dnn_hidden_units]\n else:\n dnn_hidden_units = []\n\n # Get negative slope parameter for LeakyReLU\n neg_slope = FLAGS.neg_slope\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n import matplotlib.pyplot as plt\n\n data = cifar10_utils.get_cifar10(FLAGS.data_dir)\n train = data['train']\n test = data['test']\n dim_x = train.images.shape[1]*train.images.shape[2]*train.images.shape[3]\n\n mlp = MLP(dim_x, dnn_hidden_units, train.labels.shape[1], neg_slope)\n loss_module = CrossEntropyModule()\n\n loss_train = np.zeros((int(np.floor(FLAGS.max_steps/FLAGS.eval_freq), )))\n loss_test = np.zeros((int(np.floor(FLAGS.max_steps/FLAGS.eval_freq), )))\n accuracy_test = np.zeros((int(np.floor(FLAGS.max_steps/FLAGS.eval_freq), )))\n\n images_test = test.images\n labels_test = test.labels\n images_test = np.reshape(images_test, (images_test.shape[0], dim_x))\n\n for i in range(0, FLAGS.max_steps):\n if PRINTS:\n print('iter', i+1, end='\\r')\n images, labels = train.next_batch(FLAGS.batch_size) \n images = np.reshape(images, (images.shape[0], dim_x))\n\n pred = mlp.forward(images)\n loss = loss_module.forward(pred, labels)\n loss_grad = loss_module.backward(pred, labels)\n mlp.backward(loss_grad)\n\n for module in reversed(mlp.modules):\n if isinstance(module, LinearModule):\n module.params['weight'] -= 1/FLAGS.batch_size*FLAGS.learning_rate*module.grads['weight']\n module.params['bias'] -= 1/FLAGS.batch_size*FLAGS.learning_rate*module.grads['bias']\n if (i+1) % FLAGS.eval_freq == 0:\n pred_test = mlp.forward(images_test)\n loss_train[i // FLAGS.eval_freq] = loss\n accuracy_test[i // FLAGS.eval_freq] = accuracy(pred_test, labels_test)\n loss_test[i // FLAGS.eval_freq] = loss_module.forward(pred_test, labels_test)\n if PRINTS:\n print()\n print('test_loss:', loss_test[i // FLAGS.eval_freq])\n print('test_accuracy:', accuracy_test[i // FLAGS.eval_freq])\n print('train_loss:', loss_train[i // FLAGS.eval_freq])\n\n if PLOTS:\n fig, ax = plt.subplots(1, 2, figsize=(10,5))\n fig.suptitle('Training curves for Numpy MLP\\nFinal test accuracy: {:0.4f}, default configuration'.format(accuracy_test[i // FLAGS.eval_freq]))\n\n ax[0].set_title('Loss')\n ax[0].set_ylabel('Loss value')\n ax[0].set_xlabel('No of batches seen x{}'.format(FLAGS.eval_freq))\n ax[0].plot(loss_train, label='Train')\n ax[0].plot(loss_test, label='Test')\n ax[0].legend()\n\n ax[1].set_title('Accuracy')\n ax[1].set_ylabel('Accuracy value')\n ax[1].set_xlabel('No of batches seen x{}'.format(FLAGS.eval_freq))\n ax[1].plot(accuracy_test, label='Test')\n ax[1].legend()\n plt.show()\n\n\n ########################\n # END OF YOUR CODE #\n #######################", "def backward_multitime(train_x, train_y, test_x, test_y, n_selected_features, data_key=\"test\", method=\"cnn\", cnn_setting_file = \"../../parameters/cnn_model_parameter.txt\", logger=None):\n\n if logger is None:\n log_file = \"\"\n logger = setup_logger(log_file)\n\n train_samples, n_features, time_length = train_x.shape\n\n f_score = []\n eval_method = \"f1\"\n if method == \"cnn\":\n min_class = min(train_y)\n max_class = max(train_y)\n num_classes = max_class - min_class + 1\n data_stru = data_structure(num_classes, min_class, n_features, time_length)\n cnn_setting = return_cnn_setting_from_file(cnn_setting_file)\n logger.info('cnn setting:\\n ' + cnn_setting.to_string())\n saver_file_profix = \"../../object/\" + data_key + \"/backward_multitime/\" + 
method\n saver_file_profix = init_folder(saver_file_profix)\n saver_file_profix = saver_file_profix + return_cnn_keyword(cnn_setting)\n eval_method = cnn_setting.eval_method\n all_f_eval_value, all_f_train_time, all_f_test_time, predict_proba, saver_file, feature_list_obj_file = model_evaluation_cnn(train_x, train_y, test_x, test_y, data_stru, cnn_setting, saver_file_profix, logger)\n elif method == \"rf\":\n model = RandomForestClassifier(n_estimators=50, random_state=0)\n all_f_eval_value, all_f_train_time, all_f_test_time = model_evaluation_rf(train_x, train_y, test_x, test_y, model, logger)\n \n logger.info(\"With ALL Feature\")\n logger.info(method + \" \" + eval_method + \" Value For ALL Feature: \" + str(all_f_eval_value))\n logger.info(method +\" Training time (sec): \" + str(all_f_train_time))\n logger.info(method + \" Testing time (sec): \" + str(all_f_test_time))\n # selected feature set, initialized to contain all features\n F = range(n_features)\n count = n_features\n iter_num = 0\n while count > n_selected_features:\n max_eval_value = -1\n for i in range(n_features):\n if i in F:\n F.remove(i)\n train_x_tmp = train_x[:, F, :]\n test_x_tmp = test_x[:, F, :]\n\n if method == \"cnn\":\n eval_value, train_run_time, test_run_time, predict_proba, saver_file, feature_list_obj_file = model_evaluation_cnn(train_x_tmp, train_y, test_x_tmp, test_y, data_stru, cnn_setting, saver_file_profix, logger)\n f_eval_value = all_f_eval_value - eval_value\n elif method == \"rf\":\n eval_value, train_run_time, test_run_time = model_evaluation_rf(train_x_tmp, train_y, test_x_tmp, test_y, model, logger)\n f_eval_value = all_f_eval_value - eval_value\n\n logger.info(\"Without Feature \" + str(i) + \": \")\n logger.info(method + eval_method + \" Value For Feature \" + str(i) + \": \" + str(f_eval_value))\n logger.info(method +\" Training time (sec): \" + str(train_run_time))\n logger.info(method + \" Testing time (sec): \" + str(test_run_time))\n f_score.append(f_eval_value)\n F.append(i)\n # record the feature which results in the largest accuracy\n if eval_value > max_eval_value:\n max_eval_value = eval_value\n idx = i\n logger.info(\"For iter \" + str(iter_num))\n logger.info(\"Eval score vector: \" + str(f_score))\n logger.info(\"The removed attribute is: \" + str(idx))\n # delete the feature which results in the largest accuracy\n F.remove(idx)\n count -= 1\n iter_num = iter_num + 1\n return np.array(F)", "def run( self, cycles=-1 ):", "def __init__(self):\n self.train(positivity_files, 0)\n self.train(subjectivity_files, 1)", "def train(self, mode=True):\n super(TSN, self).train(mode)\n count = 0\n if self._enable_pbn:\n print(\"Freezing BatchNorm2D except the first one.\")\n for m in self.base_model.modules():\n # print('the type train model : {}'.format(type(m)))\n if isinstance(m, torch.nn.BatchNorm2d) or \\\n isinstance(m, linklink.nn.syncbn_layer.SyncBatchNorm2d):\n count += 1\n if count >= (2 if self._enable_pbn else 1):\n m.eval()\n # print('the freeze module: {} of {}th'.format(type(m), count))\n # shutdown update in frozen mode\n m.weight.requires_grad = False\n m.bias.requires_grad = False", "def epoch(self, dataset, optimize, mode): \n accuracyDataSet = []\n lossDataSet = []\n for epoch in range(config.EPOCHS):\n totalLoss, goodAccuracy, totalAccuracy = 1, 1, 1\n random.shuffle(dataset) \n for i in range(0, len(dataset)-config.BATCH_SIZE, config.BATCH_SIZE):\n \n if optimize:\n self.model.zero_grad() \n \n if mode == 'c':\n \n batch = dataset[i:i+config.BATCH_SIZE]\n \n x_batch_prefix 
= [[utils.getIdOfPrefix(w[3:]) for w in a] for a,b in batch] \n x_batch_suffix = [[utils.getIdOfSuffix(w[:-3]) for w in a] for a,b in batch]\n y_batch = [utils.getIdOfTag(b) for a,b in batch] \n x_prefix = Variable(torch.LongTensor(x_batch_prefix))\n x_suffix = Variable(torch.LongTensor(x_batch_suffix))\n y = Variable(torch.LongTensor(y_batch))\n\n lstm_output, _ = self.model((x_prefix, x_suffix) , mode) \n \n if mode in ['a','b']: \n\n batch = dataset[i:i+config.BATCH_SIZE] \n x_batch = [a for a,b in batch]\n y_batch = [b for a,b in batch]\n x = Variable(torch.LongTensor(x_batch))\n y = Variable(torch.LongTensor(y_batch))\n lstm_output, _ = self.model(x, mode) \n \n if mode == 'd':\n\n batch = dataset[i:i+config.BATCH_SIZE]\n x_batch_words = [[utils.getIdOfWord(w) for w in a] for a,b in batch]\n x_batch_chars = [[[utils.getIdOfChar(c) for c in word] for word in words[0]] for words in batch]\n x_batch_char = [item for sublist in x_batch_chars for item in sublist]\n lengths = [len(item) for item in x_batch_char]\n x_batch_c = []\n length = np.max(lengths) \n\n for i in range(len(x_batch_char)):\n if len(x_batch_char[i]) > config.WORD_MAX_LENGTH:\n x_batch_c.append(x_batch_char[i][:config.WORD_MAX_LENGTH])\n\n else:\n while len(x_batch_char[i]) < config.WORD_MAX_LENGTH:\n x_batch_char[i].append(0) \n x_batch_c.append(x_batch_char[i]) \n \n x_words = Variable(torch.LongTensor(x_batch_words))\n x_chars = Variable(torch.LongTensor(x_batch_c))\n y_batch = [utils.getIdOfTag(b) for a,b in batch]\n y = Variable(torch.LongTensor(y_batch))\n lstm_output, _ = self.model((x_words,x_chars), mode) \n \n lstm_output = lstm_output.view(100, -1) \n loss = self.loss_fn(lstm_output, y)\n totalLoss += (loss.data).numpy()\n\n if optimize:\n self.optimizer.zero_grad() \n loss.backward()\n self.optimizer.step()\n\n goodAccuracy, totalAccuracy = self.getAccuracy((lstm_output.data).numpy(), (y.data).numpy(), mode)\n goodAccuracy += goodAccuracy\n totalAccuracy += totalAccuracy\n\n accuracyDataSet.append(goodAccuracy/totalAccuracy) \n lossDataSet.append( totalLoss/(len(dataset)/config.BATCH_SIZE) )\n print('Model: ',mode,' Epoch: ',epoch ,' Loss : {0:.6f}'.format(totalLoss/(len(dataset)/config.BATCH_SIZE)), ' Accuracy : {0:.6f}'.format( goodAccuracy/totalAccuracy ) ) \n\n if not optimize:\n self.acc_data_plots.append((accuracyDataSet, mode))", "def train(self, mode=True):\n super(SwinTransformer, self).train(mode)\n self._freeze_stages()", "def algorithm_loop(self):", "def main_train(lr, bs, cuda_id, not_distrib, fp16, loss_scale):\r\n torch.backends.cudnn.benchmark = True\r\n if fp16: assert torch.backends.cudnn.enabled, \"missing cudnn\"\r\n stats = (np.array([ 0.4914 , 0.48216, 0.44653]), np.array([ 0.24703, 0.24349, 0.26159]))\r\n sz=32\r\n PATH = Path(\"../../data/cifar10/\")\r\n tfms = tfms_from_stats(stats, sz, aug_tfms=[RandomCrop(sz), RandomFlip()], pad=sz//8)\r\n data1 = ImageClassifierData.from_paths(PATH, val_name='test', tfms=tfms, bs=bs)\r\n m = wrn_22().cuda()\r\n if not not_distrib: m = nn.parallel.DistributedDataParallel(m, device_ids=[cuda_id], output_device=cuda_id)\r\n learn = ConvLearner.from_model_data(m, data1)\r\n learn.crit = nn.CrossEntropyLoss()\r\n learn.metrics = [accuracy]\r\n trn_tfms = CustomTfm(0.5, 4, 32, 1)\r\n val_tfms = None\r\n data = DataBunch.from_files(PATH, trn_tfms, val_tfms, stats, torch.device('cuda', cuda_id), distrib=not not_distrib, val_name='test', bs=bs)\r\n learn.data.trn_dl, learn.data.val_dl = data.trn_dl, data.val_dl\r\n if fp16: learn.half()\r\n x,y = 
next(iter(data.trn_dl))\r\n opt_fn = get_opt_fn('Adam', 0.95, 0.99, False)\r\n learn.opt_fn = opt_fn\r\n cyc_len, pct = 30, 0.075\r\n nbs = [cyc_len * (1-pct) / 2, cyc_len * (1-pct) / 2, cyc_len * pct]\r\n phases = get_phases(lr, (0.95,0.85), opt_fn, 10, nbs, 0.1, True, False)\r\n #print_lr = PrintLR(learn)\r\n learn.fit_opt_sched(phases, loss_scale=loss_scale)", "def train_mode(self, loss_D, loss_G):\n \"\"\"epslon = 1e-5 to avoid loss = 0 \"\"\"\n \"\"\"#@(chuanzi): not loss = 0 in original occasion\"\"\"\n ratio = loss_D.data[0]/(loss_G.data[0] + 1e-5)\n if ratio < 1e-1 and self.train_D:\n self.train_D = False\n self.train_G = True\n if ratio > 5e-1 and not self.train_D:\n self.train_D = True\n self.train_G = True\n if ratio > 1e-1 and self.train_G:\n self.train_G = False\n self.train_D = True\n print ( \"train_D=%d, train_G=%d\" % (self.train_D, self.train_G))", "def sweep_image_model():\n for c1 in [4, 8, 16]:\n for c2 in [2, 4]:\n for c3 in [2, 4]:\n for c4 in [1, 2]:\n flags = flag_reader.read_flag()\n print(c1)\n flags.channel_list = c1 * np.array([1, c2, c2*c3, c2*c3*c4])\n print('channel list = ', flags.channel_list)\n flags.last_dim = flags.channel_list[-1]\n flags.model_name = flags.data_set + '_channel_' + str(flags.channel_list).replace('[','').replace(']','').replace(' ','_') + \\\n '_dim_last_' + str(flags.last_dim) + '_ind_' + str(flags.comp_ind) + \\\n '_lr_{}_decay_{}_reg_{}_bs_{}'.format(flags.lr, flags.lr_decay_rate, flags.reg_scale, flags.batch_size)\n print(flags.model_name)\n training_from_flag(flags)", "def train(self, X, y):", "def __init__(self, gpu_ids='0', isTrain=False, checkpoints_dir='./checkpoints', name='experiment_name', continue_train=False, model='cycle_gan'):\n \n assert(not isTrain)\n BaseModel.__init__(self, gpu_ids=gpu_ids, isTrain=isTrain, checkpoints_dir=checkpoints_dir, name=name, continue_train=continue_train, verbose=False)\n\n self.input_nc = 3\n self.output_nc = 3\n self.ngf = 64 # num of gen filters in the last conv layer\n self.ndf = 64 # num of discriminator filters in the first conv layer'\n self.netG = 'resnet_9blocks' # specify generator architecture [resnet_9blocks | resnet_6blocks | unet_256 | unet_128]\n self.norm = 'instance' # instance normalization or batch normalization [instance | batch | none]\n self.no_dropout = True\n self.init_type = 'normal' # network initialization [normal | xavier | kaiming | orthogonal]\n self.init_gain = 0.02\n self.netD = 'basic' # specify discriminator architecture [basic | n_layers | pixel]\n self.n_layers_D = 3 # only used if netD==n_layers\n self.pool_size = 50 # the size of image buffer that stores previously generated images\n self.lr = 0.0002\n self.beta1 = 0.5 # momentum term of adam\n self.gan_mode = 'lsgan' # the type of GAN objective. 
[vanilla| lsgan | wgangp]\n self.model_suffix = ''\n\n self.loss_names = []\n self.visual_names = ['real', 'fake']\n self.model_names = ['G' + self.model_suffix] # only generator is needed.\n self.netG = networks.define_G(self.input_nc, self.output_nc, self.ngf, self.netG,\n self.norm, not self.no_dropout, self.init_type, self.init_gain, self.gpu_ids)\n\n setattr(self, 'netG' + self.model_suffix, self.netG) # store netG in self.", "def set_train(self):\n self.train()\n self.volatile = False", "def train(self,block=0): \n folder_save = os.path.join(self.path_save,'epoch')\n if self.mode=='first_layer':\n print('====================A new training starts!=============')\n # trains the first layer\n print('=================== Block number {} ==================='.format(0))\n # to store results\n loss_epochs_train = np.zeros(self.nb_epochs[0])\n loss_epochs_val = np.zeros(self.nb_epochs[0])\n loss_min_val = float('Inf')\n self.CreateFolders(0)\n folder = os.path.join(self.path_save,'block_'+str(0))\n self.CreateLoader(block=0)\n # defines the optimizer\n lr = self.lr_first_layer #learnig rate\n optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,self.parameters()),lr=lr) \n #==========================================================================================================\n # for the first layer\n # trains for several epochs\n for epoch in range(0,self.nb_epochs[0]): \n print('This is epoch {} '.format(epoch))\n # sets training mode\n self.model.Layers[0].train()\n gc.collect()\n # goes through all minibatches\n print('This is traning stage')\n for i,minibatch in enumerate(self.train_loader,0):\n [names, x_true_RGB, x_true, x_blurred_RGB , x_blurred, h] = minibatch # get the minibatch\n if names =='.ipynb_checkpoints': continue\n print('The name is {} '.format(names)) \n x_true = Variable(x_true.type(self.dtype),requires_grad=False)\n x_blurred = Variable(x_blurred.type(self.dtype),requires_grad=False)\n h = Variable(h.type(self.dtype),requires_grad=False)\n batch = x_true.shape[0]\n sizex = x_true.shape[1]\n sizeh = h.shape[1]\n SNR_init = 0\n SNR_temp = 0\n init = Initialization(batch,sizex,sizeh,self.dtype)\n self.T_vec,self.t,h_vec,Ch_vec,gamma_vec,lambda_vec = init.f(x_blurred,h)\n mk_vec,diagSigma_vec,newmh_vec,newSigmah_vec,Gammap_vec,LAMBDAk_vec = self.model(x_blurred,x_true,x_blurred,h,self.noise_std_range[0]**2,h_vec,Ch_vec,gamma_vec,lambda_vec,self.mode) \n hhat_vec=self.T_vec@newmh_vec+self.t\n hhat_vec = torch.reshape(hhat_vec,(batch,sizeh,sizeh))#the restored kernel of \n for j in range(batch):\n print('This is batch {}'.format(j))\n SNR_temp = ComputeSNR(x_true[j,:,:],mk_vec[j,:,:])\n SNR_init = ComputeSNR(x_true[j,:,:],x_blurred[j,:,:])\n print('The initial SNR is {}'.format(SNR_init))\n print('The current SNR is {}'.format(SNR_temp))\n loss_ = self.loss_fun_mh(hhat_vec[j,:,:],h[j,:,:])\n print('The RMSE is {}'.format(loss_))\n loss = self.loss_fun_mh(hhat_vec,h)\n print('The loss over all batches are {}'.format(loss))\n loss_epochs_train[epoch] += torch.Tensor.item(loss)\n sys.stdout.write('\\r(%d, %3d) minibatch loss: %5.4f '%(epoch,i,torch.Tensor.item(loss)))\n \n # sets the gradients to zero, performs a backward pass, and updates the weights.\n optimizer.zero_grad()\n loss.backward()\n optimizer.step() #performs a parameter update\n\n # tests on validation set\n print('This is validation stage')\n self.model.eval() # evaluation mode\n self.last_layer.eval() # evaluation mode\n loss_current_val = 0\n for minibatch in self.val_loader:\n [names, x_true_RGB, 
x_true, x_blurred_RGB , x_blurred, h] = minibatch # gets the minibatch\n if names =='.ipynb_checkpoints': continue\n print('The name is {} '.format(names)) \n x_true = Variable(x_true.type(self.dtype),requires_grad=False)\n x_blurred = Variable(x_blurred.type(self.dtype),requires_grad=False)\n h = Variable(h.type(self.dtype),requires_grad=False)\n batch = x_true.shape[0]\n sizex = x_true.shape[1]\n sizeh = h.shape[1]\n SNR_init = 0\n SNR_temp = 0\n init = Initialization(batch,sizex,sizeh,self.dtype)\n __,__,h_vec,Ch_vec,gamma_vec,lambda_vec = init.f(x_blurred,h)\n mk_vec,diagSigma_vec,newmh_vec,newSigmah_vec,Gammap_vec,LAMBDAk_vec = self.model(x_blurred,x_true,x_blurred,h,self.noise_std_range[0]**2,h_vec,Ch_vec,gamma_vec,lambda_vec,self.mode) \n hhat_vec=self.T_vec@newmh_vec+self.t\n hhat_vec = torch.reshape(hhat_vec,(batch,sizeh,sizeh))\n for j in range(batch):\n print('This is batch {}'.format(j))\n SNR_temp = ComputeSNR(x_true[j,:,:],mk_vec[j,:,:])\n SNR_init = ComputeSNR(x_true[j,:,:],x_blurred[j,:,:])\n print('The initial SNR is {}'.format(SNR_init))\n print('The current SNR is {}'.format(SNR_temp))\n loss_ = self.loss_fun_mh(hhat_vec[j,:,:],h[j,:,:])\n print('The RMSE is {}'.format(loss_))\n # computes loss on validation set\n loss = self.loss_fun_mh(hhat_vec,h)\n print('The loss over all batches are {}'.format(loss))\n loss_current_val += torch.Tensor.item(loss)\n loss_epochs_val[epoch] += torch.Tensor.item(loss)\n \n if loss_min_val>loss_current_val:\n torch.save(self.model.state_dict(),os.path.join(folder,'trained_model_MinLossOnVal1.pt'))\n loss_min_val = loss_current_val\n\n # save the results for each epoch\n folder_results_train = os.path.join(folder_save,'block'+str(block),'train')\n # create the path if it does not exist \n if not os.path.exists(folder_results_train):\n os.makedirs(folder_results_train)\n with open(folder_results_train+'/loss_epoch_train.txt', \"a\") as file_object:\n if epoch == 0:\n file_object.write('------------------A new test-------------------------------')\n file_object.write('The loss for epoch {} is {}'.format(epoch,loss_epochs_train[epoch]))\n file_object.write(\"\\n\")\n file_object.write('----------------------------------------------------------')\n file_object.write(\"\\n\")\n folder_results_val = os.path.join(folder_save,'block'+str(block),'val')\n # create the path if it does not exist \n if not os.path.exists(folder_results_val):\n os.makedirs(folder_results_val)\n with open(folder_results_val+'/loss_epoch_val.txt', \"a\") as file_object:\n file_object.write('The loss for epoch {} is {}'.format(epoch,loss_epochs_val[epoch]))\n file_object.write(\"\\n\")\n file_object.write('----------------------------------------------------------')\n file_object.write(\"\\n\")\n #==========================================================================================================\n # training is finished\n print('-----------------------------------------------------------------')\n print('Training of Block 0 is done.')\n self.save_OneBlock(block=0)\n print('-----------------------------------------------------------------')\n \n \n # calls the same function to start training of the next layer\n self.mode = 'greedy'\n self.train(block=1)\n \n#===========================================================================================================\n \n \n elif self.mode=='greedy':\n print('This is greedy processing')\n # trains the next layer\n print('=================== Block number {} ==================='.format(block))\n # to store results\n 
loss_epochs_train = np.zeros(self.nb_epochs[1])\n loss_epochs_val = np.zeros(self.nb_epochs[1])\n loss_min_val = float('Inf')\n self.CreateFolders(block)\n folder = os.path.join(self.path_save,'block_'+str(block))\n self.CreateLoader(block=block)\n # puts first blocks in evaluation mode: gradient is not computed\n self.model.GradFalse(block,self.mode)\n # defines the optimizer\n lr = self.lr_greedy\n optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,self.parameters()),lr=lr)\n #==========================================================================================================\n # trains for several epochs\n for epoch in range(0,self.nb_epochs[1]):\n print('This is epoch {}'.format(epoch))\n self.model.Layers[block].train() # training mode\n gc.collect()\n # goes through all minibatches\n for i,minibatch in enumerate(self.train_loader,0):\n [names, x_true, x_blurred, h, noise_std, estimatedimage_vec, diagSigma_vec, h_vec, Ch_vec, gamma_vec, lambda_vec] = minibatch # gets the minibatch\n h_vec = h_vec.type(self.dtype)\n Ch_vec = Ch_vec.type(self.dtype)\n if names =='.ipynb_checkpoints': continue\n print('The name is {} '.format(names)) \n x_true = Variable(x_true.type(self.dtype),requires_grad=False)\n x_blurred = Variable(x_blurred.type(self.dtype),requires_grad=False)\n h = Variable(h.type(self.dtype),requires_grad=False)\n batch = x_true.shape[0]\n sizex = x_true.shape[1]\n sizeh = h.shape[1]\n #print('The batch size is ',batch_size)\n SNR_init = 0\n SNR_temp = 0\n mk_vec,diagSigma_vec,newmh_vec,newSigmah_vec,Gammap_vec,LAMBDAk_vec = self.model(x_blurred,x_true,estimatedimage_vec,h,self.noise_std_range[0]**2,h_vec,Ch_vec,gamma_vec,lambda_vec,self.mode,block=block) \n hhat_vec=self.T_vec@newmh_vec+self.t\n hhat_vec = torch.reshape(hhat_vec,(batch,sizeh,sizeh))\n for j in range(batch):\n print('This is batch {}'.format(j))\n SNR_temp = ComputeSNR(x_true[j,:,:],mk_vec[j,:,:])\n SNR_init = ComputeSNR(x_true[j,:,:],x_blurred[j,:,:])\n print('The initial SNR is {}'.format(SNR_init))\n print('The current SNR is {}'.format(SNR_temp))\n loss_ = self.loss_fun_mh(hhat_vec[j,:,:],h[j,:,:])\n print('The RMSE is {}'.format(loss_))\n \n # Computes and prints loss\n loss = self.loss_fun_mh(hhat_vec,h)\n print('The loss over all batches are {}'.format(loss))\n loss_epochs_train[epoch] += torch.Tensor.item(loss)\n sys.stdout.write('\\r(%d, %3d) minibatch loss: %5.4f '%(epoch,i,torch.Tensor.item(loss)))\n \n # sets the gradients to zero, performs a backward pass, and updates the weights.\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # tests on validation set\n print('This is validation stage')\n self.model.eval() # evaluation mode\n #self.last_layer.eval() # evaluation mode\n loss_current_val = 0\n for minibatch in self.val_loader:\n [names, x_true, x_blurred, h, noise_std, estimatedimage_vec, diagSigma_vec, h_vec, Ch_vec, gamma_vec, lambda_vec] = minibatch # gets the minibatch\n h_vec = h_vec.type(self.dtype)\n Ch_vec = Ch_vec.type(self.dtype)\n if names =='.ipynb_checkpoints': continue\n print('The name is ',names) \n x_true = Variable(x_true.type(self.dtype),requires_grad=False)\n x_blurred = Variable(x_blurred.type(self.dtype),requires_grad=False)\n h = Variable(h.type(self.dtype),requires_grad=False)\n batch = x_true.shape[0]\n SNR_init = 0\n SNR_temp = 0\n mk_vec,diagSigma_vec,newmh_vec,newSigmah_vec,Gammap_vec,LAMBDAk_vec = self.model(x_blurred,x_true,estimatedimage_vec,h,self.noise_std_range[0]**2,h_vec,Ch_vec,gamma_vec,lambda_vec,self.mode,block=block) \n 
hhat_vec =self.T_vec@newmh_vec+self.t\n hhat_vec = torch.reshape(hhat_vec,(batch,sizeh,sizeh))\n for j in range(batch):\n print('This is batch {}'.format(j))\n SNR_temp = ComputeSNR(x_true[j,:,:],mk_vec[j,:,:])\n SNR_init = ComputeSNR(x_true[j,:,:],x_blurred[j,:,:])\n print('The initial SNR is {}'.format(SNR_init))\n print('The current SNR is {}'.format(SNR_temp))\n loss_ = self.loss_fun_mh(hhat_vec[j,:,:],h[j,:,:])\n print('The RMSE is {}'.format(loss_))\n loss = self.loss_fun_mh(hhat_vec,h)\n print('The loss over all batches are {}'.format(loss))\n loss_epochs_val[epoch] += torch.Tensor.item(loss)\n # computes loss on validation set\n loss_current_val += torch.Tensor.item(self.loss_fun_mh(hhat_vec, h))\n\n if loss_min_val>loss_current_val:\n torch.save(self.model.state_dict(),os.path.join(folder,'trained_model_MinLossOnVal1.pt'))\n loss_min_val = loss_current_val\n\n # save the results for each epoch\n folder_results_train = os.path.join(folder_save,'block'+str(block),'train')\n # create the path if it does not exist \n if not os.path.exists(folder_results_train):\n os.makedirs(folder_results_train)\n with open(folder_results_train+'/loss_epoch_train.txt', \"a\") as file_object:\n if epoch == 0:\n file_object.write('------------------A new test-------------------------------')\n file_object.write('The loss for epoch {} is {}'.format(epoch,loss_epochs_train[epoch]))\n file_object.write(\"\\n\")\n file_object.write('----------------------------------------------------------')\n file_object.write(\"\\n\")\n folder_results_val = os.path.join(folder_save,'block'+str(block),'val')\n # create the path if it does not exist \n if not os.path.exists(folder_results_val):\n os.makedirs(folder_results_val)\n with open(folder_results_val+'/loss_epoch_val.txt', \"a\") as file_object:\n file_object.write('The loss for epoch {} is {}'.format(epoch,loss_epochs_val[epoch]))\n file_object.write(\"\\n\")\n file_object.write('----------------------------------------------------------')\n file_object.write(\"\\n\")\n #==========================================================================================================\n # training is finished\n print('-----------------------------------------------------------------')\n print('Training of Block {} is done.'.format(block))\n self.save_OneBlock(block=block)\n print('-----------------------------------------------------------------')\n \n #calls the same function to start training of next block \n if block==self.nb_blocks-1:\n self.mode = 'lpp'\n self.train()\n else: \n self.train(block=block+1)\n\n#===========================================================================================================\n \n elif self.mode=='lpp':\n # trains the post-processing layer for RGB images\n print('start the post-processing layer for RGB images')\n # to store results\n loss_epochs_train = np.zeros(self.nb_epochs[2])\n loss_epochs_val = np.zeros(self.nb_epochs[2])\n loss_min_val = float('Inf')\n self.CreateFolders(self.nb_blocks)\n folder = os.path.join(self.path_save,'lpp')\n self.CreateLoader(block=self.nb_blocks)\n # puts first blocks in evaluation mode: gradient is not computed\n self.model.GradFalse(self.nb_blocks,self.mode) \n # defines the optimizer\n lr = self.lr_lpp\n optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,self.last_layer.parameters()),lr=lr)\n \n #==============================================================================================\n # trains for several epochs\n for epoch in range(0,self.nb_epochs[2]):\n self.model.eval() 
\n self.last_layer.train() #training mode\n gc.collect()\n # goes through all minibatches\n for i,minibatch in enumerate(self.train_loader,0):\n [names, x_true, x_blurred, h, noise_std, estimatedimage_vec, diagSigma_vec, h_vec, Ch_vec, gamma_vec, lambda_vec] = minibatch # gets the minibatch\n batch = x_true.shape[0]\n sizex = x_true.shape[1]\n sizeh = h.shape[1]\n print(x_true.shape)\n estimatedimage_vec_ = estimatedimage_vec.reshape(batch,1,sizex,sizex)\n x_true_ = torch.zeros((batch,3,sizex,sizex)).type(self.dtype)\n for channel in range(3):\n x_true_[:,channel,:,:] = torch.tensor(x_true[:,:,:,channel])\n x_blurred_ = x_blurred.reshape(batch,3,sizex,sizex)\n h_vec = h_vec.type(self.dtype)\n Ch_vec = Ch_vec.type(self.dtype)\n if names =='.ipynb_checkpoints': continue\n print('The name is {} '.format(names)) \n x_true = Variable(x_true.type(self.dtype),requires_grad=False)\n x_blurred = Variable(x_blurred.type(self.dtype),requires_grad=False)\n h = Variable(h.type(self.dtype),requires_grad=False)\n # add the post-processing on RGB images (3 channels)\n U_new,V_new = RGBtoYUV(x_blurred,3,self.dtype)\n RGB_new = YUVtoRGB(estimatedimage_vec_,U_new,V_new,self.dtype)\n mk_vec = self.sigmoid(RGB_new + self.last_layer(RGB_new)) \n SNR_temp = np.zeros((3))\n SNR_init = np.zeros((3))\n for j in range(batch):\n print('This is batch {}'.format(j))\n for c in range(3): # 3 channels\n SNR_temp[c] = ComputeSNR(x_true[j,:,:,c],mk_vec[j,c,:,:])\n SNR_init[c] = ComputeSNR(x_true[j,:,:,c],x_blurred[j,:,:,c])\n print('The initial SNR is {}'.format(np.mean(SNR_init)))\n print('The current SNR is {}'.format(np.mean(SNR_temp)))\n # Computes and prints loss\n loss = self.loss_fun_mk(mk_vec, x_true_)\n print('The SSIM over all batches are {}'.format(-loss))\n loss_epochs_train[epoch] += torch.Tensor.item(loss)\n sys.stdout.write('\\r(%d, %3d) minibatch loss: %5.4f '%(epoch,i,torch.Tensor.item(-loss)))\n\n # sets the gradients to zero, performs a backward pass, and updates the weights.\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n \n # tests on validation set\n self.model.eval() # evaluation mode\n self.last_layer.eval() # evaluation mode\n \n loss_current_val = 0\n for minibatch in self.val_loader:\n [names, x_true, x_blurred, h, noise_std, estimatedimage_vec, diagSigma_vec, h_vec, Ch_vec, gamma_vec, lambda_vec] = minibatch # gets the minibatch\n batch = x_true.shape[0]\n sizex = x_true.shape[1]\n sizeh = h.shape[1]\n estimatedimage_vec_ = estimatedimage_vec.reshape(batch,1,sizex,sizex)\n x_true_ = torch.zeros((batch,3,sizex,sizex)).type(self.dtype)\n for channel in range(3):\n x_true_[:,channel,:,:] = torch.tensor(x_true[:,:,:,channel])\n x_blurred_ = x_blurred.reshape(batch,3,sizex,sizex)\n h_vec = h_vec.type(self.dtype)\n Ch_vec = Ch_vec.type(self.dtype)\n if names =='.ipynb_checkpoints': continue\n print('The name is {} '.format(names)) \n x_true = Variable(x_true.type(self.dtype),requires_grad=False)\n x_blurred = Variable(x_blurred.type(self.dtype),requires_grad=False)\n h = Variable(h.type(self.dtype),requires_grad=False)\n # add the post-processing on RGB images (3 channels)\n U_new,V_new = RGBtoYUV(x_blurred,3,self.dtype)\n RGB_new = YUVtoRGB(estimatedimage_vec_,U_new,V_new,self.dtype)\n mk_vec = self.sigmoid(RGB_new + self.last_layer(RGB_new))\n SNR_temp = np.zeros((3))\n SNR_init = np.zeros((3))\n for j in range(batch):\n print('This is batch {}'.format(j))\n for c in range(3): # 3 channels\n SNR_temp[c] = ComputeSNR(x_true[j,:,:,c],mk_vec[j,c,:,:])\n SNR_init[c] = 
ComputeSNR(x_true[j,:,:,c],x_blurred[j,:,:,c])\n print('The initial SNR is {}'.format(np.mean(SNR_init)))\n print('The current SNR is {}'.format(np.mean(SNR_temp)))\n # Computes and prints loss\n loss = self.loss_fun_mk(mk_vec, x_true_)\n print('The SSIM over all batches are {}'.format(-loss))\n loss_epochs_val[epoch] += torch.Tensor.item(loss)\n loss_current_val += torch.Tensor.item(loss)\n \n\n if loss_min_val>loss_current_val:\n torch.save(self.last_layer.state_dict(),os.path.join(folder,'trained_post-processing_MinLossOnVal.pt'))\n loss_min_val = loss_current_val\n \n # save the results for each epoch\n folder_results_train = os.path.join(folder_save,'lpp','train')\n # create the path if it does not exist \n if not os.path.exists(folder_results_train):\n os.makedirs(folder_results_train) \n with open(folder_results_train+'/SSIM_epoch_train_new.txt', \"a\") as file_object:\n if epoch == 0:\n file_object.write('------------------A new test-------------------------------')\n file_object.write('The loss for epoch {} is {}'.format(epoch,-loss_epochs_train[epoch]))\n file_object.write(\"\\n\")\n file_object.write('----------------------------------------------------------')\n file_object.write(\"\\n\")\n folder_results_val = os.path.join(folder_save,'lpp','val')\n # create the path if it does not exist \n if not os.path.exists(folder_results_val):\n os.makedirs(folder_results_val)\n with open(folder_results_val+'SSIM_epoch_val_new.txt', \"a\") as file_object:\n file_object.write('The loss for epoch {} is {}'.format(epoch,-loss_epochs_val[epoch]))\n file_object.write(\"\\n\")\n file_object.write('----------------------------------------------------------')\n file_object.write(\"\\n\")\n \n \n \n # training of greedy approach is finished\n print('-----------------------------------------------------------------')\n print('Training of lpp is done.')\n print('-----------------------------------------------------------------')\n return \n \n############################################################################################################## \n\n elif self.mode=='all_layers':\n # start the N-N training\n # trains several blocks as one\n print('=================== Block number {} to Block number {} ==================='.format(0,self.nb_blocks-1))\n # to store results\n loss_epochs_train = np.zeros(self.nb_epochs[1])\n loss_epochs_val = np.zeros(self.nb_epochs[1])\n loss_min_val = float('Inf')\n self.CreateFolders(self.nb_blocks-1)\n folder = os.path.join(self.path_save,'block_'+str(0)+'_'+str(self.nb_blocks-1))\n self.CreateLoader(0)\n # puts first blocks in evaluation mode: gradient is not computed\n self.model.GradFalse(self.nb_blocks,self.mode)\n # defines the optimizer\n lr = self.lr_N_N #learnig rate\n optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,self.parameters()),lr=lr,weight_decay=1e-4)\n\n# ==========================================================================================================\n #for the first layer\n # trains for several epochs\n for epoch in range(0,self.nb_epochs[1]): \n print('This is epoch {} '.format(epoch))\n # sets training mode\n for k in range(0,self.nb_blocks):\n self.model.Layers[k].train() #training mode\n gc.collect()\n # goes through all minibatches\n print('This is traning stage')\n for i,minibatch in enumerate(self.train_loader,0):\n [names, x_true_RGB, x_true, x_blurred_RGB, x_blurred, h] = minibatch # get the minibatch\n if names =='.ipynb_checkpoints': continue\n print('The name is {} '.format(names)) \n x_true_RGB = 
Variable(x_true_RGB.type(self.dtype),requires_grad=False)\n x_true = Variable(x_true.type(self.dtype),requires_grad=False)\n x_blurred_RGB = Variable(x_blurred_RGB.type(self.dtype),requires_grad=False)\n x_blurred = Variable(x_blurred.type(self.dtype),requires_grad=False)\n h = Variable(h.type(self.dtype),requires_grad=False)\n batch = x_true.shape[0]\n sizex = x_true.shape[1]\n sizeh = h.shape[1]\n x_true_ = torch.zeros((batch,3,sizex,sizex)).type(self.dtype)\n for channel in range(3):\n x_true_[:,channel,:,:] = torch.tensor(x_true_RGB[:,:,:,channel])\n SNR_init = 0\n SNR_temp = 0\n init = Initialization(batch,sizex,sizeh,self.dtype)\n T_vec,t,h_vec,Ch_vec,gamma_vec,lambda_vec = init.f(x_blurred,h)\n mk_vec,diagSigma_vec,newmh_vec,newSigmah_vec,Gammap_vec,LAMBDAk_vec = self.model(x_blurred,x_true,x_blurred,h,self.noise_std_range[0]**2,h_vec,Ch_vec,gamma_vec,lambda_vec,self.mode) \n estimatedimage_vec_ = mk_vec.reshape(batch,1,sizex,sizex)\n hhat_vec=T_vec@newmh_vec+t\n hhat_vec = torch.reshape(hhat_vec,(batch,sizeh,sizeh))#the restored kernel\n # lpp \n U_new,V_new = RGBtoYUV(x_blurred_RGB,3,self.dtype)\n RGB_new = YUVtoRGB(estimatedimage_vec_,U_new,V_new,self.dtype)\n mk_vec_RGB = self.sigmoid(RGB_new + self.last_layer(RGB_new)) \n SNR_temp = np.zeros((3))\n SNR_init = np.zeros((3))\n for j in range(batch):\n print('This is batch {}'.format(j))\n for c in range(3): # 3 channels\n SNR_temp[c] = ComputeSNR(x_true_RGB[j,:,:,c],mk_vec_RGB[j,c,:,:])\n SNR_init[c] = ComputeSNR(x_true_RGB[j,:,:,c],x_blurred_RGB[j,:,:,c])\n print('The initial SNR is {}'.format(np.mean(SNR_init)))\n print('The current SNR is {}'.format(np.mean(SNR_temp)))\n # Computes and prints loss\n loss = self.loss_fun_mk(mk_vec_RGB, x_true_)\n print('The SSIM over all batches are {}'.format(-loss))\n loss_epochs_train[epoch] += torch.Tensor.item(loss)\n sys.stdout.write('\\r(%d, %3d) minibatch loss: %5.4f '%(epoch,i,torch.Tensor.item(-loss)))\n \n # sets the gradients to zero, performs a backward pass, and updates the weights.\n optimizer.zero_grad()\n loss.backward()\n optimizer.step() #performs a parameter update\n \n # tests on validation set\n print('This is validation stage')\n self.model.eval() # evaluation mode\n self.last_layer.eval() # evaluation mode\n loss_current_val = 0\n for minibatch in self.val_loader:\n [names, x_true_RGB, x_true, x_blurred_RGB, x_blurred, h] = minibatch # gets the minibatch\n if names =='.ipynb_checkpoints': continue\n print('The name is {} '.format(names)) \n x_true_RGB = Variable(x_true_RGB.type(self.dtype),requires_grad=False)\n x_true = Variable(x_true.type(self.dtype),requires_grad=False)\n x_blurred_RGB = Variable(x_blurred_RGB.type(self.dtype),requires_grad=False)\n x_blurred = Variable(x_blurred.type(self.dtype),requires_grad=False)\n h = Variable(h.type(self.dtype),requires_grad=False)\n batch = x_true.shape[0]\n sizex = x_true.shape[1]\n sizeh = h.shape[1]\n x_true_ = torch.zeros((batch,3,sizex,sizex)).type(self.dtype)\n for channel in range(3):\n x_true_[:,channel,:,:] = torch.tensor(x_true_RGB[:,:,:,channel])\n SNR_init = 0\n SNR_temp = 0\n init = Initialization(batch,sizex,sizeh,self.dtype)\n T_vec,t,h_vec,Ch_vec,gamma_vec,lambda_vec = init.f(x_blurred,h)\n mk_vec,diagSigma_vec,newmh_vec,newSigmah_vec,Gammap_vec,LAMBDAk_vec = self.model(x_blurred,x_true,x_blurred,h,self.noise_std_range[0]**2,h_vec,Ch_vec,gamma_vec,lambda_vec,self.mode) \n estimatedimage_vec_ = mk_vec.reshape(batch,1,sizex,sizex)\n hhat_vec=T_vec@newmh_vec+t\n hhat_vec = 
torch.reshape(hhat_vec,(batch,sizeh,sizeh))\n # lpp \n U_new,V_new = RGBtoYUV(x_blurred_RGB,3,self.dtype)\n RGB_new = YUVtoRGB(estimatedimage_vec_,U_new,V_new,self.dtype)\n mk_vec_RGB = self.sigmoid(RGB_new + self.last_layer(RGB_new)) \n SNR_temp = np.zeros((3))\n SNR_init = np.zeros((3))\n for j in range(batch):\n print('This is batch {}'.format(j))\n for c in range(3): # 3 channels\n SNR_temp[c] = ComputeSNR(x_true_RGB[j,:,:,c],mk_vec_RGB[j,c,:,:])\n SNR_init[c] = ComputeSNR(x_true_RGB[j,:,:,c],x_blurred_RGB[j,:,:,c])\n print('The initial SNR is {}'.format(np.mean(SNR_init)))\n print('The current SNR is {}'.format(np.mean(SNR_temp)))\n # Computes and prints loss\n loss = self.loss_fun_mk(mk_vec_RGB, x_true_)\n print('The SSIM over all batches are {}'.format(-loss))\n loss_current_val += torch.Tensor.item(loss)\n loss_epochs_val[epoch] += torch.Tensor.item(loss)\n\n if loss_min_val>loss_current_val:\n torch.save(self.last_layer.state_dict(),os.path.join(folder,'trained_post-processing_MinLossOnVal.pt'))\n torch.save(self.model.state_dict(),os.path.join(folder,'trained_model_MinLossOnVal.pt'))\n loss_min_val = loss_current_val\n\n # save the results for each epoch\n folder_results_train = os.path.join(folder_save,'block0_'+str(self.nb_blocks-1),'train')\n # create the path if it does not exist \n if not os.path.exists(folder_results_train):\n os.makedirs(folder_results_train)\n with open(folder_results_train+'/loss_epoch_train.txt', \"a\") as file_object:\n if epoch == 0:\n file_object.write('------------------A new test-------------------------------')\n file_object.write('The loss for epoch {} is {}'.format(epoch,loss_epochs_train[epoch]))\n file_object.write(\"\\n\")\n file_object.write('----------------------------------------------------------')\n file_object.write(\"\\n\")\n folder_results_val = os.path.join(folder_save,'block0_'+str(self.nb_blocks-1),'val')\n # create the path if it does not exist \n if not os.path.exists(folder_results_val):\n os.makedirs(folder_results_val)\n with open(folder_results_val+'/loss_epoch_val.txt', \"a\") as file_object:\n file_object.write('The loss for epoch {} is {}'.format(epoch,loss_epochs_val[epoch]))\n file_object.write(\"\\n\")\n file_object.write('----------------------------------------------------------')\n file_object.write(\"\\n\")\n \n #==========================================================================================================\n # training is finished\n print('-----------------------------------------------------------------')\n print('Training of Block {} to Block {} + lpp is done.'.format(0,self.nb_blocks-1))\n print('-----------------------------------------------------------------')", "def train(self,path,mode):\n if mode == \"porto\":\n self.prepare_data(path)\n else:\n self.prepare_sumo_data(path)\n self.poly_regression()", "def train():\r\n print('Loading and compiling models...')\r\n model_systole = get_model()\r\n model_diastole = get_model()\r\n\r\n # load the preprocessed data with the heart cut-out\r\n print('Loading data...')\r\n X_train, scaling_train, ids_train, y_train = load_train_data()\r\n X_test, scaling_test, ids_test, y_test = load_test_data()\r\n\r\n nb_iter = 200 # a higher number seems to give rise to overfitting\r\n epochs_per_iter = 3 # reduces overfitting\r\n batch_size = 32 # not tuned - potential improvement\r\n calc_crps = 2 # calculate CRPS every n-th iteration (set to 0 if CRPS estimation is not needed)\r\n\r\n # remember min val. 
losses (best iterations), used as sigmas for submission\r\n min_val_loss_systole = sys.float_info.max\r\n min_val_loss_diastole = sys.float_info.max\r\n\r\n print('-'*50)\r\n print('Training...')\r\n print('-'*50)\r\n\r\n for i in range(nb_iter):\r\n print('-'*50)\r\n print('Iteration {0}/{1}'.format(i + 1, nb_iter))\r\n print('-'*50)\r\n\r\n # augment data to make up for low number of samples\r\n print('Augmenting images - rotations')\r\n X_train_aug = rotation_augmentation(X_train, 15)\r\n print('Augmenting images - shifts')\r\n X_train_aug = shift_augmentation(X_train_aug, 0.1, 0.1)\r\n\r\n print('Fitting systole model...')\r\n hist_systole = model_systole.fit([X_train_aug, scaling_train], y_train[:, 0], shuffle=True, nb_epoch=epochs_per_iter,\r\n batch_size=batch_size, validation_data=([X_test, scaling_test], y_test[:, 0]))\r\n\r\n print('Fitting diastole model...')\r\n hist_diastole = model_diastole.fit([X_train_aug, scaling_train], y_train[:, 1], shuffle=True, nb_epoch=epochs_per_iter,\r\n batch_size=batch_size, validation_data=([X_test, scaling_test], y_test[:, 1]))\r\n\r\n # sigmas for predicted data, actually loss function values (RMSE)\r\n loss_systole = hist_systole.history['loss'][-1]\r\n loss_diastole = hist_diastole.history['loss'][-1]\r\n val_loss_systole = hist_systole.history['val_loss'][-1]\r\n val_loss_diastole = hist_diastole.history['val_loss'][-1]\r\n\r\n if calc_crps > 0 and i % calc_crps == 0:\r\n print('Evaluating CRPS...')\r\n pred_systole = model_systole.predict([X_train, scaling_train], batch_size=batch_size, verbose=1)\r\n pred_diastole = model_diastole.predict([X_train, scaling_train], batch_size=batch_size, verbose=1)\r\n val_pred_systole = model_systole.predict([X_test, scaling_test], batch_size=batch_size, verbose=1)\r\n val_pred_diastole = model_diastole.predict([X_test, scaling_test], batch_size=batch_size, verbose=1)\r\n\r\n # CDF for train and test data (actually a step function)\r\n cdf_train = real_to_cdf(np.concatenate((y_train[:, 0], y_train[:, 1])))\r\n cdf_test = real_to_cdf(np.concatenate((y_test[:, 0], y_test[:, 1])))\r\n\r\n # CDF for predicted data\r\n cdf_pred_systole = real_to_cdf(pred_systole, loss_systole)\r\n cdf_pred_diastole = real_to_cdf(pred_diastole, loss_diastole)\r\n cdf_val_pred_systole = real_to_cdf(val_pred_systole, val_loss_systole)\r\n cdf_val_pred_diastole = real_to_cdf(val_pred_diastole, val_loss_diastole)\r\n\r\n # evaluate CRPS on training data\r\n crps_train = crps(cdf_train, np.concatenate((cdf_pred_systole, cdf_pred_diastole)))\r\n print('CRPS(train) = {0}'.format(crps_train))\r\n\r\n # evaluate CRPS on test data\r\n crps_test = crps(cdf_test, np.concatenate((cdf_val_pred_systole, cdf_val_pred_diastole)))\r\n print('CRPS(test) = {0}'.format(crps_test))\r\n\r\n print('Saving weights...')\r\n # save weights so they can be loaded later\r\n model_systole.save_weights('weights_systole.hdf5', overwrite=True)\r\n model_diastole.save_weights('weights_diastole.hdf5', overwrite=True)\r\n\r\n # for best (lowest) val losses, save weights\r\n if val_loss_systole < min_val_loss_systole:\r\n min_val_loss_systole = val_loss_systole\r\n model_systole.save_weights('weights_systole_best.hdf5', overwrite=True)\r\n\r\n if val_loss_diastole < min_val_loss_diastole:\r\n min_val_loss_diastole = val_loss_diastole\r\n model_diastole.save_weights('weights_diastole_best.hdf5', overwrite=True)\r\n\r\n # save best (lowest) val losses in file (to be later used for generating submission)\r\n with open('val_loss.txt', mode='w+') as f:\r\n 
f.write(str(min_val_loss_systole))\r\n f.write('\\n')\r\n f.write(str(min_val_loss_diastole))", "def train_one_epoch(self):\n raise NotImplementedError", "def train(self, batch):\n pass", "def train(self, mode=True):\n super(CRNN, self).train(mode)\n if self.freeze_bn:\n print(\"Freezing Mean/Var of BatchNorm2D.\")\n if self.freeze_bn:\n print(\"Freezing Weight/Bias of BatchNorm2D.\")\n if self.freeze_bn:\n for m in self.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.eval()\n if self.freeze_bn:\n m.weight.requires_grad = False\n m.bias.requires_grad = False", "def train_naive(): # add arguments as needed\n pass", "def main(tetrode_number=TETRODE_NUMBER,num_hidden_units=500,num_hidden_units_2=300,num_hidden_units_3=200,num_code_units=50):\n \n print(\"Making the model...\")\n network = model((None,200),200,num_hidden_units,num_hidden_units_2,num_hidden_units_3,num_code_units)\n print(\"Done!\")\n\n\n for tetrode_number in [10]:\n\n print(\"Loading the model parameters from {}\".format(MODEL_FILENAME+str(tetrode_number)))\n f = open(MODEL_FILENAME+str(tetrode_number),'r')\n all_param_values = pickle.load(f)\n f.close()\n # print(all_param_values)\n lasagne.layers.set_all_param_values(network, all_param_values)\n\n print(\"Loading the data...\")\n dataset = load_data(tetrode_number)\n print(\"Done!\")\n\n print(dataset['data'].shape)\n\n print(\"Setting up the training functions...\")\n training = funcs(dataset,network)\n print(\"Done!\")\n\n for i in range(NUM_EPOCHS):\n costs = []\n\n for start, end in zip(range(0, dataset['data'].shape[0], BATCH_SIZE), range(BATCH_SIZE, dataset['data'].shape[0], BATCH_SIZE)):\n cost = training['train'](dataset['data'][start:end],dataset['data'][start:end])\n costs.append(cost)\n\n meanTrainCost = np.mean(np.asarray(costs,dtype=np.float32))\n # accuracy = training['accuracy'](dataset['X_test'],dataset['y_test'])\n\n print(\"Epoch: {}, Training cost: {}\".format(i+1,meanTrainCost))\n # NUM_POINTS = 5000\n codes = training['code'](dataset['data'][0:NUM_POINTS])\n\n \n\n # y = set(list(d.predict(dataset['data'][0:NUM_POINTS])))\n\n # print(y)\n\n # activations_1 = training['activations_1'](dataset['data'][0:NUM_POINTS])\n # activations_2 = training['activations_2'](dataset['data'][0:NUM_POINTS])\n # codes = training['code'](dataset['data'][0:NUM_POINTS])\n # # print(codes.shape)\n # # codes_2d = bh_sne(codes)\n\n # for k in range(3):\n # print(k)\n\n # codes_2d = bh_sne(np.asarray(codes[:(k+1)*12000],dtype=np.float64))\n\n # # d = DPGMM(n_components=10, covariance_type='full')\n # d = DPGMM(n_components=15,n_iter=100)\n\n # d.fit(codes_2d[:(k+1)*12000])\n\n # hdp = d.predict_proba(codes_2d[:(k+1)*12000])\n\n # hdp_1d = [np.argmax(z) for z in hdp]\n\n # print(set(list(hdp_1d)))\n\n # plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=hdp_1d, alpha=0.8,lw=0)\n # plt.savefig('dbscan_labels/deep/sparse/hdp_{}_{}.png'.format(tetrode_number,k), bbox_inches='tight')\n # plt.close()\n\n # # m = TSNE(n_components=2, random_state=0)\n \n # # codes_2d = m.fit_transform(codes[:NUM_POINTS])\n # # activations_1_2d = bh_sne(activations_1)\n # # activations_2_2d = bh_sne(activations_2)\n\n # plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=dataset['labels'][0:NUM_POINTS][:(k+1)*12000],alpha=0.8,lw=0)\n # plt.savefig('dbscan_labels/deep/sparse/tsne_codes_{}_{}.png'.format(tetrode_number,k), bbox_inches='tight')\n # plt.close()\n\n # # This is where the code for the video will go\n # ##############################################################################\n # # Compute 
DBSCAN\n # db = None\n # core_samples_mask = None\n # labels = None\n\n # num_labels = 0\n # eps=1.0\n # while(num_labels < 10):\n # db = DBSCAN(eps=eps, min_samples=10).fit(codes_2d)\n # core_samples_mask = np.zeros_like(db.labels_, dtype=bool)\n # core_samples_mask[db.core_sample_indices_] = True\n # labels = db.labels_\n # num_labels = np.amax(labels)\n # eps -= 0.1\n\n # print(\"Num learned labels: {}\".format(num_labels))\n\n # plt.title('Estimated number of clusters: {}'.format(np.amax(labels)))\n # plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=labels[0:NUM_POINTS][:(k+1)*12000],lw=0)\n # plt.savefig('dbscan_labels/deep/sparse/dbscan_codes_{}_{}.png'.format(tetrode_number,k), bbox_inches='tight')\n # plt.close()\n\n # # f=open('dbscan_labels/deep/sparse/tetrode_{}.npy'.format(tetrode_number),'w')\n # # pickle.dump(labels, f)\n # # f.close()\n\n codes_2d = bh_sne(np.asarray(codes,dtype=np.float64),theta=0.4)\n\n # d = DPGMM(n_components=10, covariance_type='full')\n d = DPGMM(n_components=15,n_iter=1000)\n\n d.fit(codes_2d)\n\n hdp = d.predict_proba(codes_2d)\n\n hdp_1d = [np.argmax(z) for z in hdp]\n\n print(set(list(hdp_1d)))\n\n plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=hdp_1d, alpha=0.8,lw=0)\n plt.savefig('dbscan_labels/deep/sparse/hdp_{}.png'.format(tetrode_number), bbox_inches='tight')\n plt.close()\n\n # m = TSNE(n_components=2, random_state=0)\n \n # codes_2d = m.fit_transform(codes[:NUM_POINTS])\n # activations_1_2d = bh_sne(activations_1)\n # activations_2_2d = bh_sne(activations_2)\n\n plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=dataset['labels'][0:NUM_POINTS],alpha=0.8,lw=0)\n plt.savefig('dbscan_labels/deep/sparse/tsne_codes_{}.png'.format(tetrode_number), bbox_inches='tight')\n plt.close()\n\n # This is where the code for the video will go\n ##############################################################################\n # Compute DBSCAN\n db = None\n core_samples_mask = None\n labels = None\n\n num_labels = 0\n eps=1.0\n while(num_labels < 10):\n db = DBSCAN(eps=eps, min_samples=10).fit(codes_2d)\n core_samples_mask = np.zeros_like(db.labels_, dtype=bool)\n core_samples_mask[db.core_sample_indices_] = True\n labels = db.labels_\n num_labels = np.amax(labels)\n eps -= 0.1\n\n print(\"Num learned labels: {}\".format(num_labels))\n\n plt.title('Estimated number of clusters: {}'.format(np.amax(labels)))\n plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=labels[0:NUM_POINTS],lw=0)\n plt.savefig('dbscan_labels/deep/sparse/dbscan_codes_{}.png'.format(tetrode_number), bbox_inches='tight')\n plt.close()\n\n # f=open('dbscan_labels/deep/sparse/tetrode_{}.npy'.format(tetrode_number),'w')\n # pickle.dump(labels, f)\n # f.close()", "def _run_cycle(self):\n pass", "def trainer(model, X_train, y_train, X_valid, y_valid, config):\n # loop for number of epochs\n # shuffle inputs based off seed\n # need to shuffle validation based off same seed\n # forward prop and get xenloss\n # backprop and update weights\n\n stop_count = config['early_stop_epoch']\n b_size = config[\"batch_size\"]\n stop = config['early_stop']\n\n xnloss = []\n val_loss = [float('inf')]\n test_scores = []\n\n train_accu = []\n valid_accu = []\n\n\n #validation loss increase per epoch counter\n c = -1\n \n for i in range(config[\"epochs\"]):\n np.random.seed(i)\n np.random.shuffle(X_train)\n\n np.random.seed(i)\n np.random.shuffle(y_train)\n\n '''You should average the loss across all mini batches'''\n #means sum up loss from all mini-batches and divide by num_batches\n sums = 0\n\n num_batches = 
int(X_train.shape[0] / b_size)\n k=0\n for j in range(num_batches):\n # choose minibatch\n x = X_train[j * b_size: (j+1) * b_size]\n targets = y_train[j * b_size: (j+1) * b_size]\n loss, y_pred = model.forward_pass(x, targets)\n loss = loss / (config['batch_size'] * 10) # 10 classes\n sums += loss\n #xnloss.append(loss)\n model.backward_pass()\n k +=1\n # if k < 5 or k > 44:\n # print(targets[0, :])\n # print(y_pred[0, :])\n # print(y_pred[0, :].sum())\n # print(k, '=============')\n\n # mini-batch done here, take avg of loss\n avg_loss = sums / num_batches\n xnloss.append(avg_loss)\n \n ''' epochs loop continues here\n 0) perform validation and compute its (val) loss\n\n 1) calculate test accuracy for every epoch where the\n validation loss is better than the previous validation loss.\n \n 2) Save this result (test score OR loss?) and choose the best \n one when you hit the early stopping criteria.\n\n 3) early stopping - stop training (epochs loop) after 5th consecutive \n increase in validation loss. (Experiment with diff values).\n '''\n\n '''VALIDATION PERFORMACE'''\n v_loss, v_pred = model.forward_pass(X_valid, y_valid)\n v_loss_norm = v_loss / (len(X_valid) * 10)\n\n\n '''TEST ACCURACY''' \n #if val loss better (less) than prev: calculate test scores\n \n if v_loss_norm > val_loss[-1]:\n print(\"val loss going up from last time at epoch i=\", i)\n c += 1\n else:\n c = 0\n '''insert code for test accu here'''\n # val_loss.append(v_loss_norm)\n # else: #else val loss increased, so increment counter\n \n val_loss.append(v_loss_norm)\n \n '''EARLY STOPPING'''\n if stop and c == stop_count:\n print(\"early stopped at epoch =\", i+1)\n break\n\n print(val_loss[1:3])\n print(val_loss, len(xnloss), len(val_loss[1:]))\n #outside of epochs loop\n plt.plot(xnloss, label='training loss')\n plt.plot(val_loss[1:], label='validation loss')\n plt.title(\"losses across all epochs\")\n plt.xlabel(\"epochs\")\n plt.ylabel(\"avg loss for the epoch\")\n plt.legend()\n plt.savefig('raised_a.png')\n plt.show()\n #firstplot.png is training loss against # of batches, in 1 epoch\n #avgacrossepochs.png is avg training loss of all batches, across 50 epochs\n # both_losses = []\n \n # for i in range(len(xnloss)):\n # both_losses.append((val_loss[i], xnloss[i]))\n # print(\"validation errors: \", [(val_loss[i], xnloss[i]) for i in range(len(xnloss))])", "def train(self):\n learning_rate = tf.train.exponential_decay(self.learning_rate, self.global_step, self.decay_steps,self.decay_rate, staircase=True)\n self.learning_rate_=learning_rate\n #noise_std_dev = tf.constant(0.3) / (tf.sqrt(tf.cast(tf.constant(1) + self.global_step, tf.float32))) #gradient_noise_scale=noise_std_dev\n train_op = tf_contrib.layers.optimize_loss(self.loss_val, global_step=self.global_step,\n learning_rate=learning_rate, optimizer=\"Adam\",clip_gradients=self.clip_gradients)\n return train_op", "def train(self, target):\n\n n_it = 0\n\n # print(\"\\nsomme_erreure maximale (calcul initial) = \" + str(somme_erreur_av)+'\\n')\n\n while self.training:\n\n L_erreures_normalisees_base_training = []\n L_erreures_normalisees_base_test = []\n\n # on entraine le reseau en passant 100 fois la base d'exemple\n for k in range(100):\n\n # calcul de l'erreur cummulée sur toute la base d'exemple\n somme_erreur = [0, 0] # pour le gradient\n somme_erreur_carré = [0, 0]\n somme_erreur_carré_test = [0, 0]\n\n for i in range(len(sample_position)):\n command = self.network.runNN(\n sample_position[i]) # propage erreur et calcule la vitesse des roues instant t\n 
erreur = [(command[0] - sample_command[i][0]), (command[1] - sample_command[i][1])]\n somme_erreur[0] += erreur[0]\n somme_erreur[1] += erreur[1]\n\n somme_erreur_carré[0] += erreur[0] ** 2\n somme_erreur_carré[1] += erreur[1] ** 2\n\n # self.network.backPropagate(erreur, 0.0001, 0) ne marche pas bien\n\n # ajout de l'erreure au carré normaliséé par la taille de la base d'apprentissage et moyenné sur les 2 roues\n L_erreures_normalisees_base_training.append((somme_erreur_carré[0] / (1 * len(sample_position)) +\n somme_erreur_carré[1] / (1 * len(sample_position))) / 2)\n\n # print(\"A l'itération \" + str(n_it) + \", somme_erreur_carré = \"+str(somme_erreur_carré))\n\n grad = [0, 0]\n grad[0] = somme_erreur[0] / (1 * len(sample_position)) # erreur moyenne\n grad[1] = somme_erreur[1] / (1 * len(sample_position))\n\n self.network.backPropagate(grad, 0.005, 0)\n # grad, pas d'app, moment : permet de lisser la trajectoire\n\n n_it += 1\n\n # Fin de l'itération pour la base d'apprentissage, début pour la base de test\n for i in range(len(test_position)):\n command = self.network.runNN(\n test_position[i]) # propage erreur et calcule la vitesse des roues instant t\n erreur_test = [(command[0] - test_command[i][0]), (command[1] - test_command[i][1])]\n somme_erreur_carré_test[0] += erreur_test[0] ** 2\n somme_erreur_carré_test[1] += erreur_test[1] ** 2\n\n L_erreures_normalisees_base_test.append((somme_erreur_carré_test[0] / (1 * len(test_position)) +\n somme_erreur_carré_test[1] / (1 * len(test_position))) / 2)\n\n # Tracé des courbes\n pl.clf()\n X = [i + 1 for i in range(len(L_erreures_normalisees_base_training))]\n # X2 = [i + 1 for i in range(len(L_erreures_normalisees_base_training))]\n\n pl.plot(X, L_erreures_normalisees_base_training, 'r+')\n pl.plot(X, L_erreures_normalisees_base_test, 'bo')\n\n # base d'entrainement affichée avec des croix rouges\n # base de test affichée avec des ronds bleu\n\n pl.show()\n\n self.training = False\n\n # print(\"\\nsomme_erreur finale = [\" + str(somme_erreur[0]) + \",\" + str(somme_erreur[1]) + \"]\")\n print(\"\\nTraining done after \" + str(n_it) + \" iterations !\")\n\n # version avec arret quand l'erreure augmente\n # if (somme_erreur[0]+somme_erreur[1]) < (somme_erreur_av[0]+somme_erreur_av[1]) :\n # self.network.backPropagate(grad, 0.9,0) # grad, pas d'app, moment : permet de lisser la trajectoire\n # somme_erreur_av = somme_erreur\n # n_it+=1\n ##print(\"n_it = \"+ str(n_it)+\"\\n\")\n\n # else :\n # self.training = False\n # print(\"Training done after \" + str(n_it) +\" iterations !\")\n\n while self.running:\n position = self.robot.get_position()\n\n network_input = [0, 0, 0]\n\n # calcul de la position relative de la cible dans le referentiel du robot\n network_input[0] = ((target[0] - position[0]) * math.cos(position[2]) + (\n target[1] - position[1]) * math.sin(position[2])) * self.alpha[0]\n network_input[1] = ((target[0] - position[0]) * (-1) * math.sin(position[2]) + (\n target[1] - position[1]) * math.cos(position[2])) * self.alpha[1]\n network_input[2] = (-1) * (position[2] - target[2]) * self.alpha[2]\n\n command = self.network.runNN(network_input) # propage erreur et calcul vitesses roues instant t\n print(\"command =\" + str(command))\n\n # dénormaliser la commande\n command = M * command\n\n self.robot.set_motor_velocity(command) # applique vitesses roues instant t,\n time.sleep(0.050) # attend delta t\n\n self.robot.set_motor_velocity([0, 0]) # stop apres arret du prog d'app\n # position = self.robot.get_position() # obtient nvlle 
pos robot instant t+1\n # Teta_t=position[2]\n\n self.running = False", "def retrain(self):\n thread = Thread(target=self.trainer.train_classifier)\n thread.start()", "def train(self, mode=True):\n super(RCRNN, self).train(mode)\n if self.freeze_bn:\n print(\"Freezing Mean/Var of BatchNorm2D.\")\n if self.freeze_bn:\n print(\"Freezing Weight/Bias of BatchNorm2D.\")\n if self.freeze_bn:\n for m in self.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.eval()\n if self.freeze_bn:\n m.weight.requires_grad = False\n m.bias.requires_grad = False", "def train(model, x_train, y_train, x_valid, y_valid, config):\n \n epochs = config['epochs']\n threshold = config['early_stop_epoch']\n alpha = config['learning_rate']\n# val_loss = 10000*np.ones((epochs,1))\n beta = config['momentum_gamma']\n batch_size = config['batch_size']\n \n N = x_train.shape[0]\n num_batches = int((N+batch_size -1 )/ batch_size)\n \n best_weight = []\n best_epoch = []\n best_bias = []\n #print(len(model.layers))\n train_loss_list = []\n \n train_acc_list = []\n val_acc_list = []\n val_loss_list = []\n \n counter = 0\n \n lam = 0.0001\n \n \n for i in range(1, epochs+1):\n shuffled_indices = np.random.permutation(range(N))\n \n for batch in range(num_batches):\n minibatch_indices = shuffled_indices[batch_size*batch:min(batch_size*(batch+1), N)]\n #print(len(minibatch_indices))\n xbatch = x_train[minibatch_indices, :]\n ybatch = y_train[minibatch_indices, :]\n #print(ybatch.shape)\n y, loss = model(xbatch, ybatch)\n \n model.backward() \n #weight update and storing\n for k in range(0, len(config['layer_specs']), 2):\n mom_w = -model.layers[k].d_v_w * beta + alpha*(model.layers[k].d_w + lam*model.layers[k].w )\n mom_b = -model.layers[k].d_v_b * beta + alpha*(model.layers[k].d_b + lam*model.layers[k].b )\n model.layers[k].w = model.layers[k].w - (mom_w )\n model.layers[k].b = model.layers[k].b - (mom_b )\n model.layers[k].d_v_w = -mom_w\n model.layers[k].d_v_b = -mom_b \n\n y, loss = model(x_train, y_train) \n train_loss_list.append(loss)\n \n train_pred = np.argmax(y, axis=1) \n acc = np.mean(np.argwhere(y_train==1)[:,1]==train_pred) \n \n train_acc_list.append(acc)\n \n \n #print(\"Training acc for epoch \", i, \" is:\\n\", acc) \n #print(\"Training loss for epoch \", i, \" is:\\n\", loss) \n val_y, val_loss = model(x_valid, y_valid)\n val_loss_list.append(val_loss)\n\n val_pred = np.argmax(val_y, axis=1) \n acc = np.mean(np.argwhere(y_valid==1)[:,1]==val_pred) \n val_acc_list.append(acc)\n \n #print(\"Validation acc for epoch \", i, \" is:\\n\", acc) \n #print(\"Validation loss for epoch \", i, \" is:\\n\", val_loss)\n if(i>1 and val_loss <min(val_loss_list[:-1])):\n #update best weights\n counter = 0\n weight = []\n bias = []\n for k in range(0, len(config['layer_specs']), 2):\n weight.append(model.layers[k].w)\n bias.append(model.layers[k].b)\n best_weight = weight \n best_bias = bias\n best_epoch = i\n else:\n counter +=1\n \n if counter > threshold:\n print(\"best epoch:\", best_epoch)\n break\n\n# if(i>=6 and val_loss[i-1]>=val_loss[i-2] and val_loss[i-2]>=val_loss[i-3]and val_loss[i-3]>=val_loss[i-4]and val_loss[i-4]>=val_loss[i-5]and val_loss[i-5]>=val_loss[i-6]):\n# break\n \n print(len(best_weight))\n print('Epoch: ', i)\n #print(val_loss)\n p = 0\n for k in range(0, len(config['layer_specs']), 2):\n model.layers[k].w = best_weight[p]\n model.layers[k].b = best_bias[p]\n p = p + 1\n \n return train_loss_list, val_loss_list, train_acc_list, val_acc_list\n raise NotImplementedError(\"Train method not implemented\")", 
"def train(self, num_batches: int):", "def train(self):\n # self.recognizer.train()\n self.detector.train()\n self.shared_conv.train()", "def __init__(self, num_steps, model_load_path, num_test_rec):\n\n self.global_step = 0\n self.num_steps = num_steps\n self.num_test_rec = num_test_rec\n\n self.sess = tf.Session()\n self.summary_writer = tf.train.SummaryWriter(c.SUMMARY_SAVE_DIR, graph=self.sess.graph)\n\n if c.ADVERSARIAL:\n print 'Init discriminator...'\n self.d_model = DiscriminatorModel(self.sess,\n self.summary_writer,\n c.TRAIN_HEIGHT,\n c.TRAIN_WIDTH,\n c.SCALE_CONV_FMS_D,\n c.SCALE_KERNEL_SIZES_D,\n c.SCALE_FC_LAYER_SIZES_D)\n\n print 'Init generator...'\n self.g_model = GeneratorModel(self.sess,\n self.summary_writer,\n c.TRAIN_HEIGHT,\n c.TRAIN_WIDTH,\n c.FULL_HEIGHT,\n c.FULL_WIDTH,\n c.SCALE_FMS_G,\n c.SCALE_KERNEL_SIZES_G)\n\n print 'Init variables...'\n self.saver = tf.train.Saver(keep_checkpoint_every_n_hours=2)\n self.sess.run(tf.global_variables_initializer())\n\n # if load path specified, load a saved model\n if model_load_path is not None:\n self.saver.restore(self.sess, model_load_path)\n print 'Model restored from ' + model_load_path", "def train(self) -> Any:\n pass", "def test_training(self):\n\t\tpass", "def train(self):\r\n print(\"Starting training now\")\r\n cuda = True if torch.cuda.is_available() else False\r\n if cuda:\r\n self.model.cuda()\r\n\r\n # Construct optimizer after the model moved to GPU\r\n self.optm = self.make_optimizer()\r\n self.lr_scheduler = self.make_lr_scheduler(self.optm)\r\n\r\n dim_x = self.flags.dim_x\r\n dim_y = self.flags.dim_y\r\n dim_z = self.flags.dim_z\r\n dim_tot = self.flags.dim_tot\r\n\r\n # Time keeping\r\n tk = time_keeper(time_keeping_file=os.path.join(self.ckpt_dir, 'training time.txt'))\r\n\r\n for epoch in range(self.flags.train_step):\r\n # Set to Training Mode\r\n train_loss = 0\r\n self.model.train()\r\n # If MMD on x-space is present from the start, the model can get stuck.\r\n # Instead, ramp it up exponetially.\r\n loss_factor = min(1., 2. * 0.002 ** (1. 
- (float(epoch) / self.flags.train_step)))\r\n\r\n for j, (x, y) in enumerate(self.train_loader):\r\n batch_size = len(x)\r\n\r\n ######################\r\n # Preparing the data #\r\n ######################\r\n # Pad the x, y with zero_noise\r\n y_clean = y.clone() # keep a copy of y for backward\r\n x_pad = self.flags.zeros_noise_scale * torch.randn(batch_size,\r\n dim_tot - dim_x)\r\n y_pad = self.flags.zeros_noise_scale * torch.randn(batch_size,\r\n dim_tot - dim_y - dim_z)\r\n z = torch.randn(batch_size, dim_z)\r\n if cuda:\r\n x = x.cuda() # Put data onto GPU\r\n y = y.cuda() # Put data onto GPU\r\n x_pad = x_pad.cuda()\r\n y_pad = y_pad.cuda()\r\n y_clean = y_clean.cuda()\r\n z = z.cuda()\r\n\r\n # Concate the x and y with pads and add y with small purtubation\r\n y += self.flags.y_noise_scale * torch.randn(batch_size, dim_y, device=device)\r\n\r\n x, y = torch.cat((x, x_pad), dim=1), torch.cat((z, y_pad, y), dim=1)\r\n\r\n ################\r\n # Forward step #\r\n ################\r\n self.optm.zero_grad() # Zero the gradient first\r\n ypred = self.model(x) # Get the Ypred\r\n\r\n\r\n # Do the MSE loss for reconstruction, Doesn't compare z part (only pad and y itself)\r\n MSE_loss_y = self.make_loss(logit=ypred[:, dim_z:], labels=y[:, dim_z:])\r\n\r\n # Use the maximum likelihood method\r\n log_det = self.model.log_jacobian(x=x)\r\n #print(\"The log determinant is\", log_det)\r\n Forward_loss = 0.5 * (MSE_loss_y / self.flags.lambda_mse + torch.mean(torch.pow(z,2))) - torch.mean(log_det)\r\n Forward_loss.backward()\r\n\r\n ######################\r\n # Gradient Clipping #\r\n ######################\r\n for parameter in self.model.parameters():\r\n parameter.grad.data.clamp_(-self.flags.grad_clamp, self.flags.grad_clamp)\r\n\r\n #########################\r\n # Descent your gradient #\r\n #########################\r\n self.optm.step() # Move one step the optimizer\r\n\r\n # MLE training\r\n train_loss += Forward_loss \r\n\r\n # Calculate the avg loss of training\r\n train_avg_loss = train_loss.cpu().data.numpy() / (j + 1)\r\n\r\n if epoch % self.flags.eval_step == 0: # For eval steps, do the evaluations and tensor board\r\n # Record the training loss to the tensorboard\r\n self.log.add_scalar('Loss/total_train', train_avg_loss, epoch)\r\n self.log.add_scalar('Loss/MSE_y_train', MSE_loss_y, epoch)\r\n\r\n # Set to Evaluation Mode\r\n self.model.eval()\r\n print(\"Doing Evaluation on the model now\")\r\n\r\n test_loss = 0\r\n for j, (x, y) in enumerate(self.test_loader): # Loop through the eval set\r\n batch_size = len(x)\r\n\r\n ######################\r\n # Preparing the data #\r\n ######################\r\n # Pad the x, y with zero_noise\r\n y_clean = y.clone() # keep a copy of y for backward\r\n x_pad = self.flags.zeros_noise_scale * torch.randn(batch_size,\r\n dim_tot - dim_x)\r\n y_pad = self.flags.zeros_noise_scale * torch.randn(batch_size,\r\n dim_tot - dim_y - dim_z)\r\n z = torch.randn(batch_size, dim_z)\r\n if cuda:\r\n x = x.cuda() # Put data onto GPU\r\n y = y.cuda() # Put data onto GPU\r\n x_pad = x_pad.cuda()\r\n y_pad = y_pad.cuda()\r\n y_clean = y_clean.cuda()\r\n z = z.cuda()\r\n\r\n # Concate the x and y with pads and add y with small purtubation\r\n y += self.flags.y_noise_scale * torch.randn(batch_size, dim_y, device=device)\r\n\r\n x, y = torch.cat((x, x_pad), dim=1), torch.cat((z, y_pad, y), dim=1)\r\n\r\n ################\r\n # Forward step #\r\n ################\r\n self.optm.zero_grad() # Zero the gradient first\r\n ypred = self.model(x) # Get the Ypred\r\n # Do 
the MSE loss for reconstruction, Doesn't compare z part (only pad and y itself)\r\n MSE_loss_y = self.make_loss(logit=ypred[:, dim_z:], labels=y[:, dim_z:])\r\n\r\n log_det = self.model.log_jacobian(x=x)\r\n #print(\"The log determinant is\", log_det)\r\n Forward_loss = 0.5 * (MSE_loss_y / self.flags.lambda_mse + torch.mean(torch.pow(z,2))) - torch.mean(log_det)\r\n test_loss += Forward_loss\r\n # Aggregate the other loss (in np form)\r\n\r\n # Record the testing loss to the tensorboard\r\n test_avg_loss = test_loss.cpu().data.numpy() / (j+1)\r\n\r\n self.log.add_scalar('Loss/total_test', test_avg_loss, epoch)\r\n self.log.add_scalar('Loss/MSE_y_test', MSE_loss_y, epoch)\r\n\r\n print(\"This is Epoch %d, training loss %.5f, validation loss %.5f\" \\\r\n % (epoch, train_avg_loss, test_avg_loss ))\r\n\r\n # Model improving, save the model down\r\n if test_avg_loss < self.best_validation_loss:\r\n self.best_validation_loss = train_avg_loss\r\n self.save()\r\n print(\"Saving the model down...\")\r\n\r\n if self.best_validation_loss < self.flags.stop_threshold:\r\n print(\"Training finished EARLIER at epoch %d, reaching loss of %.5f\" %\\\r\n (epoch, self.best_validation_loss))\r\n break\r\n\r\n # Learning rate decay upon plateau\r\n self.lr_scheduler.step(train_avg_loss)\r\n tk.record(1) # Record the total time of the training peroid\r", "def train_test_model_batch():\n train=learning.Train_kmer_clf()\n train.run()", "def train(self):\n #learning_rate = tf.train.exponential_decay(self.learning_rate, self.global_step, self.decay_steps,self.decay_rate, staircase=True) #去掉decay_steps\n train_op = tf.contrib.layers.optimize_loss(self.losses, global_step=self.global_step, learning_rate=self.learning_rate, optimizer=\"Adam\")\n return train_op", "def training(self):\n self.training = True", "def train():\n import trace\n trace.train()", "def train_cavia(args, config, path_model, device):\n global DATA, PATH_BASE, PATH_DATA, N_CLS, NAME_DATA, NAME_BACKBONE\n\n if path_model is None:\n masks_dict = None\n learner = None\n else:\n print('Load net from', path_model)\n save_dict = torch.load(path_model)\n state_dict = save_dict['state_dict']\n masks_dict = save_dict.get('masks_dict', None)\n\n learner = Learner(config, args.num_context_params, args.context_in)\n learner.load_state_dict(state_dict)\n\n if masks_dict is None:\n print('masks_dict is None!')\n cr = 1.\n else:\n cr = get_cr(learner, masks_dict)\n\n cavia = Meta(args, config, learner).to(device)\n\n # save path\n if path_model is None:\n path_save = '%s/model/cavia_lobs/%s_%s/%d-way_%d-shot' % (\n PATH_BASE, NAME_DATA, NAME_BACKBONE, args.n_way, args.k_spt)\n else:\n path_save = '/'.join(path_model.split('/')[:-1])\n if not os.path.exists(path_save):\n os.makedirs(path_save)\n\n # meta train\n meta_train(cavia, args, masks_dict, device,\n output=args.log_print,\n save=args.save_each_epoch,\n save_threshold=args.save_threshold,\n test_each_epoch=args.test_each_epoch,\n path_save=path_save,\n cr=cr,\n break_threshold=args.break_threshold)\n\n if not args.save_each_epoch:\n # meta test\n acc_avg, accs = meta_test(cavia, args, masks_dict, device)\n\n path_model = '%s/net_cr-1_acc-%.4f.pkl' % (path_save, acc_avg)\n print('Save as %s' % path_model)\n torch.save(\n {\n 'state_dict': cavia.net.state_dict(),\n 'accs': accs,\n 'config': config,\n 'args': args\n },\n path_model\n )", "def train_classifier(data, n_iters=3, batch_size=100):\n tqdm.write(f'Training a dilated CNN classifier for {n_iters} iterations.')\n (trainx, trainy), (valx, valy), 
(testx, testy) = data\n train_size, val_size, test_size = trainx.shape[0], valx.shape[0], testx.shape[0]\n train_batches = (train_size - 1) // batch_size + 1\n val_batches = (val_size - 1) // batch_size + 1\n test_batches = (test_size - 1) // batch_size + 1\n\n model = Network()\n model.add_layer(ConvLayer(10, (3, 3), (1, 1), 2)) \\\n .add_layer(ReluLayer()) \\\n .add_layer(Pad2DLayer((2, 2))) \\\n .add_layer(ConvLayer(10, (3, 3), (1, 1), 2)) \\\n .add_layer(ReluLayer()) \\\n .add_layer(Pool2DLayer((2, 2))) \\\n .add_layer(ConvLayer(10, (3, 3), (1, 1), 2)) \\\n .add_layer(ReluLayer()) \\\n .add_layer(Pool2DLayer((2, 2))) \\\n .add_layer(FlattenLayer()) \\\n .add_layer(FCLayer(32)) \\\n .add_layer(ReluLayer()) \\\n .add_layer(FCLayer(10)) \\\n .add_layer(SoftmaxCELayer())\n for i in range(1, n_iters + 1):\n train_order = np.random.permutation(train_size)\n bar = trange(train_batches, file=sys.stdout)\n for j in bar:\n cost = model.forward(trainx[train_order[j * batch_size: (j + 1) * batch_size]],\n trainy[train_order[j * batch_size: (j + 1) * batch_size]])\n bar.set_description(f'Curr loss: {cost}')\n model.backward()\n model.adam_trainstep()\n correct = []\n for j in range(val_batches):\n res = model.run(valx[j * batch_size:(j + 1) * batch_size])\n correct.append(np.argmax(res, axis=1) == valy[j * batch_size:(j + 1) * batch_size])\n tqdm.write(f'Validation accuracy: {np.mean(correct)}')\n tqdm.write('-------------------------------------------------------')\n\n correct = []\n for i in range(test_batches):\n res = model.run(testx[i * batch_size:(i + 1) * batch_size])\n correct.append(np.argmax(res, axis=1) == testy[i * batch_size:(i + 1) * batch_size])\n tqdm.write(f'Test accuracy: {np.mean(correct)}')\n tqdm.write('-------------------------------------------------------')", "def train_all(X_train_fuse, Y_train, X_dev_fuse, Y_dev, R_train, R_dev, hyperparams):", "def train(self):\n max_tuple = self.max_gain()\n # If that gain is 0 then every node should be a pure leaf (hopefully) and you can stop\n while max_tuple.gain != 0:\n max_tuple.node.split(max_tuple.attribute)\n max_tuple = self.max_gain()", "def train(self, mode=True):\n super().train(mode)\n if mode and self.freeze_2d and self.backbone is not None:\n self._freeze(self.backbone)\n return self", "def train():\n init_distributed_mode(args)\n save_dir = TRAIN_CFG['save_dir']\n if not os.path.exists(save_dir) and torch.distributed.get_rank() == 0:\n os.mkdir(save_dir)\n kwargs = {}\n # If augmenting data, disable Pytorch's own augmentataion\n # This has to be done manually as augmentation is embedded\n # refer : https://github.com/pytorch/vision/issues/2263\n base_path = DATASET_CFG['base_path']\n train_set = DATASET_CFG['train']\n valid_set = DATASET_CFG['valid']\n dset_mean_std = DATASET_CFG['mean_std']\n if dset_mean_std is not None:\n dataset_mean = [i/255. for i in dset_mean_std[0]]\n dataset_std = [i/255. 
for i in dset_mean_std[1]]\n else:\n dataset_mean, dataset_std = compute_mean_std(base_path, train_set)\n kwargs['image_mean'] = dataset_mean\n kwargs['image_std'] = dataset_std\n kwargs['min_size'] = DATASET_CFG['min_size']\n kwargs['max_size'] = DATASET_CFG['max_size']\n kwargs['box_detections_per_img'] = 300 # increase max det to max val in our benchmark\n\n # Set benchmark related parameters\n if benchmark == 'ScutHead':\n combined_cfg = {**cfg, **sh_anchors}\n elif benchmark == 'CrowdHuman':\n combined_cfg = {**cfg, **ch_anchors}\n elif benchmark == 'Combined':\n combined_cfg = {**cfg, **combined_anchors}\n else:\n raise ValueError(\"New dataset has to be registered\")\n\n # Create Model\n default_filter = False\n model = customRCNN(cfg=combined_cfg,\n use_deform=NET_CFG['use_deform'],\n ohem=NET_CFG['ohem'],\n context=NET_CFG['context'],\n custom_sampling=NET_CFG['custom_sampling'],\n default_filter=default_filter,\n soft_nms=NET_CFG['soft_nms'],\n upscale_rpn=NET_CFG['upscale_rpn'],\n median_anchors=NET_CFG['median_anchors'],\n **kwargs).cuda() \n model_without_ddp = model\n if args.distributed:\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu],\n find_unused_parameters=True)\n model_without_ddp = model.module\n\n # Create Optimizer\n params = [p for p in model.parameters() if p.requires_grad]\n optimizer = torch.optim.SGD(params, lr=HYP_CFG['learning_rate'],\n momentum=HYP_CFG['learning_rate'],\n weight_decay=HYP_CFG['weight_decay'])\n\n scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,\n milestones=TRAIN_CFG['milestones'],\n gamma=HYP_CFG['gamma'])\n # Restore from checkpoint\n pt_model = TRAIN_CFG['pretrained_model']\n if pt_model:\n model_without_ddp = restore_network(model_without_ddp, pt_model,\n only_backbone=TRAIN_CFG['only_backbone'])\n \n # Create training and vaid dataset\n dataset_param = {'mean': dataset_mean, 'std':dataset_std,\n 'shape':(kwargs['min_size'], kwargs['max_size'])}\n batch_size = HYP_CFG['batch_size']\n train_dataset = HeadDataset(train_set,\n base_path,\n dataset_param,\n train=True)\n val_dataset = HeadDataset(valid_set,\n base_path,\n dataset_param,\n train=False)\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)\n train_batch_sampler = torch.utils.data.BatchSampler(train_sampler,\n batch_size,\n drop_last=True)\n train_data_loader = torch.utils.data.DataLoader(train_dataset,\n batch_sampler=train_batch_sampler,\n num_workers=args.num_workers,\n collate_fn=collate_fn)\n\n val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset)\n val_batch_sampler = torch.utils.data.BatchSampler(val_sampler,\n batch_size,\n drop_last=True)\n val_data_loader = torch.utils.data.DataLoader(val_dataset,\n batch_sampler=val_batch_sampler,\n num_workers=args.num_workers,\n collate_fn=collate_fn)\n # Fastforward the LR decayer\n start_epoch = TRAIN_CFG['start_epoch']\n max_epoch = TRAIN_CFG['max_epoch']\n for _ in range(0, -1):\n scheduler.step()\n\n # Start training\n print(\"======= Training for \" + str(max_epoch) + \"===========\")\n for epoch in range(start_epoch, int(max_epoch) + 1):\n if epoch % TRAIN_CFG['eval_every'] == 0:\n print(\"========= Evaluating Model ==========\")\n result_dict = evaluate(model, val_data_loader, benchmark=benchmark)\n if torch.distributed.get_rank() == 0:\n logging.info('Eval score at {0} epoch is {1}'.format(str(epoch),\n result_dict))\n \n train_one_epoch(model, optimizer, train_data_loader,\n device, epoch, print_freq=1000)\n scheduler.step()\n if 
torch.distributed.get_rank() == 0:\n print(\"Saving model\")\n torch.save(model.state_dict(), osp.join(save_dir,\n TRAIN_CFG['exp_name'] + '_epoch_' + str(epoch) + '.pth'))", "def runconnectome(self, ):\n for ps in self.postSynaptic:\n if ps[:3] not in self.muscles and abs(self.postSynaptic[ps][self.thisState]) > self.threshold:\n self.fireNeuron(ps)\n self.motorcontrol()\n for ps in self.postSynaptic:\n # if self.postSynaptic[ps][thisState] != 0:\n # print ps\n # print \"Before Clone: \", self.postSynaptic[ps][thisState]\n\n # fired neurons keep getting reset to previous weight\n # wtf deepcopy -- So, the concern is that the deepcopy doesnt\n # scale up to larger neural networks?? \n self.postSynaptic[ps][self.thisState] = copy.deepcopy(self.postSynaptic[ps][self.nextState]) \n\n # this deep copy is not in the functioning version currently.\n # print \"After Clone: \", self.postSynaptic[ps][thisState]\n\n self.thisState, self.nextState = self.nextState, self.thisState", "def train_model(train, val, epochs, model, opt, nmode, n, val_nmode1, val_nmode2):\n for epoch in range(epochs):\n opt.zero_grad()\n loss = get_loss(model, train)\n loss.requires_grad = True\n loss.backward()\n opt.step()\n\n if epoch%10 == 0:\n model.eval()\n lossval = get_loss(model, val)\n print(f'Epoch: {epoch} \\tTrain loss: {loss}\\tVal loss: {lossval}\\tVal acc:')\n\n return model", "def nonlearning():\n\taT.featureAndTrain(['../../AudioData/chunked_data_sorted/pos', '../../AudioData/chunked_data_sorted/neg'], \n\t\t\t\t\t\t1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, \n \"svm\", \"emotion_classifier\", True)", "def main(configuration_path, mode):\n config = toml.load(configuration_path)\n train_conf = read_config(config)\n\n click.echo(\"\\n Train config:\")\n print(train_conf, \"\\n\")\n\n # create databunch\n data = create_databunch(\n data_path=train_conf[\"data_path\"],\n fourier=train_conf[\"fourier\"],\n batch_size=train_conf[\"batch_size\"],\n )\n\n # get image size\n train_conf[\"image_size\"] = data.train_ds[0][0][0].shape[1]\n\n # define architecture\n arch = define_arch(\n arch_name=train_conf[\"arch_name\"], img_size=train_conf[\"image_size\"]\n )\n\n if mode == \"train\":\n if train_conf[\"normalize\"] == \"mean\":\n train_conf[\"norm_factors\"] = get_normalisation_factors(data)\n # check out path and look for existing model files\n check_outpath(train_conf[\"model_path\"], train_conf)\n\n click.echo(\"Start training of the model.\\n\")\n\n # define_learner\n learn = define_learner(data, arch, train_conf)\n\n # load pretrained model\n if train_conf[\"pre_model\"] != \"none\":\n learn.create_opt()\n load_pre_model(learn, train_conf[\"pre_model\"])\n\n # Train the model, except interrupt\n # train_conf[\"comet_ml\"] = True\n try:\n if train_conf[\"comet_ml\"]:\n learn.comet.experiment.log_parameters(train_conf)\n with learn.comet.experiment.train():\n learn.fit(train_conf[\"num_epochs\"])\n else:\n learn.fit(train_conf[\"num_epochs\"])\n except KeyboardInterrupt:\n pop_interrupt(learn, train_conf)\n\n end_training(learn, train_conf)\n\n if train_conf[\"inspection\"]:\n after_training_plots(train_conf, rand=True)\n\n if mode == \"fine_tune\":\n click.echo(\"Start fine tuning of the model.\\n\")\n\n # define_learner\n learn = define_learner(\n data,\n arch,\n train_conf,\n )\n\n # load pretrained model\n if train_conf[\"pre_model\"] == \"none\":\n click.echo(\"Need a pre-trained modle for fine tuning!\")\n return\n\n learn.create_opt()\n load_pre_model(learn, train_conf[\"pre_model\"])\n\n # Train the 
model, except interrupt\n try:\n learn.fine_tune(train_conf[\"num_epochs\"])\n except KeyboardInterrupt:\n pop_interrupt(learn, train_conf)\n\n end_training(learn, train_conf)\n if train_conf[\"inspection\"]:\n after_training_plots(train_conf, rand=True)\n\n if mode == \"lr_find\":\n click.echo(\"Start lr_find.\\n\")\n if train_conf[\"normalize\"] == \"mean\":\n train_conf[\"norm_factors\"] = get_normalisation_factors(data)\n\n # define_learner\n learn = define_learner(data, arch, train_conf, lr_find=True)\n\n # load pretrained model\n if train_conf[\"pre_model\"] != \"none\":\n learn.create_opt()\n load_pre_model(learn, train_conf[\"pre_model\"])\n\n learn.lr_find()\n\n # save loss plot\n plot_lr_loss(\n learn,\n train_conf[\"arch_name\"],\n Path(train_conf[\"model_path\"]).parent,\n skip_last=5,\n output_format=train_conf[\"format\"],\n )\n\n if mode == \"plot_loss\":\n click.echo(\"Start plotting loss.\\n\")\n\n # define_learner\n learn = define_learner(data, arch, train_conf, plot_loss=True)\n # load pretrained model\n if Path(train_conf[\"model_path\"]).exists:\n load_pre_model(learn, train_conf[\"model_path\"], plot_loss=True)\n else:\n click.echo(\"Selected model does not exist.\")\n click.echo(\"Exiting.\\n\")\n sys.exit()\n\n plot_lr(\n learn, Path(train_conf[\"model_path\"]), output_format=train_conf[\"format\"]\n )\n plot_loss(\n learn, Path(train_conf[\"model_path\"]), output_format=train_conf[\"format\"]\n )", "def train():\n ### DO NOT CHANGE SEEDS!\n # Set the random seeds for reproducibility\n np.random.seed(42)\n\n ## Prepare all functions\n # Get number of units in each hidden layer specified in the string such as 100,100\n if FLAGS.dnn_hidden_units:\n dnn_hidden_units = FLAGS.dnn_hidden_units.split(\",\")\n dnn_hidden_units = [int(dnn_hidden_unit_) for dnn_hidden_unit_ in dnn_hidden_units]\n else:\n dnn_hidden_units = []\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n model = MLP(n_hidden=dnn_hidden_units,n_classes=10,batch_size=FLAGS.batch_size, input_dim=32*32*3, \n weight_decay=FLAGS.weight_reg_strength, weight_scale=FLAGS.weight_init_scale)\n\n Datasets = utils.get_cifar10(data_dir = DATA_DIR_DEFAULT, one_hot = True, validation_size = 0)\n \n for i in range(1500): #(FLAGS.max_steps):\n train_batch = Datasets.train.next_batch(batch_size = FLAGS.batch_size)\n #Get the model output\n logits = model.inference(x=train_batch[0].reshape([FLAGS.batch_size,32*32*3]))\n #Get the loss and let the model set the loss derivative.\n loss = model.loss(logits=logits, labels=train_batch[1])\n #Perform training step\n model.train_step(loss=loss, flags=FLAGS)\n\n #Every 100th iteratin print accuracy on the whole test set.\n if i % 100 == 0:\n # for layer in model.layers:\n test_batch = Datasets.test.next_batch(batch_size = 200) #Datasets.test.num_examples\n logits = model.inference(x=test_batch[0].reshape([200,32*32*3]))\n print('-- Step: ', i, \" accuracy: \",model.accuracy(logits=logits,labels=test_batch[1]),'loss', loss )\n\n ########################\n # END OF YOUR CODE #\n #######################", "def retrain_sub_model(self):\r\n \r\n self.sub_model = self.load_weights_to_sub_model()\r\n X = np.array(self.conv4_characters_list)\r\n X = np.reshape(X, (X.shape[0]*X.shape[1], X.shape[2]))\r\n y = np.repeat(np.arange(1283), 9)\r\n \r\n opt = optimizers.Adam(lr=0.001)\r\n self.sub_model.compile(optimizer=opt,loss='sparse_categorical_crossentropy',metrics=['accuracy'])\r\n print(\"***Start to creat new decision model***\")\r\n self.sub_model.fit(X, 
y, epochs=20)\r\n print(\"***Finish***\")", "def start_training(self):\n self.training = True", "def train(self):\n self.epoch = 0\n self.step = 0\n self.start_time = time.time()\n for self.epoch in range(self.opt.num_epochs):\n self.run_epoch()\n if (self.epoch + 1) % self.opt.save_frequency == 0:\n self.save_model()", "async def train(self):", "def dir_cnn():\n\n data_dir = \"/home/liyanzeng/git/Var-CNN--DynaFlow/preprocess\"\n\n # read in data from numpy files\n train_metadata = np.load(r\"%s/train_metadata.npy\" % data_dir)\n test_metadata = np.load(r\"%s/test_metadata.npy\" % data_dir)\n train_seq = np.load(r\"%s/train_seq.npy\" % data_dir)\n train_labels = np.load(r\"%s/train_labels.npy\" % data_dir)\n test_seq = np.load(r\"%s/test_seq.npy\" % data_dir)\n test_labels = np.load(r\"%s/test_labels.npy\" % data_dir)\n\n # apply normalization to metadata\n metadata_scaler = StandardScaler()\n train_metadata = metadata_scaler.fit_transform(train_metadata)\n test_metadata = metadata_scaler.transform(test_metadata)\n\n # extract sequences\n train_time, train_time_dleft, train_time_dright, train_dir = np.split(train_seq, 4, axis=2)\n test_time, test_time_dleft, test_time_dright, test_dir = np.split(test_seq, 4, axis=2)\n\n train_seq = train_dir\n test_seq = test_dir\n\n # construct CNN\n dilation_rate = 1\n cnn_input = Input(shape=(seq_length, 1,), name='cnn_input')\n cnn_model, dilation_rate = dir_conv_block(cnn_input, 2, 4, dilation_rate)\n cnn_model, dilation_rate = dir_conv_block(cnn_model, 2, 8, dilation_rate)\n cnn_model, dilation_rate = dir_conv_block(cnn_model, 2, 8, dilation_rate)\n cnn_model, dilation_rate = dir_conv_block(cnn_model, 3, 16, dilation_rate)\n cnn_model, dilation_rate = dir_conv_block(cnn_model, 3, 16, dilation_rate)\n cnn_output = Flatten()(cnn_model)\n cnn_output = dense_layer(cnn_output, 1024, 0.4)\n\n # construct MLP for metadata\n metadata_input = Input(shape=(7,), name='metadata_input')\n metadata_output = dense_layer(metadata_input, 32, 0.) 
# consider this the embedding of all the metadata\n\n # concatenate before second dense layer\n combined = Concatenate()([cnn_output, metadata_output])\n combined = dense_layer(combined, 1024, 0.5)\n\n # add final softmax layer\n if NUM_UNMON_SITES == 0: # closed-world\n combined_output = Dense(units=NUM_MON_SITES, activation='softmax', name='combined_output')(combined)\n else:\n # add extra class for unmonitored sites\n combined_output = Dense(units=NUM_MON_SITES + 1, activation='softmax', name='combined_output')(combined)\n\n model = Model(inputs=[cnn_input, metadata_input], outputs=[combined_output])\n model.compile(loss='categorical_crossentropy',\n optimizer=Adam(0.001),\n metrics=['accuracy'])\n\n training_data = ({'cnn_input': train_seq,\n 'metadata_input': train_metadata},\n {'combined_output': train_labels})\n\n test_data = ({'cnn_input': test_seq,\n 'metadata_input': test_metadata},\n {'combined_output': test_labels})\n\n lr_modifier = LearningRateScheduler(schedule=lr_scheduler)\n\n # train model\n train_time_start = time.time()\n model.fit(x=training_data[0],\n y=training_data[1],\n batch_size=50,\n epochs=200,\n verbose=0,\n callbacks=[lr_modifier])\n train_time_end = time.time()\n\n # compute final softmax predictions on test set and save predictions\n test_time_start = time.time()\n predictions = model.predict(test_data[0], batch_size=50, verbose=0)\n test_time_end = time.time()\n \n save_dir = \"predictions\"\n np.save(file=r\"%s/dir_model\" % save_dir, arr=predictions)\n \n return (train_time_end - train_time_start), (test_time_end - test_time_start)", "def worker_train():\n py_work = np.zeros(model.layer1_size, dtype=np.float32)\n\n while True:\n job = jobs.get(block=True)\n if job is None: # data finished, exit\n jobs.task_done()\n # print('thread %s break' % threading.current_thread().name)\n break\n\n lr = max(self.min_lr, self.lr * (1 - 1.0 * node_count[0]/total_node))\n #lr = self.lr \n job_words = 0\n #out_i = 0\n for edge in job:\n if edge is not None:\n weight = G[model.vocab_t[edge[0].index]][model.vocab_t[edge[1].index]]['weight']\n neg_l = []\n #负样本node选取和主node不连通的点\n min_node0, min_conn_tup = sorted(model.connected_path[model.vocab_t[edge[0].index]].items(), key=lambda x:x[1][0])[0]\n min_conn0 = min_conn_tup[0]\n min_node1, min_conn_tup = sorted(model.connected_path[model.vocab_t[edge[1].index]].items(), key=lambda x:x[1][0])[0]\n min_conn1 = min_conn_tup[0]\n for i in range(self.negative):\n nodeidx = model.table[np.random.randint(model.table_size - 1)]\n if (model.vocab_t[nodeidx] not in model.connected_path[model.vocab_t[edge[0].index]]\n or (model.connected_path[model.vocab_t[edge[0].index]][model.vocab_t[nodeidx]][0] <= max(0.1,min_conn0))) \\\n and (model.vocab_t[nodeidx] not in model.connected_path[model.vocab_t[edge[1].index]]\n or (model.connected_path[model.vocab_t[edge[1].index]][model.vocab_t[nodeidx]][1] <= max(0.1,min_conn1))):\n neg_l.append(nodeidx)\n if len(neg_l) == 0:\n neg_l.append(model.vocab[min_node0].index)\n neg_l.append(model.vocab[min_node1].index)\n neg_np = np.asarray(neg_l)\n if weight >= 0.0:\n #job_words += sum(train_o1(model.node_embedding, edge, weight, lr, self.negative, model.table,\n job_words += sum(train_o1(model.node_embedding, edge, lr, self.negative, neg_np,\n py_size=model.layer1_size, py_work=py_work) \n for i in range(1))\n #for i in range(int(10 * weight)))\n #job_words = sum(train_o1(model.node_embedding, edge, lr, self.negative, model.table,\n # py_size=model.layer1_size, py_work=py_work) for edge in job if edge 
is not None)\n jobs.task_done()\n lock.acquire(timeout=30)\n try:\n node_count[0] += job_words\n\n elapsed = time.time() - start\n if elapsed >= next_report[0]:\n log.info(\"PROGRESS: at %.2f%% \\tnode_computed %d\\talpha %.05f\\t %.0f nodes/s\" %\n (100.0 * node_count[0] / total_node, node_count[0], lr, node_count[0] / elapsed if elapsed else 0.0))\n next_report[0] = elapsed + 5.0 # don't flood the log, wait at least a second between progress reports\n finally:\n lock.release()", "def run_net(self,\n pre_trained_chckpnt_dir ='' #for resuming training, load the model from this directory\n ):\n\n _rd = _read_data(data=self.data)\n\n self.alpha_coeff=1\n\n #read path of the images for train, test, and validation\n train_CTs, train_GTVs, train_Torso, train_penalize, train_surface,\\\n validation_CTs, validation_GTVs, validation_Torso, validation_penalize, validation_surface,\\\n test_CTs, test_GTVs, test_Torso, test_penalize,test_surface=_rd.read_data_path(fold=self.fold)\n self.img_width = self.img_width\n self.img_height = self.img_height\n # ======================================\n #validation instances\n bunch_of_images_no=20\n _image_class_vl = image_class(validation_CTs, validation_GTVs, validation_Torso,validation_penalize,validation_surface\n , bunch_of_images_no=bunch_of_images_no, is_training=0,\n patch_window=self.patch_window)\n _patch_extractor_thread_vl = _patch_extractor_thread(_image_class=_image_class_vl,\n sample_no=self.sample_no, patch_window=self.patch_window,\n GTV_patchs_size=self.GTV_patchs_size,\n tumor_percent=self.tumor_percent,\n img_no=bunch_of_images_no,\n mutex=settings.mutex,is_training=0,vl_sample_no=self.validation_samples\n )\n _fill_thread_vl = fill_thread(validation_CTs,\n validation_GTVs,\n validation_Torso,\n validation_penalize,\n validation_surface,\n _image_class_vl,\n sample_no=self.sample_no,\n total_sample_no=self.validation_samples,\n patch_window=self.patch_window,\n GTV_patchs_size=self.GTV_patchs_size,\n img_width=self.img_width, img_height=self.img_height,\n mutex=settings.mutex,\n tumor_percent=self.tumor_percent,\n is_training=0,\n patch_extractor=_patch_extractor_thread_vl,\n fold=self.fold)\n\n\n _fill_thread_vl.start()\n _patch_extractor_thread_vl.start()\n _read_thread_vl = read_thread(_fill_thread_vl, mutex=settings.mutex,\n validation_sample_no=self.validation_samples, is_training=0)\n _read_thread_vl.start()\n # ======================================\n #training instances\n bunch_of_images_no = 24\n _image_class = image_class(train_CTs, train_GTVs, train_Torso,train_penalize,train_surface\n , bunch_of_images_no=bunch_of_images_no,is_training=1,patch_window=self.patch_window\n )\n patch_extractor_thread = _patch_extractor_thread(_image_class=_image_class,\n sample_no=240, patch_window=self.patch_window,\n GTV_patchs_size=self.GTV_patchs_size,\n tumor_percent=self.tumor_percent,\n img_no=bunch_of_images_no,\n mutex=settings.mutex,is_training=1)\n _fill_thread = fill_thread(train_CTs, train_GTVs, train_Torso,train_penalize,train_surface,\n _image_class,\n sample_no=self.sample_no,total_sample_no=self.sample_no,\n patch_window=self.patch_window,\n GTV_patchs_size=self.GTV_patchs_size,\n img_width=self.img_width,\n img_height=self.img_height,mutex=settings.mutex,\n tumor_percent=self.tumor_percent,\n is_training=1,\n patch_extractor=patch_extractor_thread,\n fold=self.fold)\n\n _fill_thread.start()\n patch_extractor_thread.start()\n\n _read_thread = read_thread(_fill_thread,mutex=settings.mutex,is_training=1)\n _read_thread.start()\n # 
======================================\n\n image = tf.placeholder(tf.float32, shape=[None, None, None, None, 1])\n label = tf.placeholder(tf.float32, shape=[None, None, None, None, 2])\n penalize = tf.placeholder(tf.float32, shape=[None, None, None, None,1])\n surf_map = tf.placeholder(tf.float32, shape=[None, None, None, None,1])\n loss_coef = tf.placeholder(tf.float32, shape=[None, 2]) # shape: batchno * 2 values for each class\n alpha = tf.placeholder(tf.float32, name='alpha') # background coeff\n beta = tf.placeholder(tf.float32, name='beta') # tumor coeff\n\n ave_vali_acc=tf.placeholder(tf.float32)\n ave_loss_vali=tf.placeholder(tf.float32)\n ave_dsc_vali=tf.placeholder(tf.float32)\n\n dropout=tf.placeholder(tf.float32,name='dropout')\n is_training = tf.placeholder(tf.bool, name='is_training')\n is_training_bn = tf.placeholder(tf.bool, name='is_training_bn')\n dense_net_dim = tf.placeholder(tf.int32, name='dense_net_dim')\n\n _dn = _densenet_unet(self.densnet_unet_config,self.compression_coefficient,self.growth_rate) #create object\n y=_dn.dens_net(image=image,is_training=is_training,dropout_rate1=0,dropout_rate2=0,dim=dense_net_dim,is_training_bn=is_training_bn)\n # y = _dn.vgg(image)\n\n y_dirX = ((y[:, int(self.GTV_patchs_size / 2), :, :, 0, np.newaxis]))\n label_dirX = (label[:, int(self.GTV_patchs_size / 2), :, :, 0, np.newaxis])\n penalize_dirX = (penalize[:,16,:,:,0,np.newaxis])\n surf_map_dirX = (surf_map[:,16,:,:,0,np.newaxis])\n image_dirX = ((image[:, int(self.patch_window / 2), :, :, 0, np.newaxis]))\n\n show_img=tf.nn.softmax(y)[:, int(self.GTV_patchs_size / 2) , :, :, 0, np.newaxis]\n tf.summary.image('outprunut',show_img , 3)\n tf.summary.image('output without softmax',y_dirX ,3)\n tf.summary.image('groundtruth', label_dirX,3)\n tf.summary.image('penalize', penalize_dirX,3)\n tf.summary.image('surf_map', surf_map_dirX,3)\n tf.summary.image('image',image_dirX ,3)\n\n print('*****************************************')\n print('*****************************************')\n print('*****************************************')\n sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))\n devices = sess.list_devices()\n print(devices)\n\n print(device_lib.list_local_devices())\n print('*****************************************')\n print('*****************************************')\n print('*****************************************')\n\n train_writer = tf.summary.FileWriter(self.LOGDIR + '/train' ,graph=tf.get_default_graph())\n validation_writer = tf.summary.FileWriter(self.LOGDIR + '/validation' , graph=sess.graph)\n\n extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n saver=tf.train.Saver(tf.global_variables(), max_to_keep=1000)\n\n\n\n #define the loss function\n with tf.name_scope('cost'):\n penalize_weight=0\n [ penalized_loss,\n soft_dice_coef,logt,lbl]=self.loss_instance.dice_plus_distance_penalize(logits=y, labels=label,penalize=penalize)\n surface_loss= self.loss_instance.surface_loss(logits=y, labels=label, surf_map=surf_map)\n cost = tf.reduce_mean((1.0 - soft_dice_coef[1])+penalize_weight*penalized_loss+surface_loss, name=\"cost\")\n\n #Setup the Tensorboard plots\n tf.summary.scalar(\"cost\", cost)\n f1_measure = self.loss_instance.f1_measure(logits=y, labels=label)\n tf.summary.scalar(\"dice_bakground\", f1_measure[0])\n tf.summary.scalar(\"dice_tumor\", f1_measure[1])\n\n pwc = self.loss_instance.PWC(y, label)\n tf.summary.scalar(\"pwc_bakground\", pwc[0])\n tf.summary.scalar(\"pwc_tumor\", pwc[1])\n\n recall = self.loss_instance.Recall(y, 
label)\n tf.summary.scalar(\"recall_bakground\", recall[0])\n tf.summary.scalar(\"recall_tumor\", recall[1])\n\n precision = self.loss_instance.Precision(y, label)\n tf.summary.scalar(\"precision_bakground\", precision[0])\n tf.summary.scalar(\"precision_tumor\", precision[1])\n\n fpr = self.loss_instance.FPR(y, label)\n tf.summary.scalar(\"FPR_bakground\", fpr[0])\n tf.summary.scalar(\"FPR_tumor\", fpr[1])\n\n fnr = self.loss_instance.FNR(y, label)\n tf.summary.scalar(\"FNR_bakground\", fnr[0])\n tf.summary.scalar(\"FNR_tumor\", fnr[1])\n\n extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(extra_update_ops):\n optimizer_tmp = tf.train.AdamOptimizer(self.learning_rate,epsilon=0.001)\n optimizer = optimizer_tmp.minimize(cost)\n\n with tf.name_scope('validation'):\n average_validation_accuracy=ave_vali_acc\n average_validation_loss=ave_loss_vali\n average_dsc_loss=ave_dsc_vali\n tf.summary.scalar(\"average_validation_accuracy\",average_validation_accuracy)\n tf.summary.scalar(\"average_validation_loss\",average_validation_loss)\n tf.summary.scalar(\"average_dsc_loss\",average_dsc_loss)\n\n with tf.name_scope('accuracy'):\n accuracy=self.loss_instance.accuracy_fn(y, label)\n\n tf.summary.scalar(\"accuracy\", accuracy)\n\n sess.run(tf.global_variables_initializer())\n logging.debug('total number of variables %s' % (\n np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])))\n summ=tf.summary.merge_all()\n\n point = 0 # starting point, starts from a value > 0 if training is resumed\n itr1 = 0 # number of iterations\n if len(pre_trained_chckpnt_dir):\n ckpt = tf.train.get_checkpoint_state(pre_trained_chckpnt_dir)\n saver.restore(sess, ckpt.model_checkpoint_path)\n point=int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])\n itr1=point\n\n\n # patch_radius = 49\n '''loop for epochs'''\n\n for epoch in range(self.total_epochs):\n while self.no_sample_per_each_itr*int(point/self.no_sample_per_each_itr)<self.sample_no:\n print('0')\n print(\"epoch #: %d\" %(epoch))\n startTime = time.time()\n step = 0\n self.beta_coeff=1+1 * np.exp(-point/2000)\n # =============start validation================\n if itr1 % self.display_validation_step ==0:\n '''Validation: '''\n loss_validation = 0\n acc_validation = 0\n validation_step = 0\n dsc_validation=0\n while (validation_step * self.batch_no_validation <settings.validation_totalimg_patch):\n [validation_CT_image, validation_GTV_image,validation_Penalize_patch,validation_Surface_patch] = _image_class_vl.return_patches_validation( validation_step * self.batch_no_validation, (validation_step + 1) *self.batch_no_validation)\n if (len(validation_CT_image)<self.batch_no_validation) | (len(validation_GTV_image)<self.batch_no_validation) | (len(validation_Penalize_patch)<self.batch_no_validation) | (len(validation_Surface_patch)<self.batch_no_validation) :\n _read_thread_vl.resume()\n time.sleep(0.5)\n continue\n\n validation_CT_image_patchs = validation_CT_image\n validation_GTV_label = validation_GTV_image\n tic=time.time()\n\n [acc_vali, loss_vali,dsc_vali,surface_loss1] = sess.run([accuracy, cost,f1_measure,surface_loss],\n feed_dict={image: validation_CT_image_patchs,\n label: validation_GTV_label,\n penalize: validation_Penalize_patch,\n dropout: 1,\n is_training: False,\n ave_vali_acc: -1,\n ave_loss_vali: -1,\n ave_dsc_vali:-1,\n dense_net_dim: self.patch_window,\n is_training_bn:False,\n alpha:1,\n beta:1,\n surf_map:validation_Surface_patch,\n })\n elapsed=time.time()-tic\n\n 
acc_validation += acc_vali\n loss_validation += loss_vali\n dsc_validation+=dsc_vali[1]\n validation_step += 1\n if np.isnan(dsc_validation) or np.isnan(loss_validation) or np.isnan(acc_validation):\n print('nan problem')\n process = psutil.Process(os.getpid())\n\n print(\n '%d - > %d: elapsed_time:%d acc_validation: %f, loss_validation: %f, memory_percent: %4s' % (\n validation_step,validation_step * self.batch_no_validation\n , elapsed, acc_vali, loss_vali, str(process.memory_percent()),\n ))\n\n settings.queue_isready_vl = False\n acc_validation = acc_validation / (validation_step)\n loss_validation = loss_validation / (validation_step)\n dsc_validation = dsc_validation / (validation_step)\n if np.isnan(dsc_validation) or np.isnan(loss_validation) or np.isnan(acc_validation):\n print('nan problem')\n _fill_thread_vl.kill_thread()\n print('******Validation, step: %d , accuracy: %.4f, loss: %f*******' % (\n itr1, acc_validation, loss_validation))\n\n [sum_validation] = sess.run([summ],\n feed_dict={image: validation_CT_image_patchs,\n label: validation_GTV_label,\n penalize: validation_Penalize_patch,\n dropout: 1,\n is_training: False,\n ave_vali_acc: acc_validation,\n ave_loss_vali: loss_validation,\n ave_dsc_vali:dsc_validation,\n dense_net_dim: self.patch_window,\n is_training_bn: False,\n alpha: 1,\n beta: 1,\n surf_map: validation_Surface_patch,\n\n })\n validation_writer.add_summary(sum_validation, point)\n print('end of validation---------%d' % (point))\n\n #loop for training batches\n while(step*self.batch_no<self.no_sample_per_each_itr):\n [train_CT_image_patchs, train_GTV_label, train_Penalize_patch,loss_coef_weights,train_Surface_patch] = _image_class.return_patches( self.batch_no)\n\n if (len(train_CT_image_patchs)<self.batch_no)|(len(train_GTV_label)<self.batch_no)\\\n |(len(train_Penalize_patch)<self.batch_no)|(len(train_Surface_patch)<self.batch_no):\n time.sleep(0.5)\n _read_thread.resume()\n continue\n\n tic=time.time()\n [acc_train1, loss_train1, optimizing,out,dsc_train11] = sess.run([accuracy, cost, optimizer,y,f1_measure],\n feed_dict={image: train_CT_image_patchs,\n label: train_GTV_label,\n penalize: train_Penalize_patch,\n # loss_coef: loss_coef_weights,\n dropout: self.dropout_keep,\n is_training: True,\n ave_vali_acc: -1,\n ave_loss_vali: -1,\n ave_dsc_vali: -1,\n dense_net_dim: self.patch_window,\n is_training_bn: True,\n alpha: self.alpha_coeff,\n beta: self.beta_coeff,\n surf_map: train_Surface_patch,\n\n })\n elapsed=time.time()-tic\n dsc_train1=dsc_train11[1]\n\n self.x_hist=self.x_hist+1\n # np.hstack((self.x_hist, [np.ceil(\n\n [sum_train] = sess.run([summ],\n feed_dict={image: train_CT_image_patchs,\n label: train_GTV_label,\n penalize: train_Penalize_patch,\n dropout: self.dropout_keep, is_training: True,\n ave_vali_acc: acc_train1,\n ave_loss_vali: loss_train1,\n ave_dsc_vali: dsc_train1,\n dense_net_dim: self.patch_window,\n is_training_bn: True,\n alpha: self.alpha_coeff,\n beta: self.beta_coeff,\n surf_map: train_Surface_patch,\n\n })\n train_writer.add_summary(sum_train,point)\n step = step + 1\n\n process = psutil.Process(os.getpid())\n\n print(\n 'point: %d, elapsed_time:%d step*self.batch_no:%f , LR: %.15f, acc_train1:%f, loss_train1:%f,memory_percent: %4s' % (\n int((point)),elapsed,\n step * self.batch_no, self.learning_rate, acc_train1, loss_train1,\n str(process.memory_percent())))\n\n\n point=int((point))\n if point%100==0:\n '''saveing model inter epoch'''\n chckpnt_path = os.path.join(self.chckpnt_dir,\n 
('densenet_unet_inter_epoch%d_point%d.ckpt' % (epoch, point)))\n saver.save(sess, chckpnt_path, global_step=point)\n itr1 = itr1 + 1\n point=point+1\n endTime = time.time()\n\n #==============\n '''saveing model after each epoch'''\n chckpnt_path = os.path.join(self.chckpnt_dir, 'densenet_unet.ckpt')\n saver.save(sess, chckpnt_path, global_step=epoch)\n print(\"End of epoch----> %d, elapsed time: %d\" % (epoch, endTime - startTime))", "def train():\n args = arguments()\n\n # Create output directories\n create_output_dir(args.outputDir)\n\n # Start Log File\n log_path = os.path.join(args.outputDir, LOG_DIR, time.strftime('%Y-%m-%d_%H-%M-%S.log'))\n log_file = Logger(log_path)\n\n # Log arguments\n arg_str = ''\n for arg in vars(args):\n arg_str += \"\\n\" + \"{:30} {}\".format(str(arg), getattr(args, arg))\n log_file.log_line(\"Arguments\", arg_str)\n log_file.newline()\n\n # Load Params\n configuration = config_cvppp.TrainConfig()\n\n # Log params\n log_file.log_line(\"Config Parameters\\n\", configuration.to_string())\n log_file.newline()\n\n ## Load dataset API (Already logged in the args log step)\n train_dataset, crossVal_dataset = load_datasets(args)\n\n # Init the model\n checkpoint_path = os.path.join(args.outputDir, CHECKPOINT_DIR)\n training_model = model.MaskRCNN('training', configuration, checkpoint_path)\n\n # Load weights\n if args.init == 'last':\n weights_path = training_model.find_last()\n log_file.log_line(\"Initialised with \", weights_path)\n training_model.load_weights(weights_path, by_name=True)\n\n elif args.init == 'rand':\n log_file.log_line(\"Initialised with \", \"random weights\")\n pass\n\n else:\n if not os.path.exists(args.init):\n raise OSError('No weights at: ' + args.init)\n \n log_file.log_line(\"Initialised with \", args.init)\n training_model.load_weights(args.init, by_name=True)\n\n # Train the model\n augmentation = get_augmentation_sequence()\n\n custom_callbacks = None\n\n training_model.train(train_dataset, crossVal_dataset, \n learning_rate=configuration.LEARNING_RATE, \n epochs=args.numEpochs,\n augmentation=augmentation,\n layers='all',\n custom_callbacks=custom_callbacks) # Train all layers\n\n # Close the log file\n log_file.close()", "def train(self, mode=True, freeze_bn=False):\n super(NetFeat, self).train(mode)\n self.freeze_bn = freeze_bn\n if self.freeze_bn:\n for m in self.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.eval()\n m.weight.requires_grad = False\n m.bias.requires_grad = False", "def train():\n\n\t# Tell TensorFlow that the model will be built into the default Graph.\n\twith tf.Graph().as_default(), tf.device('/cpu:0'):\n\n\n\t\tglobal_step = tf.Variable(0, trainable=False)\n\n\t\t#boundaries = [300000, 400000, 500000]\n\t\t#values = [0.0001, 0.00005, 0.000025, 0.0000125]#S\n\t\t#boundaries = [5000*2, 10000*2, 400000*2, 600000*2, 800000*2, 1000000*2]\n\t\t#values = [0.000001,0.00001, 0.0001, 0.00005, 0.000025, 0.0000125, 0.00000625]#C\n\t\tboundaries = [400000, 600000, 800000, 1000000]\n\t\tvalues = [0.0001, 0.00005, 0.000025, 0.0000125, 0.00000625]#Sl\n\n\t\tlearning_rate = tf.train.piecewise_constant(global_step, boundaries, values)\n\n\t\t# Create an optimizer that performs gradient descent.\n\t\topt = tf.train.AdamOptimizer(learning_rate)\n\n\t\t# Calculate the gradients for each model tower.\n\t\ttower_grads = []\n\t\twith tf.variable_scope(tf.get_variable_scope()):\n\t\t\tfor i in xrange(FLAGS.num_gpus):\n\t\t\t\twith tf.device('/gpu:%d' % i):\n\t\t\t\t\twith tf.name_scope('%s_%d' % (flowNet.TOWER_NAME, i)) as 
scope:\n\t\t\t\t\t\t# Calculate the loss for one tower of the model. This function\n\t\t\t\t\t\t# constructs the entire CIFAR model but shares the variables across\n\t\t\t\t\t\t# all towers.\n\t\t\t\t\t\tloss = tower_loss(scope)\n\n\t\t\t\t\t\t# Reuse variables for the next tower.\n\t\t\t\t\t\ttf.get_variable_scope().reuse_variables()\n\n\t\t\t\t\t\t# Retain the summaries from the final tower.\n\t\t\t\t\t\tsummaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)\n\n\t\t\t\t\t\t# Calculate the gradients for the batch of data on this tower.\n\t\t\t\t\t\tgrads = opt.compute_gradients(loss,var_list=tf.trainable_variables())\n\t\t\t\t\t\t# Keep track of the gradients across all towers.\n\t\t\t\t\t\ttower_grads.append(grads)\n\n\t\t# We must calculate the mean of each gradient. Note that this is the\n\t\t# synchronization point across all towers.\n\t\tgrads = average_gradients(tower_grads)\n\n\t\t# Add a summary to track the learning rate.\n\t\tsummaries.append(tf.summary.scalar('learning_rate', learning_rate))\n\n\t\t# Add histograms for gradients.\n\t\tfor grad, var in grads:\n\t\t\tif grad is not None:\n\t\t\t\tsummaries.append(tf.summary.histogram(var.op.name + '/gradients', grad))\n\n\t\t# Apply the gradients to adjust the shared variables.\n\t\ttrain_op = opt.apply_gradients(grads, global_step=global_step)\n\t\t\n\t\t#Add histograms for trainable variables.\n\t\tfor var in tf.trainable_variables():\n\t\t\tsummaries.append(tf.summary.histogram(var.op.name, var))\n\t\t\n\t\t# Build the summary Tensor based on the TF collection of Summaries.\n\t\tsummary_op = tf.summary.merge(summaries)\n\n\t\t# Add the variable initializer Op.\n\t\tinit = tf.global_variables_initializer()\n\t\t\n\t\t# Create a saver for writing training checkpoints.\n\t\tsaver = tf.train.Saver(tf.global_variables())\n\n\t\t# Create a session for running Ops on the Graph.\n\t\tsess = tf.Session()\n\n\t\t# Instantiate a SummaryWriter to output summaries and the Graph.\n\t\tsummary_writer = tf.summary.FileWriter(FLAGS.log_dir, sess.graph)\n\n\t\t# And then after everything is built:\n\t\tconfig = tf.ConfigProto(allow_soft_placement=True,log_device_placement=FLAGS.log_device_placement)\n\t\tconfig.gpu_options.allow_growth = True\n\t\tconfig.gpu_options.per_process_gpu_memory_fraction = 0.9\n\t\tsess = tf.Session(config=config)\n\t\t# Run the Op to initialize the variables.\n\t\tsess.run(init)\n\t\tckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)\n\t\tif ckpt and ckpt.model_checkpoint_path:\n\t\t\tsaver_restore = tf.train.Saver(tf.get_collection('fix'))\n\t\t\tsaver_restore.restore(sess, ckpt.model_checkpoint_path)\n\t\telse:\n\t\t\tprint('No checkpoint file found')\n\t\t\t\n\t\t# Start the queue runners.\n\t\ttf.train.start_queue_runners(sess=sess)\n\t\t\n\t\t# Start the training loop.\n\t\tfor step in xrange(FLAGS.max_steps):\n\t\t\tstart_time = time.time()\n\t\t\t_, loss_value = sess.run([train_op, loss])\n\t\t\tduration = time.time() - start_time\n\n\t\t\tassert not np.isnan(loss_value) , 'Model diverged with loss = NaN'\n\n\t\t\t#Print an overview fairly often.\n\t\t\tif step % 10 == 0:\n\t\t\t\tnum_examples_per_step = FLAGS.batch_size\n\t\t\t\texamples_per_sec = num_examples_per_step / duration\n\t\t\t\tsec_per_batch = duration\n\n\t\t\t\tformat_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f ''sec/batch)')\n\t\t\t\tprint (format_str % (datetime.now(), step, loss_value, examples_per_sec, sec_per_batch))\n\n\t\t\t# Write the summaries. 
\n\t\t\tif step % 1000 == 0:\n\t\t\t\t# Print status to stdout.\n\t\t\t\tprint('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))\n\t\t\t\t# Update the events file.\n\t\t\t\tsummary_str = sess.run(summary_op)\n\t\t\t\tsummary_writer.add_summary(summary_str, step)\n\t\t\t\tsummary_writer.flush()\n\n\t\t\t# Save a checkpoint.\n\t\t\tif (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:\n\t\t\t\tcheckpoint_file = os.path.join(FLAGS.log_dir, 'model.ckpt')\n\t\t\t\tsaver.save(sess, checkpoint_file, global_step=step)", "def __init__(self,\n exp_name,\n ds_train,\n ds_val,\n epochs=210,\n batch_size=16,\n num_workers=4,\n loss='JointsMSELoss',\n lr=0.001,\n lr_decay=True,\n lr_decay_steps=(170, 200),\n lr_decay_gamma=0.1,\n optimizer='Adam',\n weight_decay=0.,\n momentum=0.9,\n nesterov=False,\n pretrained_weight_path=None,\n checkpoint_path=None,\n log_path='./logs',\n use_tensorboard=True,\n model_c=48,\n model_nof_joints=18,\n model_bn_momentum=0.1,\n flip_test_images=True,\n device=None\n ):\n super(Train, self).__init__()\n\n self.exp_name = exp_name\n self.ds_train = ds_train\n self.ds_val = ds_val\n self.epochs = epochs\n self.batch_size = batch_size\n self.num_workers = num_workers\n self.loss = loss\n self.lr = lr\n self.lr_decay = lr_decay\n self.lr_decay_steps = lr_decay_steps\n self.lr_decay_gamma = lr_decay_gamma\n self.optimizer = optimizer\n self.weight_decay = weight_decay\n self.momentum = momentum\n self.nesterov = nesterov\n self.pretrained_weight_path = pretrained_weight_path\n self.checkpoint_path = checkpoint_path\n self.log_path = os.path.join(log_path, self.exp_name)\n self.use_tensorboard = use_tensorboard\n self.model_c = model_c\n self.model_nof_joints = model_nof_joints\n self.model_bn_momentum = model_bn_momentum\n self.flip_test_images = flip_test_images\n self.epoch = 0\n\n\n os.makedirs(self.log_path, 0o755, exist_ok=True) # exist_ok=False to avoid overwriting\n if self.use_tensorboard:\n self.summary_writer = tb.SummaryWriter(self.log_path)\n\n #\n # write all experiment parameters in parameters.txt and in tensorboard text field\n self.parameters = [x + ': ' + str(y) + '\\n' for x, y in locals().items()]\n\n with open(os.path.join(self.log_path, 'parameters.txt'), 'w') as fd:\n fd.writelines(self.parameters)\n if self.use_tensorboard:\n self.summary_writer.add_text('parameters', '\\n'.join(self.parameters))\n\n #\n # load model\n self.model = HRNet(c=self.model_c, nof_joints=self.model_nof_joints,\n bn_momentum=self.model_bn_momentum).cuda()\n\n\n #\n # define loss and optimizers\n if self.loss == 'JointsMSELoss':\n self.loss_fn = JointsMSELoss()\n elif self.loss == 'JointsOHKMMSELoss':\n self.loss_fn = JointsOHKMMSELoss()\n else:\n raise NotImplementedError\n\n if optimizer == 'SGD':\n self.optim = SGD(self.model.parameters(), lr=self.lr, weight_decay=self.weight_decay,\n momentum=self.momentum, nesterov=self.nesterov)\n elif optimizer == 'Adam':\n self.optim = Adam(self.model.parameters(), lr=self.lr, weight_decay=self.weight_decay)\n else:\n raise NotImplementedError\n\n\n # load pre-trained weights (such as those pre-trained on imagenet)\n if self.pretrained_weight_path is not None:\n if self.model_nof_joints == 18:\n pretrained_dict = torch.load(self.pretrained_weight_path)\n pretrained_dict_items = list(pretrained_dict.items())\n pretrained_model = {}\n j = 0\n for k, v in self.model.state_dict().items():\n v = pretrained_dict_items[j][1]\n k = pretrained_dict_items[j][0]\n\n if k == 'final_layer.weight':\n x = 
torch.rand(1,48,1,1).cuda()\n v = torch.cat([v, x], dim=0)\n if k == 'final_layer.bias':\n x = torch.rand(1).cuda()\n v = torch.cat([v,x],dim=0)\n pretrained_model[k] = v\n j +=1\n model_dict=self.model.state_dict()\n model_dict.update(pretrained_model)\n self.model.load_state_dict(model_dict,strict=True)\n else:\n self.model.load_state_dict(torch.load(self.pretrained_weight_path, strict=True))\n print('Pre-trained weights loaded.')\n\n self.model = nn.DataParallel(self.model.cuda())\n # self.model = nn.DataParallel(self.model.to(self.device))\n #\n # load previous checkpoint\n if self.checkpoint_path is not None:\n print('Loading checkpoint %s...' % self.checkpoint_path)\n if os.path.isdir(self.checkpoint_path):\n path = os.path.join(self.checkpoint_path, 'checkpoint_last.pth')\n else:\n path = self.checkpoint_path\n self.starting_epoch, self.model, self.optim, self.params = load_checkpoint(path, self.model, self.optim,\n self.device)\n else:\n self.starting_epoch = 0\n\n if lr_decay:\n self.lr_scheduler = MultiStepLR(self.optim, list(self.lr_decay_steps), gamma=self.lr_decay_gamma,\n last_epoch=self.starting_epoch if self.starting_epoch else -1)\n\n #\n # load train and val datasets\n self.dl_train = DataLoader(self.ds_train, batch_size=self.batch_size, shuffle=True,\n num_workers=self.num_workers, drop_last=True)\n self.len_dl_train = len(self.dl_train)\n\n # dl_val = DataLoader(self.ds_val, batch_size=1, shuffle=False, num_workers=num_workers)\n self.dl_val = DataLoader(self.ds_val, batch_size=self.batch_size, shuffle=False, num_workers=self.num_workers)\n self.len_dl_val = len(self.dl_val)\n\n #\n # initialize variables\n self.mean_loss_train = 0.\n self.mean_acc_train = 0.\n self.mean_loss_val = 0.\n self.mean_acc_val = 0.\n self.mean_mAP_val = 0.\n\n self.best_loss = None\n self.best_acc = None\n self.best_mAP = None", "def __init__(self,\n exp_name,\n ds_train,\n ds_val,\n epochs=210,\n batch_size=16,\n num_workers=4,\n loss='JointsMSELoss',\n lr=0.001,\n lr_decay=True,\n lr_decay_steps=(170, 200),\n lr_decay_gamma=0.1,\n optimizer='Adam',\n weight_decay=0.,\n momentum=0.9,\n nesterov=False,\n pretrained_weight_path=None,\n checkpoint_path=None,\n log_path='./logs',\n use_tensorboard=True,\n model_c=48,\n model_nof_joints=18,\n model_bn_momentum=0.1,\n flip_test_images=True,\n device=None\n ):\n super(GOLFTrain, self).__init__(\n exp_name=exp_name,\n ds_train=ds_train,\n ds_val=ds_val,\n epochs=epochs,\n batch_size=batch_size,\n num_workers=num_workers,\n loss=loss,\n lr=lr,\n lr_decay=lr_decay,\n lr_decay_steps=lr_decay_steps,\n lr_decay_gamma=lr_decay_gamma,\n optimizer=optimizer,\n weight_decay=weight_decay,\n momentum=momentum,\n nesterov=nesterov,\n pretrained_weight_path=pretrained_weight_path,\n checkpoint_path=checkpoint_path,\n log_path=log_path,\n use_tensorboard=use_tensorboard,\n model_c=model_c,\n model_nof_joints=model_nof_joints,\n model_bn_momentum=model_bn_momentum,\n flip_test_images=flip_test_images,\n device=device\n )", "def RunAutoEncoder(net, criterion, optimizer, lr_scheduler, train_dl, train_len, test_dl, test_len, N_EPOCHS, outputPath, SAVE_FILE,\\\n DO_PROJ_middle, run_model, criterion_classification, LOSS_LAMBDA, feature_name, TYPE_PROJ, ETA, ETA_STAR=100, AXIS=0 ):\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n epoch_loss, epoch_acc, epoch_reconstruction, epoch_classification, train_time = [], [], [], [], []\n epoch_val_loss, epoch_val_acc, epoch_val_reconstruction, epoch_val_classification = [], [], [], []\n best_test = 
0 \n for e in range(N_EPOCHS):\n t1 = time.perf_counter()\n\n running_loss, running_accuracy = 0, 0 \n running_classification , running_reconstruction = 0,0\n net.train()\n for i,batch in enumerate(tqdm(train_dl)):\n x = batch[0]\n labels = batch[1]\n \n if torch.cuda.is_available():\n x = x.cuda()\n labels = labels.cuda() \n \n encoder_out, decoder_out = net(x)\n \n # Compute the loss \n loss_classification = criterion_classification(encoder_out,labels.long())\n if type(criterion) == torch.nn.modules.loss.KLDivLoss:\n loss_reconstruction = LOSS_LAMBDA * criterion(x.log(), decoder_out)\n else:\n loss_reconstruction = LOSS_LAMBDA * criterion(decoder_out, x)\n loss = loss_classification + loss_reconstruction\n \n optimizer.zero_grad()\n loss.backward()\n \n # Set the gradient as 0\n if run_model =='MaskGrad':\n for index,param in enumerate(list(net.parameters())):\n if index<len(list(net.parameters()))/2-2 and index%2==0:\n param.grad[ DO_PROJ_middle[int(index/2)] ] =0 \n optimizer.step() \n \n with torch.no_grad():\n running_loss += loss.item()\n running_reconstruction += loss_reconstruction.item()\n running_classification += loss_classification.item()\n running_accuracy += (encoder_out.max(1)[1] == labels).sum().item() \n \n if e == N_EPOCHS-1 :\n# labels = encoder_out.max(1)[1].float()\n if i == 0:\n data_decoded = torch.cat((decoder_out,labels.view(-1,1)), dim = 1)\n data_encoder = torch.cat((encoder_out,labels.view(-1,1)), dim = 1)\n else:\n tmp1 = torch.cat((decoder_out,labels.view(-1,1)), dim = 1)\n data_decoded = torch.cat((data_decoded,tmp1),dim= 0)\n \n tmp2 = torch.cat((encoder_out,labels.view(-1,1)), dim = 1)\n data_encoder = torch.cat((data_encoder,tmp2 ),dim= 0)\n\n t2 = time.perf_counter()\n train_time.append(t2-t1)\n print(\"Total loss:\", running_loss / float(train_len ),'loss_reconstruction: ', running_reconstruction/ train_len ,\\\n 'loss_classification: ',running_classification/ train_len ) \n epoch_loss.append(running_loss / train_len )\n epoch_reconstruction.append( running_reconstruction / train_len )\n epoch_classification.append( running_classification / train_len )\n epoch_acc.append(running_accuracy / train_len)\n \n \n # Do projection at last epoch (GRADIENT_MASK)\n if run_model=='ProjectionLastEpoch' and e==(N_EPOCHS-1):\n net_parameters = list(net.parameters())\n for index,param in enumerate(net_parameters):\n if DO_PROJ_middle == False and \\\n index!= len(net_parameters)/2-2: # Do no projection at middle layer\n param.data = Projection(param.data, TYPE_PROJ, ETA, ETA_STAR, AXIS, device).to(device)\n \n #testing our model\n running_loss, running_accuracy = 0, 0 \n running_classification , running_reconstruction = 0,0\n net.eval()\n \n for i,batch in enumerate(tqdm(test_dl)):\n with torch.no_grad():\n x = batch[0]\n labels = batch[1]\n if torch.cuda.is_available():\n x = x.cuda()\n labels = labels.cuda()\n encoder_out, decoder_out = net(x)\n \n # Compute the loss \n loss_classification = criterion_classification(encoder_out,labels.long())\n if type(criterion) == torch.nn.modules.loss.KLDivLoss:\n loss_reconstruction = LOSS_LAMBDA * criterion(x.log(), decoder_out)\n else:\n loss_reconstruction = LOSS_LAMBDA * criterion(decoder_out, x)\n loss = loss_classification + loss_reconstruction\n running_loss += loss.item()\n running_reconstruction += loss_reconstruction.item()\n running_classification += loss_classification.item()\n running_accuracy += (encoder_out.max(1)[1] == labels).sum().item() \n print(\"test accuracy : \", running_accuracy / test_len, \"Total 
loss:\", running_loss / float(test_len ),'loss_reconstruction: ', running_reconstruction/ test_len ,\\\n 'loss_classification: ',running_classification/ test_len )\n if running_accuracy > best_test :\n best_net_it = e\n best_test = running_accuracy\n torch.save(net.state_dict(), str(outputPath)+\"/best_net\")\n epoch_val_loss.append(running_loss / test_len )\n epoch_val_reconstruction.append( running_reconstruction / test_len )\n epoch_val_classification.append( running_classification / test_len )\n epoch_val_acc.append(running_accuracy / test_len) \n \n print('Epoch du best net = ', best_net_it) \n if SAVE_FILE and str(run_model)!= 'ProjectionLastEpoch':\n # Save encoder data\n Lung_encoder = data_encoder.cpu().detach().numpy()\n colunms = [x for x in range(Lung_encoder.shape[1]-1)] +['label']\n res =pd.DataFrame(Lung_encoder,columns= colunms)\n #res.to_csv('{}encoder_tiro_{}.csv'.format(outputPath, str(run_model)),sep=';')\n # Save decoder data\n Lung_decoded = data_decoded.cpu().detach().numpy()\n Label = ['Label']+list(Lung_decoded[:,-1].astype(int)+1)\n Name = ['Name'] + [x+2 for x in range(train_len)]\n Label = np.vstack( (np.array(Name),np.array(Label)) )\n Lung = np.delete(Lung_decoded, -1, axis =1 )\n Lung = np.hstack( (feature_name.reshape(-1,1), Lung.T) )\n Lung = np.vstack((Label, Lung))\n res = pd.DataFrame(Lung)\n #res.to_csv('{}decoded_{}.csv'.format(outputPath, str(run_model)),sep=';',index=0, header=0) \n print(\"-----------------------\")\n print(\"Saved file to \",str(outputPath))\n print(\"-----------------------\")\n #Plot \n if str(run_model)!= 'ProjectionLastEpoch':\n #plt.figure()\n #plt.plot( epoch_acc )\n #plt.plot( epoch_val_acc )\n #plt.title('Total accuracy classification')\n #plt.show()\n print('{} epochs trained for {}s , {} s/epoch'.format(N_EPOCHS, sum(train_time), np.mean(train_time)))\n return data_encoder, data_decoded, epoch_loss , best_test, net", "def worker_train():\n py_work = np.zeros(model.layer1_size, dtype=np.float32)\n\n while True:\n job = jobs.get(block=True)\n if job is None: # data finished, exit\n jobs.task_done()\n # print('thread %s break' % threading.current_thread().name)\n break\n\n #lr = max(self.min_lr, self.lr * (1 - 1.0 * node_count[0]/total_node))\n lr = self.lr \n job_words = 0\n for edge in job:\n if edge is not None:\n if cluster_negtivate:\n node_set = set()\n if model.vocab_t[edge[0].index] not in nodeid2cluster:\n cls1 = -1\n else:\n cls1 = nodeid2cluster[model.vocab_t[edge[0].index]]\n node_set.add(cls1)\n if model.vocab_t[edge[1].index] not in nodeid2cluster:\n cls2 = -1\n else:\n cls2 = nodeid2cluster[model.vocab_t[edge[1].index]]\n node_set.add(cls2)\n neg_l = []\n #选择的负样本的node必须是有明确类别归属的\n for i in range(self.negative):\n nodeidx = model.table[np.random.randint(model.table_size)]\n if model.vocab_t[nodeidx] not in nodeid2cluster:\n i-=1\n continue\n else:\n cls_n = nodeid2cluster[model.vocab_t[nodeidx]]\n #加入不同边限制 G 里存放的是nodeid,不是idx\n if cls_n not in node_set and model.vocab_t[nodeidx] not in G[model.vocab_t[edge[0].index]] \\\n and model.vocab_t[nodeidx] not in G[model.vocab_t[edge[1].index]]:\n neg_l.append(nodeidx)\n neg_np = np.asarray(neg_l)\n weight = G[model.vocab_t[edge[0].index]][model.vocab_t[edge[1].index]]['weight']\n if weight > 0.0 and len(neg_np) > 0:\n #job_words += sum(train_o1(model.node_embedding, edge, lr, int(10 * (weight)) * self.negative, neg_np,\n # py_size=model.layer1_size, py_work=py_work) \n # for i in range(1))\n job_words += sum(train_o1(model.node_embedding, edge, weight, lr, 
self.negative, neg_np,\n py_size=model.layer1_size, py_work=py_work) \n for i in range(1))\n elif len(neg_np) == 0:\n #job_words += sum(train_o1(model.node_embedding, edge, lr, 0, neg_np,\n # py_size=model.layer1_size, py_work=py_work) \n # for i in range(1))\n job_words += sum(train_o1(model.node_embedding, edge, weight, lr, 0, neg_np,\n py_size=model.layer1_size, py_work=py_work) \n for i in range(1))\n else:\n weight = G[model.vocab_t[edge[0].index]][model.vocab_t[edge[1].index]]['weight']\n if weight >= 0.1:\n #job_words += sum(train_o1(model.node_embedding, edge, lr, int(10 * (weight)) * self.negative, model.table,\n # py_size=model.layer1_size, py_work=py_work) \n # for i in range(1))\n job_words += sum(train_o1(model.node_embedding, edge, weight, lr, self.negative, model.table,\n py_size=model.layer1_size, py_work=py_work) \n for i in range(1))\n #for i in range(int(10 * weight)))\n #job_words = sum(train_o1(model.node_embedding, edge, lr, self.negative, model.table,\n # py_size=model.layer1_size, py_work=py_work) for edge in job if edge is not None)\n jobs.task_done()\n lock.acquire(timeout=30)\n try:\n node_count[0] += job_words\n\n elapsed = time.time() - start\n if elapsed >= next_report[0]:\n log.info(\"PROGRESS: at %.2f%% \\tnode_computed %d\\talpha %.05f\\t %.0f nodes/s\" %\n (100.0 * node_count[0] / total_node, node_count[0], lr, node_count[0] / elapsed if elapsed else 0.0))\n next_report[0] = elapsed + 5.0 # don't flood the log, wait at least a second between progress reports\n finally:\n lock.release()" ]
[ "0.700375", "0.6836105", "0.6836105", "0.6836105", "0.6836105", "0.6836105", "0.6526964", "0.64998597", "0.6478274", "0.6437638", "0.64010245", "0.6325889", "0.6291698", "0.6285171", "0.6252986", "0.62359476", "0.6207061", "0.61956275", "0.6193689", "0.61917514", "0.61817634", "0.6171412", "0.6153118", "0.6153118", "0.61012095", "0.60755575", "0.60688865", "0.60595316", "0.6057344", "0.6047044", "0.6042876", "0.6038489", "0.60286576", "0.60223573", "0.6014375", "0.5997444", "0.59907824", "0.5984889", "0.5974709", "0.59568834", "0.59538907", "0.59509075", "0.5943891", "0.59400326", "0.5939235", "0.59376955", "0.5918523", "0.59105635", "0.59027064", "0.58991206", "0.58895445", "0.5883299", "0.5878207", "0.5869082", "0.58667886", "0.5866229", "0.58599454", "0.5837261", "0.5826123", "0.58244866", "0.5816862", "0.58139503", "0.5809816", "0.5808563", "0.5807072", "0.5803481", "0.579982", "0.57969385", "0.57939655", "0.57928157", "0.5792361", "0.5782083", "0.57718194", "0.5771791", "0.57716227", "0.5769682", "0.57608926", "0.5758885", "0.5744331", "0.5742191", "0.5740287", "0.5739055", "0.5738452", "0.57362115", "0.57351136", "0.5722699", "0.571869", "0.5718131", "0.57177037", "0.57163894", "0.57140774", "0.5709309", "0.57077676", "0.56999147", "0.5698408", "0.5694303", "0.56927127", "0.5692548", "0.56909317", "0.5690598", "0.56837445" ]
0.0
-1
it performs the batch mode of training
def train_batch(inputs, outputs, eta=0.55, maxit=1000, momentum=0.1, plot=False):
    global ERROR
    ERROR.clear()
    min_error = 100
    ins_outs = list(zip(inputs, outputs))
    counter = 0
    while counter <= maxit:
        counter += 1
        shuffle(ins_outs)
        Dws = []
        errors = []
        for pair in ins_outs:
            i, o = pair
            error2(i, o)
            errors.append(layers[-1]["error2"].item())
            ws = getweigths()
            backpropagate(eta, momentum)
            Dws.append(get_Delta_weigths())
            setweigths(ws)
        ERROR.append(sum(errors))
        try:
            if ERROR[-1] < min_error:
                min_error = ERROR[-1]
                optimal_w = getweigths()
                min_error_counter = counter
                print(f"Minimum error found = {min_error}, at counter = {min_error_counter}", end="\r")
        except:
            pass
        Delta_w = []
        for ws in range(len(Dws[0])):
            Delta_w.append(sum([Dws[pattern][ws] for pattern in range(len(ins_outs))]))
        set_Delta_weigths(Delta_w)
        updateweigths()
    setweigths(optimal_w)
    print(f"\vMinimum error reached at the {min_error_counter}st cycle")
    if plot:
        plt.plot(np.arange(len(ERROR)), ERROR, "b*-")
        plt.xlabel("Number of cycles")
        plt.ylabel("Sum of quadratic errors")
        plt.title("BATCH MODE:\nERROR vs CYCLES")
        plt.grid()
        plt.show()
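The function above trains in batch mode: per-pattern weight deltas are accumulated over the whole shuffled training set and applied once per cycle, rather than after every pattern as in online training. Below is a minimal, self-contained sketch of that same idea on a toy linear model (NumPy only); the names batch_train, X, y, w and b are illustrative and not part of the record above.

import numpy as np

def batch_train(X, y, eta=0.1, maxit=500):
    # Full-batch gradient descent for a linear model y ~ X @ w + b:
    # the gradient is accumulated over ALL patterns, then the weights
    # are updated once per cycle (the defining property of batch mode).
    rng = np.random.default_rng(0)
    w = rng.normal(scale=0.1, size=X.shape[1])
    b = 0.0
    history = []
    for _ in range(maxit):
        err = X @ w + b - y              # errors for the whole batch
        history.append(float(np.sum(err ** 2)))
        w -= eta * (X.T @ err) / len(X)  # one update from the summed gradient
        b -= eta * err.mean()
    return w, b, history

X = np.array([[0.0], [1.0], [2.0], [3.0]])
y = np.array([1.0, 3.0, 5.0, 7.0])       # targets from y = 2*x + 1
w, b, history = batch_train(X, y)
print(w, b, history[-1])                 # w -> ~2, b -> ~1, final error -> ~0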
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train(self, batch):\n pass", "def train(self, num_batches: int):", "def train_test_model_batch():\n train=learning.Train_kmer_clf()\n train.run()", "def train(self, batch_training=False):\n raise NotImplementedError", "def train_next_batch(self, batch_size=None):", "def epoch(self, dataset, optimize, mode): \n accuracyDataSet = []\n lossDataSet = []\n for epoch in range(config.EPOCHS):\n totalLoss, goodAccuracy, totalAccuracy = 1, 1, 1\n random.shuffle(dataset) \n for i in range(0, len(dataset)-config.BATCH_SIZE, config.BATCH_SIZE):\n \n if optimize:\n self.model.zero_grad() \n \n if mode == 'c':\n \n batch = dataset[i:i+config.BATCH_SIZE]\n \n x_batch_prefix = [[utils.getIdOfPrefix(w[3:]) for w in a] for a,b in batch] \n x_batch_suffix = [[utils.getIdOfSuffix(w[:-3]) for w in a] for a,b in batch]\n y_batch = [utils.getIdOfTag(b) for a,b in batch] \n x_prefix = Variable(torch.LongTensor(x_batch_prefix))\n x_suffix = Variable(torch.LongTensor(x_batch_suffix))\n y = Variable(torch.LongTensor(y_batch))\n\n lstm_output, _ = self.model((x_prefix, x_suffix) , mode) \n \n if mode in ['a','b']: \n\n batch = dataset[i:i+config.BATCH_SIZE] \n x_batch = [a for a,b in batch]\n y_batch = [b for a,b in batch]\n x = Variable(torch.LongTensor(x_batch))\n y = Variable(torch.LongTensor(y_batch))\n lstm_output, _ = self.model(x, mode) \n \n if mode == 'd':\n\n batch = dataset[i:i+config.BATCH_SIZE]\n x_batch_words = [[utils.getIdOfWord(w) for w in a] for a,b in batch]\n x_batch_chars = [[[utils.getIdOfChar(c) for c in word] for word in words[0]] for words in batch]\n x_batch_char = [item for sublist in x_batch_chars for item in sublist]\n lengths = [len(item) for item in x_batch_char]\n x_batch_c = []\n length = np.max(lengths) \n\n for i in range(len(x_batch_char)):\n if len(x_batch_char[i]) > config.WORD_MAX_LENGTH:\n x_batch_c.append(x_batch_char[i][:config.WORD_MAX_LENGTH])\n\n else:\n while len(x_batch_char[i]) < config.WORD_MAX_LENGTH:\n x_batch_char[i].append(0) \n x_batch_c.append(x_batch_char[i]) \n \n x_words = Variable(torch.LongTensor(x_batch_words))\n x_chars = Variable(torch.LongTensor(x_batch_c))\n y_batch = [utils.getIdOfTag(b) for a,b in batch]\n y = Variable(torch.LongTensor(y_batch))\n lstm_output, _ = self.model((x_words,x_chars), mode) \n \n lstm_output = lstm_output.view(100, -1) \n loss = self.loss_fn(lstm_output, y)\n totalLoss += (loss.data).numpy()\n\n if optimize:\n self.optimizer.zero_grad() \n loss.backward()\n self.optimizer.step()\n\n goodAccuracy, totalAccuracy = self.getAccuracy((lstm_output.data).numpy(), (y.data).numpy(), mode)\n goodAccuracy += goodAccuracy\n totalAccuracy += totalAccuracy\n\n accuracyDataSet.append(goodAccuracy/totalAccuracy) \n lossDataSet.append( totalLoss/(len(dataset)/config.BATCH_SIZE) )\n print('Model: ',mode,' Epoch: ',epoch ,' Loss : {0:.6f}'.format(totalLoss/(len(dataset)/config.BATCH_SIZE)), ' Accuracy : {0:.6f}'.format( goodAccuracy/totalAccuracy ) ) \n\n if not optimize:\n self.acc_data_plots.append((accuracyDataSet, mode))", "def train_dynamic(batch_size=10):\n \n return", "def train_step(self):\r\n batch_images = next(self.data_loader.next_batch())\r\n _, loss, summary, ea = self.sess.run([self.model.train_op, self.model.total_loss, self.model.merged, self.model.euclidean_a_p],\r\n feed_dict={self.model.input: batch_images, self.model.is_training: True})\r\n \r\n return loss, summary", "def train_step(x_batch, y_batch):\n feed_dict = {\n cnn.input_x: x_batch,\n cnn.input_y: y_batch,\n cnn.dropout_keep_prob: 
opts[\"dropout_keep_prob\"]\n }\n _, step, loss, accuracy = sess.run(\n [train_op, global_step, cnn.loss, cnn.accuracy],\n feed_dict)", "def train():\n pass", "def train_single_batch(self, batch_data):\n assert hasattr(self, \"model\"), \"Please specify the exact model !\"\n self.optimizer.zero_grad()\n norm_adj = self.norm_adj\n ua_embeddings, ia_embeddings = self.model.forward(norm_adj)\n\n batch_users, pos_items, neg_items = batch_data\n\n u_g_embeddings = ua_embeddings[batch_users]\n pos_i_g_embeddings = ia_embeddings[pos_items]\n neg_i_g_embeddings = ia_embeddings[neg_items]\n\n batch_mf_loss, batch_reg_loss = self.loss_comput(\n u_g_embeddings,\n pos_i_g_embeddings,\n neg_i_g_embeddings,\n batch_users,\n pos_items,\n neg_items,\n )\n\n batch_loss = batch_mf_loss + batch_reg_loss\n\n batch_loss.backward()\n self.optimizer.step()\n loss = batch_loss.item()\n return loss", "def train(self, X, y, batch_size=5, num_epochs=10, alpha=0.1, gamma=0.9, learning=\"Delta\"):\n rem = int(np.ceil(len(X[0])/batch_size))\n for epoch in range(num_epochs):\n art = 0;\n for sample in range(rem):\n end = art + batch_size\n\n # Get a sample (column from X and Y) where the size of the sample is given by the batch size\n sampleX = X[:, art : end]\n sampleY = y[:, art : end]\n #print (sampleX)\n\n # Get the prediction\n results = self.predict(sampleX)\n art += batch_size\n\n if learning == \"Delta\" or learning == \"delta\":\n # Calculate e\n e = np.subtract(sampleY, results)\n\n # Calculate e dot p, where p is the input matrix\n ep = np.dot(e, np.transpose(sampleX))\n\n # Multiply this new matrix by the scalar alpha\n aep = np.multiply(alpha, ep)\n\n # Calculate the new weights along with the bias\n self.weights = np.add(self.weights, aep)\n \n elif learning == \"Filtered\" or learning == \"filtered\":\n\n # Calculate e dot p, where p is the input matrix\n ep = np.dot(sampleY, np.transpose(sampleX))\n\n # Multiply this new matrix by the scalar alpha\n aep = np.multiply(alpha, ep)\n\n # Multiply the old weights by some scalar gamma\n gw = np.multiply(1 - gamma, self.weights)\n\n self.weights = np.add(gw, aep)\n\n elif learning == \"Unsupervised_hebb\" or learning == \"unsupervised_hebb\":\n # Add a row of one's to the top of the input matrix\n #newX = np.vstack((np.array([1 for column in range(sampleX.shape[1])]), sampleX))\n\n # Calculate e dot p, where p is the input matrix\n ep = np.dot(results, np.transpose(sampleX))\n\n # Multiply this new matrix by the scalar alpha\n aep = np.multiply(alpha, ep)\n\n # Calculate the new weights along with the bias\n self.weights = np.add(self.weights, aep)", "def train_on_batch(model,\n\t\t\t batch_of_x,\n\t\t\t batch_of_y,\n\t\t\t optimizer):\n model.zero_grad()\n\n loss = model.loss(batch_of_x, batch_of_y)\n\n loss.backward()\n\n optimizer.step()\n\n return", "def fit_batch(self, data, labels, mask, weights_init, settings, test_data=None,test_labels=None):\n if self.early_stopping:\n stop_patience = settings['patience']\n patience=0\n best_acc=0.0\n current_epoch = 0\n n = np.size(data,axis=0)\n n_batch = self.batch_size\n acc_history = []\n x_train, y_train, x_val, y_val = self.shuffle_in_unison(data,labels, settings['split'])\n\n if not settings['use_random_init']:\n current_weights = weights_init\n else:\n current_weights = self.get_weights()\n for e in range(0, settings['n_epochs']):\n x_train, y_train, _,_ = self.shuffle_in_unison(x_train,y_train,0.0)\n current_epoch=e\n print(\"Epoch \" + str(e+1) + \"/\" + str(settings['n_epochs']))\n for j in 
tqdm(range(int(len(x_train) / n_batch))):\n masked_weights = self.mask_weights(mask, current_weights)\n self.model.set_weights(masked_weights)\n j_start = j*n_batch\n j_end = (j+1)*n_batch\n Xbatch = x_train[j_start:j_end,:,:]\n Ybatch = y_train[j_start:j_end]\n self.model.train_on_batch(Xbatch,Ybatch)\n current_weights = self.get_weights()\n if self.early_stopping:\n _, val_acc = self.evaluate_model(x_val,y_val)\n if val_acc <= best_acc:\n patience=patience+1\n else:\n best_acc=val_acc\n if settings['eval_test']:\n _, test_acc = self.evaluate_model(test_data,test_labels)\n acc_history.append(test_acc)\n if self.early_stopping:\n if patience>=stop_patience:\n break\n \n new_weights = self.mask_weights(mask, current_weights)\n self.model.set_weights(new_weights)\n return acc_history, current_epoch", "def train_batch():\n\n X, Y = train_X, train_Y\n feed_dict = {encoder[t]: X[t] for t in range(len(encoder))}\n feed_dict.update({labels[t]: Y[t] for t in range(len(labels))})\n _, loss_t = sess.run([train_op, loss], feed_dict)\n return loss_t", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train_batch(self, data, num_iteration, verbose=False):\n self.train(data, num_iteration, random_order=False, verbose=verbose)", "def _do_training(self, iteration, batch):\n\n feed_dict = self._get_feed_dict(iteration, batch)\n\n self._session.run(self._training_ops, feed_dict)\n\n if iteration % self._target_update_interval == 0:\n # Run target ops here.\n self._update_target()", "def train_step(x_batch, y_batch):\n feed_dict = {\n cnn.input_x: x_batch,\n cnn.input_y: y_batch,\n cnn.dropout_keep_prob: FLAGS.dropout_keep_prob,\n }\n _, step, loss = sess.run([train_op, global_step, cnn.loss], feed_dict)", "def train(self, training_steps=10):", "def train(self, mode=True):\n super(Encoder, self).train(mode)\n self.apply(freeze_batchnorm)", "def train_step(self, batch: dict, epoch: int):\n\n with torch.cuda.amp.autocast(self.mixed_precision):\n \n # Update momentum {key, pseudo} networks\n with torch.no_grad():\n self._momentum_update_key_net()\n self._momentum_update_pseudo_net()\n\n # Get data (3 views)\n x_q = batch['x1'].to(self.local_rank)\n x_k = batch['x2'].to(self.local_rank)\n x_ps = batch['x3'].to(self.local_rank)\n \n # Compute strong query features; (B, f)\n z_q = F.normalize(self.net_q(x_q), dim=1)\n\n with torch.no_grad():\n \n # Shuffle across nodes (gpus)\n x_k, idx_unshuffle_k = ForMoCo.batch_shuffle_ddp(x_k)\n x_ps, idx_unshuffle_ps = ForMoCo.batch_shuffle_ddp(x_ps)\n \n # Compute {key, pseudo} features; (B, f)\n z_k = F.normalize(self.net_k(x_k), dim=1)\n z_ps = F.normalize(self.net_ps(x_ps), dim=1)\n \n # Restore {key, pseudo} features to their original nodes\n z_k = ForMoCo.batch_unshuffle_ddp(z_k, idx_unshuffle_k)\n z_ps = ForMoCo.batch_unshuffle_ddp(z_ps, idx_unshuffle_ps)\n\n # Compute loss\n loss, logits, labels, loss_pseudo, probs_pseudo_neg = \\\n self.loss_function(z_q, z_ps, z_k, self.queue.buffer, threshold=self.threshold)\n \n # Backpropagate & update\n if loss_pseudo.isnan() or (epoch <= self.ramp_up):\n self.backprop(loss)\n else:\n alpha = 1.0\n self.backprop(loss + alpha * loss_pseudo)\n \n # Compute metrics\n with torch.no_grad():\n \n # Accuracy of true positives against all negatives\n rank_1 = TopKAccuracy(k=1)(logits, labels)\n \n # Accuracy of pseudo positives with ground truth labels\n above_threshold = probs_pseudo_neg.ge(self.threshold)\n num_pseudo = above_threshold.sum()\n \n 
# No pseudo positives may have been selected\n if self.queue.is_reliable and (num_pseudo > 0):\n labels_query = batch['y'].to(self.local_rank) # (B, )\n labels_queue = self.queue.labels # (k, )\n is_correct = labels_query.view(-1, 1).eq(labels_queue.view(1, -1)) # (B, 1) @ (1, k) -> (B, k)\n num_correct = is_correct.masked_select(above_threshold).sum()\n precision = torch.true_divide(num_correct, num_pseudo)\n else:\n num_correct = torch.zeros(1, dtype=torch.long, device=num_pseudo.device)\n precision = torch.zeros(1, dtype=torch.float32, device=num_pseudo.device)\n \n # Update memory queue\n self.queue.update(keys=z_k, labels=batch['y'].to(self.local_rank))\n\n return {\n 'loss': loss.detach(),\n 'loss_pseudo': loss_pseudo.detach(), # (1, ) or tensor(nan)\n 'rank@1': rank_1,\n 'num_correct': num_correct,\n 'num_pseudo': num_pseudo,\n 'precision': precision,\n }", "def train_step(x_batch, y_batch):\n feed_dict = {\n cnn.input_x: x_batch,\n cnn.input_y: y_batch,\n cnn.dropout_keep_prob: FLAGS.dropout_keep_prob\n }\n _, step, loss, accuracy = sess.run(\n [train_op, global_step, cnn.loss, cnn.accuracy],\n feed_dict)", "def train_step(self, batch_sample, epoch_it):\n batch_x = batch_sample['waveform']\n data_type = batch_sample['data_type']\n batch_target = {\n 'ov': batch_sample['ov'],\n 'sed': batch_sample['sed_label'],\n 'doa': batch_sample['doa_label'],\n }\n if self.cuda:\n batch_x = batch_x.cuda(non_blocking=True)\n batch_target['sed'] = batch_target['sed'].cuda(non_blocking=True)\n batch_target['doa'] = batch_target['doa'].cuda(non_blocking=True)\n\n\n self.optimizer.zero_grad()\n self.af_extractor.train()\n self.model.train()\n\n (batch_x, batch_target) = self.af_extractor((batch_x, batch_target,'train', data_type))\n batch_x = (batch_x - self.mean) / self.std\n if self.cfg['training']['model'] == 'SELD_ATT' or self.cfg['training']['model'] == 'SELD_ATT_LIGHT':\n pred, pred_constraint = self.model(batch_x)\n if self.cfg['training']['model'] == 'EINV2':\n pred = self.model(batch_x)\n if self.cfg['training']['model'] == 'SELD_ATT' or self.cfg['training']['model'] == 'SELD_ATT_LIGHT':\n loss_dict = self.losses.calculate_attention(pred, pred_constraint,batch_target, epoch_it,self.model)\n if self.cfg['training']['model'] == 'EINV2':\n loss_dict = self.losses.calculate(pred, batch_target, epoch_it, self.model)\n\n loss_dict[self.cfg['training']['loss_type']].backward(retain_graph=False)\n self.optimizer.step()\n\n self.train_losses['train_loss_all'] += loss_dict['all'].item()\n self.train_losses['train_loss_sed'] += loss_dict['sed'].item()\n self.train_losses['train_loss_doa'] += loss_dict['doa'].item()\n\n if self.cfg['training']['weight_constraints']:\n self.train_losses['train_loss_weight_orthogonal'] += loss_dict['loss_weight_orthogonal'].item()\n\n if self.cfg['training']['weight_constraints_1']:\n self.train_losses['train_loss_weight_orthogonal_1'] += loss_dict['loss_weight_orthogonal_1'].item()\n\n if self.cfg['training']['layer_constraints']:\n self.train_losses['train_loss_layer_orthogonal'] += loss_dict['loss_layer_orthogonal'].item()\n\n if self.cfg['training']['layer_constraints_1']:\n self.train_losses['train_loss_layer_orthogonal_1'] += loss_dict['loss_layer_orthogonal_1'].item()\n\n if self.cfg['training']['smoothness_loss']:\n self.train_losses['train_loss_doa_smoothness'] += loss_dict['loss_doa_smoothness'].item()", "def train_batches(feature_matrix,y):\r\n X_org,y_org = shuffle(feature_matrix, y, random_state=13)\r\n \r\n # Splitting 20% of dataset to be test later\r\n 
X_train_org, X_test_org, y_train_org, y_test_org = train_test_split(X_org, y_org,test_size=0.20)\r\n X = X_train_org\r\n y = y_train_org\r\n \r\n \r\n for name, clf in classifiers:\r\n inc = Incremental(clf, scoring='accuracy')\r\n batch_size=5000\r\n counter=0\r\n train_acc = [] \r\n test_acc = []\r\n \r\n # Initializing Standard Scaler and IPCA for each classifier\r\n \r\n SS = StandardScaler()\r\n # IPCA = IncrementalPCA(n_components = 500)\r\n n=1\r\n print(\"Training \", name,\".......\\n\")\r\n for j in range(80):\r\n if counter >= len(X):\r\n break\r\n \r\n # Splitting each batch into training and validation datset\r\n X_train, X_test, y_train, y_test = train_test_split(X[counter:counter+batch_size], y[counter:counter+batch_size],test_size=0.25)\r\n print(\"Iteration:\",n)\r\n \r\n classes = da.unique(y_train).compute()\r\n \r\n # Feature Scaling\r\n SS.partial_fit(np.asarray(X_train))\r\n SS.transform(np.asarray(X_test))\r\n \r\n # Feature Decomposition\r\n # IPCA.partial_fit(X_train)\r\n # IPCA.transform(X_test)\r\n \r\n # Partial fitting - Stochastic Gradient Descent\r\n inc.partial_fit(X_train, y_train, classes=classes)\r\n print('Training Score:', inc.score(X_train, y_train))\r\n print('Validation Score:', inc.score(X_test, y_test))\r\n print(\"\\n\")\r\n \r\n # Concatenating batch scores\r\n train_acc.append(inc.score(X_train, y_train)) \r\n test_acc.append(inc.score(X_test, y_test)) \r\n \r\n if(len(X)-counter < batch_size):\r\n batch_size = len(X)-counter\r\n counter += batch_size\r\n n += 1\r\n \r\n \r\n # Savings the model\r\n filename = r'C:\\PythonCodes\\MM803\\code\\Outputs\\New\\f_canny_'+name+'.sav'\r\n pickle.dump(inc, open(filename, 'wb'))\r\n \r\n # Printing Model Accuracy\r\n print(name,\" MODEL ACCURACY\")\r\n print(\"_______________________\")\r\n print(\"Avg Training Accuracy of \", name,\":\", statistics.mean(train_acc)) \r\n print(\"Avg Test Accuracy \", name,\":\",statistics.mean(test_acc))\r\n \r\n \r\n # Testing on Unseen Data\r\n SS.transform(np.asarray(X_test_org[:5000]))\r\n # IPCA.transform(X_test_org[:5000])\r\n print('\\nFinal Testing Score on Unseen data 1 by ', name,':', inc.score(X_test_org[:10], y_test_org[:10]))\r\n print('Final Testing Score on Unseen data 2 by ', name,':', inc.score(X_test_org[10:100], y_test_org[10:100]))\r\n print('Final Testing Score on Unseen data 3 by ', name,':', inc.score(X_test_org[500:1000], y_test_org[500:1000]))\r\n \r\n print('\\n\\nClassification Report of', name)\r\n print('------------------------------------')\r\n print(classification_report(y_test_org[:5000],inc.predict(X_test_org[:5000]), digits = 4))\r\n print('====================================')\r\n print('\\n')\r\n \r\n # Saving the trained StandardScaler to be used for testing\r\n filename_ss = r'C:\\PythonCodes\\MM803\\code\\Outputs\\New\\f_canny_SS.sav'\r\n pickle.dump(SS, open(filename_ss, 'wb'))\r\n \r\n # Saving the trained Incremental PCA to be used for testing\r\n #filename_ipca = r'C:\\PythonCodes\\MM803\\code\\Outputs\\New\\f_hed_IPCA.sav'\r\n #pickle.dump(IPCA, open(filename_ipca, 'wb')) \r", "def train(self, trainingData, trainingLabels, validationData, validationLabels ):\n\n self.setWeights(trainingData.shape[1])\n # DO NOT ZERO OUT YOUR WEIGHTS BEFORE STARTING TRAINING, OR\n # THE AUTOGRADER WILL LIKELY DEDUCT POINTS.\n \n # Hyper-parameters. Your can reset them. 
Default batchSize = 100, weight_decay = 1e-3, learningRate = 1e-2\n \"*** YOU CODE HERE ***\"\n self.batchSize = 100\n self.weight_decay = 1e-3\n self.learningRate = 0.1\n\n def Softmax(x):\n x_max = np.max(x, axis=0)\n x_exp = np.exp(x - x_max)\n x_exp_sum = np.sum(x_exp, axis=0)\n return x_exp / x_exp_sum\n\n for iteration in range(self.max_iterations):\n if iteration % 10 == 0:\n print(\"Starting iteration \", iteration, \"...\")\n self.learningRate *= 0.9\n dataBatches = self.prepareDataBatches(trainingData, trainingLabels)\n for batchData, batchLabel in dataBatches:\n \"*** YOUR CODE HERE ***\"\n Y = np.zeros((len(self.legalLabels), self.batchSize))\n for i in range(self.batchSize):\n Y[batchLabel[i]][i] = 1\n Y_pred = Softmax((batchData @ self.weights + self.bias).T)\n d_weight = ((Y_pred - Y) @ batchData / batchData.shape[0]).T + self.weight_decay * sum(self.weights)\n d_bias = np.mean(Y_pred - Y, axis=1) + self.weight_decay * sum(self.bias)\n self.weights -= d_weight * self.learningRate\n self.bias -= d_bias * self.learningRate", "def train_one_epoch(self):\n print('Training......')\n\n # set mode train\n self.network.train()\n\n # prepare data\n train_loss = 0\n transform = transforms.Compose([Rescale(params.rescale_size),\n RandomCrop(params.image_size),\n RandomHorizontalFlip(),\n ToTensor()\n ])\n\n\n\n dataset = Cityscapes(params.dataset_root, mode='train', transforms = transform)\n\n train_loader = DataLoader(dataset,\n batch_size=params.train_batch,\n shuffle=params.shuffle,\n num_workers=params.dataloader_workers)\n \n train_size = 1896\n if train_size % self.params.train_batch != 0:\n total_batch = train_size // self.params.train_batch + 1\n else:\n total_batch = train_size // self.params.train_batch\n recal = 0\n precision = 0\n F_one = 0\n IOU = 0\n accuracy_new = 0 \n # train through dataset\n for batch_idx, batch in enumerate(train_loader):\n self.pb.click(batch_idx, total_batch)\n image, label = batch['image'], batch['label']\n image_cuda, label_cuda = image.cuda(), label.cuda()\n\n # checkpoint split\n if self.params.should_split:\n image_cuda.requires_grad_()\n out = checkpoint_sequential(self.network, self.params.split, image_cuda)\n else:\n out = self.network(image_cuda)\n\n\n loss = self.loss_fn(out, label_cuda)\n \n #display_image(out, label_cuda)\n TP, FP, TN, FN = confusion(out, label_cuda)\n recal = recal+TP\n precision = precision+FP\n F_one = F_one + TN\n IOU = IOU+ FN \n accuracy_final = accuracy(out, label_cuda)\n accuracy_new = accuracy_new + accuracy_final\n\n # optimize\n self.opt.zero_grad()\n loss.backward()\n self.opt.step()\n\n # accumulate\n train_loss += loss.item()\n\n # record first loss\n if self.train_loss == []:\n self.train_loss.append(train_loss)\n self.summary_writer.add_scalar('loss/train_loss', train_loss, 0)\n \n print(\"\\t\")\n print(recal/total_batch, precision/ total_batch, F_one/ total_batch, IOU/ total_batch)\n print(accuracy_new/total_batch)\n \n self.pb.close()\n train_loss /= total_batch\n self.train_loss.append(train_loss)\n\n # add to summary\n self.summary_writer.add_scalar('loss/train_loss', train_loss, self.epoch)", "def _train(self):\r\n lr, hr = self.sess.run(self.val_batch)\r\n res = self.sess.run(\r\n [self.train, self.merged,\r\n self.GAN.g_loss, self.GAN.mse_loss, self.GAN.g_gan_loss,\r\n self.GAN.d_loss, self.GAN.d_loss_real, self.GAN.d_loss_fake],\r\n feed_dict={\r\n self.GAN.g_images: lr,\r\n self.GAN.d_images: hr,\r\n self.GAN.is_training: True\r\n })\r\n\r\n return res[1:]", "def _train_batch(self, review_fwd, 
review_bwd, summary):\n # feed in the data for forward model\n feed_dict_fwd = {self.enc_inp_fwd[t]: review_fwd[t] for t in range(self.seq_length)}\n feed_dict_fwd.update({self.labels[t]: summary[t] for t in range(self.seq_length)})\n\n # feed in the data for the backward model\n feed_dict_bwd = {self.enc_inp_bwd[t]: review_bwd[t] for t in range(self.seq_length)}\n feed_dict_bwd.update({self.labels[t]: summary[t] for t in range(self.seq_length)})\n\n # train forward model\n print 'Forward Batch Training.......'\n _, loss_t_forward = self.sess.run([self.train_op_fwd, self.loss_fwd], feed_dict_fwd)\n\n # train backward model\n print 'Backward Batch Training.......'\n _, loss_t_backward = self.sess.run([self.train_op_bwd, self.loss_bwd], feed_dict_bwd)\n\n return loss_t_forward, loss_t_backward", "def run_batch(self, batch_x, batch_y):\n raise NotImplementedError()", "def train(model, learning_rate=0.001, batch_size=50, epochs=5):\n for e in range (epochs):\n epoch_loss = 0\n batchIndex =0\n \n for i in range (int((len(train_ids))/batch_size)):\n minibatch_loss = 0\n batchInput = Variable(torch.FloatTensor(trainingInputs[batchIndex: batchIndex +batch_size])).squeeze()\n batchLabelOutput = Variable(torch.FloatTensor(trainingOutputs[batchIndex: batchIndex +batch_size])).squeeze()\n \n output = model(batchInput)\n optimizer.zero_grad()\n loss = criterion(output, batchLabelOutput)\n minibatch_loss += loss.data[0]\n loss.backward()\n optimizer.step()\n \n #print(minibatch_loss /batch_size)\n #if ((i == (len(train_ids)/batch_size) - 1) ):\n # print(minibatch_loss /batch_size)\n \n batchIndex = batchIndex + batch_size\n #epoch_loss += loss.data[0]\n #print(epoch_loss /len(train_ids))", "def _train(self):\n epoch_training_time = 0\n epoch_metrics_time = 0\n self.epoch_ += 1\n for i_batch, sample_batched in enumerate(self.dataloader):\n self.global_step_ += 1\n batch_start_time = time.time()\n data_sample = sample_batched[0].to(self.device)\n\n # Get model samples, either from replay buffer or noise.\n if self.model_samples_ is None:\n self.model_samples_ = deque(\n [\n self.net_.sample_from_prior(\n data_sample.shape[0], device=self.device\n ).detach()\n ]\n )\n elif len(self.model_samples_) > self.max_replay:\n self.model_samples_.popleft()\n replay_sample = random.choices(\n self.model_samples_,\n # favor more recent samples:\n weights=list(range(1, len(self.model_samples_) + 1)),\n )[0]\n noise_sample = self.net_.sample_from_prior(\n replay_sample.shape[0], device=self.device\n )\n mask = torch.rand(replay_sample.shape[0]) < self.replay_prob\n while len(mask.shape) < len(replay_sample.shape):\n # Add extra feature-dims\n mask.unsqueeze_(dim=-1)\n\n model_sample = torch.where(\n mask.to(self.device), replay_sample, noise_sample\n )\n\n self.net_.eval()\n # Run at least one iteration\n model_sample = self.net_.sample_fantasy(\n model_sample,\n num_mc_steps=self.num_mc_steps,\n mc_dynamics=self.sampler,\n ).detach()\n\n self.model_samples_.append(model_sample)\n\n # Sanity checks:\n assert (\n data_sample.shape[1:] == self.net_.input_shape\n ), \"Data is incompatible with network.\"\n assert (\n model_sample.shape[1:] == data_sample.shape[1:]\n ), \"Model and data samples are incompatible.\"\n\n # Forward gradient:\n self.net_.train()\n self.net_.zero_grad()\n data_energy_mean = self.net_(data_sample).mean()\n model_energy = self.net_(model_sample)\n model_energy_mean = model_energy.mean()\n\n # Estimate the odds of the data's energy based on a normal fitted to\n # model samples:\n data_erf = 
torch.erf(\n (data_energy_mean - model_energy_mean) / model_energy.std()\n )\n\n objective = data_energy_mean - model_energy_mean\n objective.backward()\n torch.nn.utils.clip_grad.clip_grad_value_(self.net_.parameters(), 1e2)\n self.optimizer_.step()\n\n batch_training_time = time.time() - batch_start_time\n epoch_training_time += batch_training_time\n self.logger_(energy_diff=float(objective))\n self.logger_(data_erf=float(data_erf))\n\n tr_metrics_start_time = time.time()\n for callback in self.step_callbacks:\n callback(\n net=self.net_,\n data_sample=data_sample,\n model_sample=model_sample,\n epoch=self.epoch_,\n global_step=self.global_step_,\n validation=False,\n )\n tr_metrics_time = time.time() - tr_metrics_start_time\n epoch_metrics_time += tr_metrics_time\n if self.verbose:\n print(\n f\"on epoch {self.epoch_}, batch {i_batch}, data erf: {data_erf}, objective: {objective}\"\n )\n print(f\"model energy: {model_energy_mean} +- {model_energy.std()}\")\n print(f\"data energy: {data_energy_mean}\")\n print(\n f\"training time: {batch_training_time:0.3f}s, metrics time: {tr_metrics_time:0.3f}s\"\n )\n means = self.logger_.means()\n if self.verbose:\n print(f\"on epoch {self.epoch_}\")\n for k, v in means.items():\n print(f\"{k}: {v}\")\n self.logger_.flush()\n means[\"loss\"] = energy_model.utils.constraints.add_soft_constraint(\n means[\"loss_ais\"], means[\"data_erf\"], lower_bound=-1\n )\n return means", "def train_batch(model, session_batch):\n if len(session_batch) == 0:\n return\n batch_size = len(session_batch)\n batch = np.zeros((batch_size, INPUT_SIZE))\n for i in range(batch_size):\n batch[i] = session_to_input(session_batch[i])\n pred = predict(model, session_batch)\n model.fit(batch, pred, batch_size=batch_size, verbose=0)", "def train_on_batch(self, x_train):\n # Prep data for batch update.\n (x_neg, y_neg,\n x_pos, y_pos,\n y_train) = self._prep_data(x_train)\n\n # 1. Train Autoencoder.\n loss = self.autoencoder.train_on_batch(x_train, y_train)\n if self.joint_train:\n _, ae_loss, gan_loss = loss\n else:\n ae_loss = loss\n\n # 2. Train Critic (Discriminator).\n # self.critic.trainable = True # Unfreeze critic.\n critic_loss = self.critic.train_on_batch(x_neg, y_neg)\n critic_loss += self.critic.train_on_batch(x_pos, y_pos)\n\n # 3. 
Train Generator again on updated critic.\n # Note: this is not executed if autoencoder training\n # includes generator trick loss.\n if not self.joint_train:\n gan_loss = self.gan.train_on_batch(x_train, y_pos)\n\n sum_loss = ae_loss + critic_loss + gan_loss\n return (sum_loss, ae_loss, critic_loss, gan_loss)", "def next_batch(self, batch_size, shuffle=True):", "def _batch_train(self, batch, training_step, step):\n lstm_size = (self.batch_size, self.Qmain.h_size)\n batch_mem = np.zeros(lstm_size)\n batch_carry = np.zeros(lstm_size)\n input_shape = (self.batch_size,\n self.trace_length,\n self.observation_size)\n m_data = np.vstack(batch[:, 0])\n m_data = m_data.reshape(input_shape)\n t_data = np.vstack(batch[:, 4])\n t_data = t_data.reshape(input_shape)\n q_input = [np.copy(batch_mem), np.copy(batch_carry), np.copy(m_data)]\n q1_input = [np.copy(batch_mem), np.copy(batch_carry), np.copy(t_data)]\n\n # Batch predict\n self.Qmain.trace_length.assign(self.trace_length)\n self.Qmain.dropout_rate.assign(0.0)\n self.Qtarget.trace_length.assign(self.trace_length)\n self.Qtarget.dropout_rate.assign(0.0)\n\n # Save the graph just the first time\n if training_step == 0:\n tf.summary.trace_on()\n\n # T batch predict\n pred = self.Qmain.model.predict(q_input,\n batch_size=self.batch_size)\n Q = pred[0]\n batch_bus = pred[1]\n batch_line = pred[2]\n batch_disp = pred[3]\n\n ## Log graph once and disable graph logging\n if training_step == 0:\n with self.tf_writer.as_default():\n tf.summary.trace_export(self.name + \"-graph\", step)\n\n # T+1 batch predict\n Qn, *_ = self.Qtarget.model.predict(q1_input,\n batch_size=self.batch_size)\n \n # Compute batch Q update to Qtarget\n for i in range(self.batch_size):\n idx = i * (self.trace_length - 1)\n a = batch[idx][1]\n grid = a[0]\n batch_bus[i][:] = a[1][:]\n batch_line[i][:] = a[2][:]\n batch_disp[i][:] = a[3][:]\n r = batch[idx][2]\n d = batch[idx][3]\n Q[i][grid] = r\n if d == False:\n Q[i][grid] += DISCOUNT_FACTOR * Qn[i][grid]\n\n # Batch train\n batch_x = [batch_mem, batch_carry, m_data]\n batch_y = [\n Q,\n batch_bus, batch_line, batch_disp,\n batch_mem, batch_carry\n ]\n loss = self.Qmain.model.train_on_batch(batch_x, batch_y)\n loss = loss[0]\n\n # Log to tensorboard\n self._tf_log_summary(loss, step)", "def train_batch(self,X_batch,Y_batch):\n\n average_loss = 0\n for x, y in zip(X_batch, Y_batch):\n datum_loss = self.train_datum(x,y)\n average_loss += datum_loss / self.batch_size\n\n # Update weights on all layers after processing the batch\n for l in self.layers:\n l.update_weights()\n\n return average_loss", "def run_epoch(self):\n print(\"Training\")\n self.set_train()\n\n for batch_idx in range(0, self.num_total_batch):\n\n before_op_time = time.time()\n # Choosing the dataloader for training model\n if self.choosing_dataset_to_train_with(batch_idx):\n # Synthetic dataset\n self.syn_or_real = 'syn'\n try:\n inputs = self.syn_train_iter.__next__()\n except StopIteration:\n print('Stopped as the iteration has reached to the END, and reloading the synthetic dataloader')\n self.syn_train_iter = iter(self.syn_train_loader)\n inputs = self.syn_train_iter.__next__()\n else:\n # Real dataset\n self.syn_or_real = 'real'\n try:\n inputs = self.real_train_iter.__next__()\n except StopIteration:\n print('Stopped as the iteration has reached to the END, and reloading the real dataloader')\n self.real_train_iter = iter(self.real_train_loader)\n inputs = self.real_train_iter.__next__()\n\n # Move all available tensors to GPU memory\n for key, ipt in 
inputs.items():\n if type(key) == tuple or key == \"depth_gt\":\n inputs[key] = ipt.to(self.device)\n\n # log less frequently after the first 2000 steps to save time & disk space\n self.step += 1\n self.early_phase = batch_idx % self.opt.log_frequency == 0\n self.mid_phase = False and self.step % self.opt.save_frequency == 0\n self.late_phase = self.num_total_batch - 1 == batch_idx\n\n outputs, losses = {}, {}\n # Depth estimation\n outputs_d, losses_d = self.process_batch(inputs)\n outputs.update(outputs_d)\n losses.update(losses_d)\n\n # No more if else conditions, just combine all losses based on availability of gradients\n final_loss = torch.tensor(0.).to(self.device)\n for k, v in losses.items():\n if ('d_' not in k) and v.requires_grad and ('/' not in k):\n final_loss += v\n final_loss.backward()\n losses[\"loss\"] = final_loss\n\n if (batch_idx + 1) % 2 == 0:\n self.model_optimizer.step()\n self.model_optimizer.zero_grad()\n self.zero_grad()\n\n duration = time.time() - before_op_time\n self.log_time(batch_idx, duration, losses[\"loss\"].cpu().data)\n\n if \"depth_gt\" in inputs:\n self.compute_depth_losses(inputs, outputs, losses)\n\n if self.early_phase or self.mid_phase or self.late_phase:\n self.log(\"train\", inputs, outputs, losses)\n self.val(\"real\")\n self.val(\"syn\")\n\n if (batch_idx + 1) % 2 == 0:\n current_lr = self.update_learning_rate(self.model_optimizer, self.opt.learning_rate)", "def train_batch(self, batch_info: BatchInfo) -> None:\n # Each DQN batch is\n # 1. Roll out environment and store out experience in the buffer\n self.model.eval()\n\n # Helper variables for rollouts\n episode_information = []\n frames = 0\n\n with torch.no_grad():\n if not self.env_roller.is_ready_for_sampling():\n while not self.env_roller.is_ready_for_sampling():\n rollout = self.env_roller.rollout(batch_info, self.model)\n\n episode_information.extend(rollout.episode_information())\n frames += rollout.frames()\n else:\n for i in range(self.settings.batch_rollout_rounds):\n rollout = self.env_roller.rollout(batch_info, self.model)\n\n episode_information.extend(rollout.episode_information())\n frames += rollout.frames()\n\n batch_info['frames'] = frames\n batch_info['episode_infos'] = episode_information\n\n # 2. 
Sample the buffer and train the algo on sample batch\n self.model.train()\n\n # Algo will aggregate data into this list:\n batch_info['sub_batch_data'] = []\n\n for i in range(self.settings.batch_training_rounds):\n sampled_rollout = self.env_roller.sample(batch_info, self.model)\n\n batch_result = self.algo.optimizer_step(\n batch_info=batch_info,\n device=self.device,\n model=self.model,\n rollout=sampled_rollout\n )\n\n self.env_roller.update(rollout=sampled_rollout, batch_info=batch_result)\n\n batch_info['sub_batch_data'].append(batch_result)\n\n batch_info.aggregate_key('sub_batch_data')", "def _train_batch(self):\n\n # start epoch\n for i, (source, target) in enumerate(self.train_dataset):\n result = self._batch_iter(source, target, i)\n\n # yield\n yield result", "def train(self, mode: bool = True):\n if self.nn_module.training != mode:\n self.nn_module.train(mode)", "def train(self):\n self.training = True", "def __train_batch(self, x, y):\n self.reset()\n\n for index, batch in enumerate(x):\n self.predict(batch, dropout_probability=self.dropout_probability)\n self.out_layer.loss(y[index])\n\n # increment hit rate if, well, hit\n if m.get_max_index(self.out_layer.predicted) == m.get_max_index(y[index]):\n self.hit_count += 1.0\n\n # calculate batch loss\n self.batch_loss += (self.out_layer.cost / len(x))\n\n # calculate all delta\n self.out_layer.calculate_delta()\n\n # update weights\n self.in_layer.update(momentum_parameter=self.momentum_parameter)", "def train_batch(batch_size):\n X, Y = generatedata(isTrain=True, batch_size=batch_size)\n feed_dict = {enc_inp[t]: X[t] for t in range(len(enc_inp))}\n feed_dict.update({expected_sparse_output[t]: Y[t] for t in range(len(expected_sparse_output))})\n \n _, loss_t = sess.run([train_op, loss], feed_dict)\n return loss_t", "def trainNet():", "def train(self, session, train_dataset, val_dataset, train_dir):\n\n #self.saver=saver\n tic = time.time()\n params = tf.trainable_variables()\n num_params = sum(map(lambda t: np.prod(tf.shape(t.value()).eval()), params))\n toc = time.time()\n logging.info(\"Number of params: %d (retreival took %f secs)\" % (num_params, toc - tic))\n\n # context_ids, question_ids, answer_spans, ctx_mask ,q_mask, train_context = dataset\n # train_dataset = [context_ids, question_ids, answer_spans, ctx_mask ,q_mask]\n\n # val_context_ids, val_question_ids, val_answer_spans, val_ctx_mask, val_q_mask, val_context = val_dataset\n # val_dataset = [val_context_ids, val_question_ids, val_answer_spans, val_ctx_mask, val_q_mask]\n\n \n num_epochs = self.flags.epochs\n\n # print train_dataset[0].shape,train_dataset[1].shape\n # print val_dataset[0].shape,val_dataset[1].shape\n\n #if self.flags.debug:\n # train_dataset = [elem[:self.flags.batch_size*1] for elem in train_dataset]\n # val_dataset = [elem[:self.flags.batch_size*1] for elem in val_dataset]\n # num_epochs = 100\n \n # print train_dataset[0].shape,train_dataset[1].shape\n # print val_dataset[0].shape,val_dataset[1].shape\n # assert False\n\n for epoch in range(num_epochs):\n logging.info(\"Epoch %d out of %d\", epoch + 1, self.flags.epochs)\n self.run_epoch(sess=session,\n train_set=train_dataset, \n val_set=val_dataset)\n logging.info(\"Saving model in %s\", train_dir)\n self.saver.save(session, train_dir+\"/\"+self.flags.run_name+\".ckpt\")", "def train(self, training, epochs, group):\n for epoch in range(epochs):\n self.input_matrix={}\n self.back_propagation_learning(training)\n acc = accuracy(self, group)\n print(\"Accuracy on epoch {} is {} \".format(epoch, 
acc))", "def run_train_iter(self, session, batch, summary_writer):\n # Match up our input data with the placeholders\n input_feed = {}\n input_feed[self.context_ids] = batch.context_ids\n input_feed[self.context_mask] = batch.context_mask\n input_feed[self.qn_ids] = batch.qn_ids\n input_feed[self.qn_mask] = batch.qn_mask\n input_feed[self.ans_ids] = batch.ans_ids\n input_feed[self.ans_mask] = batch.ans_mask\n input_feed[self.keep_prob] = 1.0 - self.FLAGS.dropout # apply dropout\n\n # if not use raw graph tokens\n if not self.FLAGS.use_raw_graph:\n input_feed[self.context_embedding] = batch.context_embeddings\n\n # output_feed contains the things we want to fetch.\n output_feed = [self.updates, self.summaries, self.loss, self.global_step, self.param_norm, self.gradient_norm, self.dev_loss]\n\n # Run the model\n [_, summaries, loss, global_step, param_norm, gradient_norm, dev_loss] = session.run(output_feed, input_feed)\n\n # All summaries in the graph are added to Tensorboard\n summary_writer.add_summary(summaries, global_step)\n\n return loss, global_step, param_norm, gradient_norm, dev_loss", "def train(self):\n\t\tself.model.fit(self.training_data, self.training_labels)", "def train(self, training_data):\n pass", "def train(self, X_train, Y_train, X_test = None, Y_test = None, epochs = 100, batch_size = 32, learning_rate = 0.005):\n m_train = X_train.shape[1]\n for epoch in range(epochs + 1):\n batch = np.arange(0, m_train)\n np.random.shuffle(batch)\n for k in range(m_train // batch_size + 1):\n if k * batch_size < m_train:\n X_mini_batch = X_train[:,batch[k * batch_size:(k + 1) * batch_size]]\n Y_mini_batch = Y_train[:,batch[k * batch_size:(k + 1) * batch_size]]\n self.update_weights(X_mini_batch, Y_mini_batch, learning_rate)\n \n if epoch % 10 == 0: \n # Loss function\n A2 = self.feedforward(X_train)\n cost = (1 / m_train) * np.sum(-np.multiply(Y_train, np.log(A2)) - np.multiply(1 - Y_train, np.log(1 - A2)))\n print(f\"epoch:{epoch}, Cost: {cost}, \", end = '')\n # Accutacy on training data\n if X_test is not None and Y_test is not None:\n A2_test = self.feedforward(X_test)\n class_pred = A2_test.argmax(axis = 0)\n class_actual = Y_test.argmax(axis = 0)\n acc = sum(class_actual == class_pred)\n print(f\"accuracy:{acc}/{X_test.shape[1]}\")", "def train_step(x_batch, y_batch, x_batch_lex):\n feed_dict = {\n cnn.input_x: x_batch,\n cnn.input_y: y_batch,\n # lexicon\n cnn.input_x_lexicon: x_batch_lex,\n cnn.dropout_keep_prob: FLAGS.dropout_keep_prob\n }\n _, step, summaries, loss, accuracy, neg_r, neg_p, f1_neg, f1_pos, avg_f1 = sess.run(\n [train_op, global_step, train_summary_op, cnn.loss, cnn.accuracy,\n cnn.neg_r, cnn.neg_p, cnn.f1_neg, cnn.f1_pos, cnn.avg_f1],\n feed_dict)\n time_str = datetime.datetime.now().isoformat()\n # print(\"{}: step {}, loss {:g}, acc {:g}\".format(time_str, step, loss, accuracy))\n #print(\"{}: step {}, loss {:g}, acc {:g}, neg_r {:g} neg_p {:g} f1_neg {:g}, f1_pos {:g}, f1 {:g}\".\n # format(time_str, step, loss, accuracy, neg_r, neg_p, f1_neg, f1_pos, avg_f1))\n train_summary_writer.add_summary(summaries, step)", "def train(self):\n return", "def train(batch_size, num_sample=128):\n return paddle.batch(_read_creater(num_sample=num_sample), batch_size)", "def on_train_batch_begin(self, step, logs=None):", "def next_batch(self):\n next_train_index = self.curr_train_index + self.hparams.batch_size\n if next_train_index > self.num_train:\n # Increase epoch number\n epoch = self.epochs + 1\n self.reset()\n self.epochs = epoch\n batched_data = (\n 
self.train_images[self.curr_train_index:self.curr_train_index +\n self.hparams.batch_size],\n self.train_labels[self.curr_train_index:self.curr_train_index +\n self.hparams.batch_size])\n final_imgs = []\n images, labels = batched_data\n if self.hparams.augment_type == 'mixup':\n images, labels = augmentation_transforms.mixup_batch(\n images, labels, self.hparams.mixup_alpha)\n elif self.hparams.augment_type == 'image_freq':\n images, labels = augmentation_transforms.freq_augment(\n images,\n labels,\n amplitude=self.hparams.freq_augment_amplitude,\n magnitude=self.hparams.augmentation_magnitude,\n proportion_f=self.hparams.freq_augment_ffrac,\n probability=self.hparams.augmentation_probability)\n for data in images:\n if self.hparams.augment_type == 'autoaugment':\n epoch_policy = self.good_policies[np.random.choice(\n len(self.good_policies))]\n final_img = augmentation_transforms.apply_policy(epoch_policy, data)\n elif self.hparams.augment_type == 'random':\n epoch_policy = found_policies.random_policy(\n self.hparams.num_augmentation_layers,\n self.hparams.augmentation_magnitude,\n self.hparams.augmentation_probability)\n final_img = augmentation_transforms.apply_policy(epoch_policy, data)\n else:\n final_img = np.copy(data)\n if self.hparams.apply_flip_crop:\n final_img = augmentation_transforms.random_flip(\n augmentation_transforms.zero_pad_and_crop(data, 4))\n # Apply cutout\n if self.hparams.apply_cutout:\n final_img = augmentation_transforms.cutout_numpy(final_img)\n\n final_imgs.append(final_img)\n final_imgs = np.array(final_imgs, np.float32)\n if self.hparams.noise_type == 'radial':\n labels = augmentation_transforms.add_radial_noise(\n final_imgs, labels, self.hparams.frequency, self.hparams.amplitude,\n self.hparams.noise_class, self.hparams.normalize_amplitude)\n elif self.hparams.noise_type == 'random' or self.hparams.noise_type == 'fourier' or self.hparams.noise_type == 'f' or self.hparams.noise_type == '1/f':\n labels = augmentation_transforms.add_sinusoidal_noise(\n final_imgs, labels, self.hparams.frequency, self.hparams.amplitude,\n self.direction, self.hparams.noise_class,\n self.hparams.normalize_amplitude)\n elif self.hparams.noise_type == 'uniform':\n labels = augmentation_transforms.add_uniform_noise(\n labels, self.hparams.amplitude, self.hparams.noise_class)\n\n batched_data = (final_imgs, labels)\n self.curr_train_index += self.hparams.batch_size\n return batched_data", "def train( self, trainingData, trainingLabels, validationData, validationLabels ):\n\n self.features = trainingData[0].keys() # could be useful later\n # DO NOT ZERO OUT YOUR WEIGHTS BEFORE STARTING TRAINING, OR\n # THE AUTOGRADER WILL LIKELY DEDUCT POINTS.\n for iteration in range(self.max_iterations):\n #pdb.set_trace() # esto es un break point para que puedas comprobar el formato de los datos\n print (\"Starting iteration \", iteration, \"...\")\n for i in range(len(trainingData)):#training data\n max = -10000000\n for j in range(len(self.weights)):\n prod = np.dot(self.weights[j], trainingData[i]) #este sería x0 (en la primera vuelta) (xj)\n if (prod > max):\n max=prod #en max guardamos la distancia a la instancia que más cerca está de la que estamos recorriendo\n indclase=j #guardas el índice de la clase a la que predices que pertenece\n\n if(indclase != trainingLabels[i]):\n # recalcular pesos\n self.weights[trainingLabels[i]].__radd__(trainingData[i]) #honek jarraian egiten du gehiketa pisu guztientzat\n #pdb.set_trace() # esto es un break point para que puedas comprobar el formato de 
los datos\n self.weights[indclase].__sub__(trainingData[i]) #honek jarraian egiten du kenketa pisu guztientzat\n\n\n\n\n\n ########################################################################################\n # 1. i es el indice de un ejemplo (un item, f(x) de un ejemplo) del conjunto de entrenamiento.\n # 2. Asi pues, en cada vuelta de este loop se trata un solo ejemplo\n # por cada ejemplo calculareis el producto punto (dotProduct) w*item\n # NOTAS: Recordad que cada ejemplo viene representado por varios rasgos (o features), es decir, es un vector de rasgos, tantos como nos marca el atributo self.features.\n # Asi cada ejemplo es de dimension 1 filas y self.features).\n # La dimension del vector w tambien es self.features, es decir, habra tantos pesos en w_rasgo dentro de w como rasgos haya en cada item de ejemplo\n # Recordad tambien que es una clasificacion multiclase en este caso. Hay tantas clases como nos marca el atributo self.legalLabels\n #########################################################################################", "def train_step(x_batch, y_batch):\r\n feed_dict = {\r\n rnn.input_x: x_batch,\r\n rnn.input_y: y_batch,\r\n rnn.dropout_keep_prob: FLAGS.dropout_keep_prob\r\n }\r\n _, step, loss, accuracy = sess.run(\r\n [train_op, global_step, rnn.loss, rnn.accuracy],\r\n feed_dict)\r\n return step, loss, accuracy", "def train(self, X, y, batch_size=5, num_epochs=10, alpha=0.1, gamma=0.9, learning=\"Delta\"):\r\n for i in range(num_epochs):\r\n bse = 0 \r\n for j in range(X.shape[1]//batch_size):\r\n x_bs = X[:,bse:bse+batch_size]\r\n y_bs = y[:,bse:bse+batch_size]\r\n diff = np.subtract(y_bs, self.predict(x_bs))\r\n bse+=batch_size\r\n\r\n if learning.lower() == \"filtered\":\r\n self.weights = (1-gamma)*self.weights + alpha*(np.dot(y_bs, x_bs.T))\r\n elif learning.lower() == \"delta\":\r\n self.weights = self.weights + alpha*(np.dot(diff, x_bs.T))\r\n\r\n else:\r\n self.weights = self.weights + alpha*(np.dot(self.predict(x_bs), x_bs.T))", "def fit(self, X_train, y_train, X_test, y_test, n_epochs=1):\n with tf.Session() as sess:\n init = tf.global_variables_initializer()\n init.run()\n for epoch in range(n_epochs):\n # Create Batches with size of BATCH_SIZE\n X_train_batches, y_train_batches = generate_random_batches(X_train, y_train, self.batch_size)\n # print(\"-------------------X_train shape: \", X_train.shape)\n # print(\"-------------------y_train shape: \", y_train.shape)\n\n # Iterage through the batches and performn training each time\n for X_batch, y_batch in zip(X_train_batches, y_train_batches):\n # print(\"X_batch shape: \", X_batch.shape)\n # print(\"y_batch shape: \", y_batch.shape)\n # print(X_batch)\n # print(y_batch)\n # Calculate Next Gradient Descent Step\n feed_dict = {self.X_tf: X_batch, self.y_tf: y_batch, self.keep_prob: 0.5}\n summary, _ = sess.run([self.merged_summaries, self.training_op], feed_dict=feed_dict)\n self.writer_train.add_summary(summary, epoch)\n\n # Log Accuracy of Test Data\n feed_dict = {self.X_tf: X_test, self.y_tf: y_test, self.keep_prob: 0.5}\n summary, acc = sess.run([self.merged_summaries, self.accuracy], feed_dict=feed_dict)\n self.writer_test.add_summary(summary, epoch)\n\n # if epoch % 1 == 0:\n acc_train = self.accuracy.eval(feed_dict={self.X_tf: X_train, self.y_tf: y_train, self.keep_prob: 1.0})\n acc_test = self.accuracy.eval(feed_dict={self.X_tf: X_test, self.y_tf: y_test, self.keep_prob: 1.0})\n print(\"Epoch: \", epoch, \"Train accuracy:\", acc_train, \"Test accuracy:\", acc_test)\n\n #Save the final model\n 
self.saver.save(sess, self.log_dir + '/model')", "def train_step(x_batch, y_batch):\n feed_dict = {\n cnn.input_x: x_batch,\n cnn.input_y: y_batch,\n cnn.dropout_keep_prob: FLAGS.dropout_keep_prob\n }\n # print(x_batch[0])\n _, step, summaries, loss, accuracy = sess.run(\n [train_op, global_step, train_summary_op, cnn.loss, cnn.accuracy],\n feed_dict)\n time_str = datetime.datetime.now().isoformat()\n if step%100==0:\n print(\"{}: step {}, loss {:g}, acc {:g}\".format(time_str, step, loss, accuracy))\n train_summary_writer.add_summary(summaries, step)", "def fit(self, batches, batches_valid):\n self.batches = batches\n self.batches_valid = batches_valid\n for batch in range(self.n_total_steps):\n X_batch, y_batch = batches.get_batch()\n feed_dict = {\n self.x: X_batch,\n self.y: y_batch,\n self.keep_prob: 0.85}\n fetch_dict = {\n \"train\": self.train_op,\n \"loss\": self.loss_op,\n \"accuracy\": self.accuracy_op,\n \"global_step\": self.global_step,\n \"learning_rate\": self.learning_rate}\n result = self.session.run(fetch_dict, feed_dict)\n self.log_training(batch, total_batches, result)", "def step(self, sess, batch_data, is_training):\n\n # Input feed\n input_feed = {}\n input_feed[self.images] = batch_data['images']\n input_feed[self.bbox_true_13] = batch_data['bbox_true_13']\n input_feed[self.bbox_true_26] = batch_data['bbox_true_26']\n input_feed[self.bbox_true_52] = batch_data['bbox_true_52']\n\n # Output feed: depends on training or test\n output_feed = [self.loss] # Loss for this batch.\n if is_training:\n output_feed.append(self.train_op) # Gradient updates\n\n outputs = sess.run(output_feed, input_feed)\n return outputs[0] # loss", "def _train(self):\n\n batch = random.sample(self.D, min(self.batch_size, len(self.D)))\n no_state = np.zeros(self.stateCnt)\n\n states = [ o[0] for o in batch]\n states_ = [ (no_state if o[3] is None else o[3]) for o in batch ]\n\n p = []\n p_ = []\n for ii in range(len(batch)):\n p.append(self._predict(states[ii][:,:,:]))\n p_.append(self._predict(states_[ii][:,:,:]))\n\n batchLen = len(batch)\n\n x = np.zeros((batchLen, 84, 84, 1))\n y =np.zeros((batchLen, 11,11,6))\n\n for i in range(batchLen):\n o = batch[i]\n s = o[0]; a = o[1]; r = o[2]; s_ = o[3]\n\n t = p[i][0,:,:,:]\n if s_ is None:\n t[a] = r\n else:\n t[a] = r + self.gamma* np.amax(p_[i])\n x[i] = s\n y[i] = t\n\n self.model.fit(x,y,nb_epoch=1,verbose=0)", "def train(self, X, y, learning_rate=1e-3, num_iters=100,\n batch_size=200, verbose=True):\n num_train, dim = X.shape\n\n if self.w is None:\n self.w = 0.001 * np.random.randn(dim)\n\n loss_history = []\n\n # 将feature与label连起来,方便后面batch的划分\n all_data = list(zip(X, y))\n\n for it in xrange(num_iters):\n X_batch = None\n y_batch = None\n\n #########################################################################\n # TODO: #\n # Sample batch_size elements from the training data and their #\n # corresponding labels to use in this round of gradient descent. #\n # Store the data in X_batch and their corresponding labels in #\n # y_batch; after sampling X_batch should have shape (batch_size, dim) #\n # and y_batch should have shape (batch_size,) #\n # #\n # Hint: Use np.random.choice to generate indices. Sampling with #\n # replacement is faster than sampling without replacement. 
#\n #########################################################################\n\n # batch_data = np.random.choice(all_data, batch_size, False) \n # error: ValueError: a must be 1-dimensional \n # 查询相关api貌似该方法不能用于数组中元素为元组情况下的选取\n batch_data = random.sample(all_data, batch_size)\n X_batch, y_batch = zip(*batch_data)\n X_batch = np.array(X_batch)\n y_batch = np.array(y_batch)\n #########################################################################\n # END OF YOUR CODE #\n #########################################################################\n\n # evaluate loss and gradient\n\n loss, grad = self.loss(X_batch, y_batch)\n loss_history.append(loss)\n\n # perform parameter update\n #########################################################################\n # TODO: #\n # Update the weights using the gradient and the learning rate. #\n #########################################################################\n self.w += float(learning_rate) * np.array(grad)\n #########################################################################\n # END OF YOUR CODE #\n #########################################################################\n\n if verbose and (it % 1000 == 0 or it == num_iters - 1):\n print('iteration %d / %d: loss %f' % (it, num_iters, loss))\n\n return loss_history", "def train_batch(dsc_model: Discriminator, gen_model: Generator,\n dsc_loss_fn: Callable, gen_loss_fn: Callable,\n dsc_optimizer: Optimizer, gen_optimizer: Optimizer,\n x_data: DataLoader):\n\n # TODO: Discriminator update\n # 1. Show the discriminator real and generated data\n # 2. Calculate discriminator loss\n # 3. Update discriminator parameters\n # ====== YOUR CODE: ======\n dsc_optimizer.zero_grad()\n\n real_batch = x_data\n generated_batch = gen_model.sample(len(real_batch), with_grad=True)\n\n y_data = dsc_model(real_batch)\n y_generated = dsc_model(generated_batch.detach())\n\n dsc_loss = dsc_loss_fn(y_data, y_generated)\n dsc_loss.backward()\n\n dsc_optimizer.step()\n # ========================\n\n # TODO: Generator update\n # 1. Show the discriminator generated data\n # 2. Calculate generator loss\n # 3. 
Update generator parameters\n # ====== YOUR CODE: ======\n gen_optimizer.zero_grad()\n\n y_generated = dsc_model(generated_batch)\n\n gen_loss = gen_loss_fn(y_generated)\n gen_loss.backward()\n\n gen_optimizer.step()\n # ========================\n\n return dsc_loss.item(), gen_loss.item()", "def train_model_multi(self, embed_model, epoch, optimizer, writer, feature_matrix, iter, cfg):\n device = self.device\n batch_size = cfg.batch_size\n batches = int(np.ceil(len(feature_matrix) / batch_size))\n\n with torch.autograd.set_detect_anomaly(True):\n self.train()\n running_loss = 0.0\n for i in range(batches):\n batch_pos = torch.tensor(\n feature_matrix.iloc[i * batch_size:(i + 1) * batch_size][\"pos\"].values.astype(int)).to(device)\n batch_target = torch.tensor(\n feature_matrix.iloc[i * batch_size:(i + 1) * batch_size][self.class_columns].values).float().to(\n device)\n\n if i == batches - 1:\n batch_pos = torch.tensor(\n feature_matrix.iloc[i * batch_size:][\"pos\"].values.astype(int)).to(device)\n batch_target = torch.tensor(\n feature_matrix.iloc[i * batch_size:][self.class_columns].values).float().to(device)\n\n batch_embed = embed_model.pos_embed(batch_pos.long())\n\n \"Forward Pass\"\n batch_pred = self.forward(batch_embed, \"train\")\n loss = self.multi_label_criterion(batch_pred, batch_target)\n\n \"Backward and optimize\"\n optimizer.zero_grad()\n loss.backward()\n # self.plot_grad_flow(self.named_parameters())\n clip_grad_norm_(self.parameters(), max_norm=cfg.max_norm)\n optimizer.step()\n\n running_loss += loss.item()\n writer.add_scalar('classification testing loss', loss, iter + i)\n\n \"save model\"\n torch.save(self.state_dict(), cfg.model_dir + self.model_name + '.pth')\n\n epoch_loss = running_loss / batches\n print('epoch %s - loss : %s' % (epoch, epoch_loss))\n\n return iter + i, self, epoch_loss", "def train_step(x_batch, y_batch):\n feed_dict = {\n cnn.input_x: x_batch,\n cnn.input_y: y_batch,\n cnn.dropout_keep_prob: self.cfg['dropout_keep_prob']\n }\n _, step, summaries, loss, accuracy = sess.run(\n [train_op, global_step, train_summary_op, cnn.loss, cnn.accuracy],\n feed_dict)\n time_str = datetime.datetime.now().isoformat()\n self.logger.debug(\"{}: step {}, loss {:g}, acc {:g}\".format(time_str, step, loss, accuracy))\n train_summary_writer.add_summary(summaries, step)", "def train(self, training_data, cfg, **kwargs):\n pass", "def train_step(x_batch, y_batch):\r\n feed_dict = {\r\n cnn.input_x: x_batch,\r\n cnn.input_y: y_batch,\r\n cnn.dropout_keep_prob: FLAGS.dropout_keep_prob\r\n }\r\n\r\n _, step, summaries, loss, accuracy, predictions,y_actual = sess.run(\r\n [train_op, global_step, train_summary_op, cnn.loss, cnn.accuracy, cnn.predictions,cnn.y],\r\n feed_dict)\r\n\r\n time_str = datetime.datetime.now().isoformat()\r\n # print(\"train_f1_score:\", f1_score(y_actual, predictions, average=None))\r\n # print (predictions)\r\n # print(y_actual)\r\n print(\"{}: step {}, loss {:g}, acc {:g}\".format(time_str, step, loss, accuracy))\r\n return accuracy\r\n\r\n train_summary_writer.add_summary(summaries, step)", "def train(self):\n # Track initial loss/accuracy\n self.validation_epoch()\n for epoch in range(self.epochs):\n # Perform a full pass through all the training samples\n for batch_it, (X_batch, Y_batch) in enumerate(self.dataloader_train):\n # X_batch is the CIFAR10 images. Shape: [batch_size, 3, 32, 32]\n # Y_batch is the CIFAR10 image label. 
Shape: [batch_size]\n # Transfer images / labels to GPU VRAM, if possible\n X_batch = to_cuda(X_batch)\n Y_batch = to_cuda(Y_batch)\n\n # Perform the forward pass\n predictions = self.model(X_batch)\n # Compute the cross entropy loss for the batch\n loss = self.loss_criterion(predictions, Y_batch)\n\n # Backpropagation\n loss.backward()\n\n # Gradient descent step\n self.optimizer.step()\n \n # Reset all computed gradients to 0\n self.optimizer.zero_grad()\n # Compute loss/accuracy for all three datasets.\n if batch_it % self.validation_check == 0:\n self.validation_epoch()\n # Check early stopping criteria.\n if self.should_early_stop():\n print(\"Early stopping.\")\n return", "def train(self):\n self.dataGenerator.printDataStatistics()\n sE = len(self.dataGenerator.ids[\"train\"])// 32\n sV = len(self.dataGenerator.ids[\"validation\"])// 32\n self.model.fit_generator(\n generator=self.dataGenerator.trainingGenerator,\n steps_per_epoch= sE,\n epochs=2,\n validation_data=self.dataGenerator.validationGenerator,\n validation_steps=sV,\n # use_multiprocessing=True,\n # workers=2,\n )", "def train(self):\n self.mode = \"train\"\n self.online_net.train()", "def train(self):\n self.mode = \"train\"\n self.online_net.train()", "def train_with_iter(epoch, interval, batch_iter):\n alpha = 1\n model.train()\n # set the random seed for torch.generator() to shuffle the dataset\n trainSampler.set_epoch(epoch)\n if hvd.rank() == 0:\n print(\"=\"*50)\n\n if args.use_ldamloss:\n loss_weights = drw_weights(epoch, data_samples)\n ldamloss = LDAMLoss(data_samples, max_m=0.5, s=30, weight=loss_weights)\n\n epoch_start = time.time()\n for batch_idx, (data, target) in enumerate(train_loader):\n batch_start = time.time()\n\n if args.finetune:\n adjust_learning_rate_for_finetune(epoch, batch_idx)\n elif args.cosine_lr:\n adjust_learning_rate_for_cosine_decay(epoch, batch_idx)\n else:\n adjust_learning_rate(epoch, batch_idx)\n\n if args.cuda:\n if not args.fp16:\n data, target = data.cuda(), target.cuda()\n else:\n data, target = data.half().cuda(), target.cuda()\n\n # cutmix\n if args.cutmix:\n data, target_a, target_b, lam = cutmix_data(data, target, args.beta)\n output = model(data)\n if args.labelSmooth:\n loss = labelsmooth_loss(output, target_a) * lam + labelsmooth_loss(output, target_b) * (1. - lam)\n elif args.use_focalloss:\n loss = focal_loss(output, target_a) * lam + focal_loss(output, target_b) * (1. - lam)\n elif args.use_cbfocalloss:\n loss = CB_loss(output, target_a, data_samples, args.num_classes, \"focal\") * lam + CB_loss(output, target_a, data_samples, args.num_classes, \"focal\") * (1. - lam)\n elif args.use_ldamloss:\n loss = ldamloss(output, target_a) * lam + ldamloss(output, target_b) * (1. - lam) \n else:\n loss = F.cross_entropy(output, target_a) * lam + F.cross_entropy(output, target_b) * (1. 
- lam)\n\n else:\n output = model(data)\n if args.labelSmooth:\n loss = labelsmooth_loss(output, target)\n elif args.use_focalloss:\n loss = focal_loss(output, target)\n elif args.use_cbfocalloss:\n loss = CB_loss(output, target, data_samples, args.num_classes, \"focal\")\n elif args.use_ldamloss:\n loss = ldamloss(output, target)\n else:\n loss = F.cross_entropy(output, target)\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # training batch acc\n train_acc = accuracy(output, target)\n\n batch_iter += 1\n if hvd.rank() == 0:\n for param_group in optimizer.param_groups:\n learning_rate = param_group[\"lr\"]\n\n waste_time = time.time() - batch_start\n print(\"Training Epoch: [{}/{}] batch: [{}/{}] batchiter: [{}/{}] Loss: {:.4f} Accuracy: {:.4f} Learning_rate: {:.6f} Time: {:.2f} date: {}\".format(\n epoch, args.epochs, batch_idx+1, total_train_sampler, batch_iter, total_train_sampler *\n args.epochs, loss.item(), train_acc.item(\n ), learning_rate, waste_time, str(datetime.datetime.now())\n ))\n\n # train log writer\n if log_writer:\n # train batch\n log_writer.add_scalars(\n 'train/lv1', {\n 'loss': loss.item(),\n 'acc': train_acc.item()\n }, batch_iter\n )\n\n log_writer.add_scalar(\n 'train/batch_time', waste_time, batch_iter\n )\n log_writer.add_scalar(\n 'learning_rate', learning_rate, batch_iter)\n\n # validaiton with each epoch\n if args.val_dir is not None and args.val_dir != \"\":\n validation_rank, val_acc = validatin_acc()\n if hvd.rank() == 0:\n print(\"Validation Epoch: [{}/{}] batchiter: [{}/{}] Loss: {:.4f} RankLoss: {:.4f} Accuracy: {:.4f} Time: {:.2f}\".format(\n epoch, args.epochs, batch_iter, total_train_sampler *\n args.epochs, validation_rank[\"loss\"], validation_rank[\"rank_loss\"], val_acc[\"val_acc\"], time.time(\n ) - batch_start\n ))\n\n # validation_log\n if log_writer:\n log_writer.add_scalars(\n 'Val/batch', {\n 'rank_loss': validation_rank[\"rank_loss\"],\n 'loss': validation_rank[\"loss\"],\n },\n batch_iter\n )\n log_writer.add_scalars(\n 'Val/batch_acc', {\n 'accuracy': val_acc['val_acc']\n },\n batch_iter\n )\n\n log_writer.add_scalars(\n 'Val/epoch_acc', {\n 'accuracy': val_acc['val_acc']\n },\n epoch + 1\n )\n\n # save checkpoint with the epoch\n save_checkpoint(epoch, \"epoch\")\n\n if hvd.rank() == 0:\n print(\"Epoch [{}/{}] waste time is {}\".format(epoch,\n args.epochs, time.time() - epoch_start))\n\n return batch_iter", "def train_single_batch(self, batch_data, ratings=None):\n assert hasattr(self, \"model\"), \"Please specify the exact model !\"\n self.optimizer.zero_grad()\n loss = self.model.forward(batch_data)\n loss.backward()\n self.optimizer.step()\n loss = loss.item()\n return loss", "def train(self, training_data, testData, classNum, batchSize):\n # find the numbers for feature and label\n featureNum = training_data.shape[1] - 1\n\n # #this will find all the unique labels automatically, but will have problem when training data is lacking some labels\n # labelNum = len(np.unique(training_data[:, :1]))\n labelNum = classNum\n\n # get the number of nodes for each layer\n if \"hidden_layer\" in self.params and self.params[\"hidden_layer\"] is not None:\n nodeNum = [featureNum] + self.params[\"hidden_layer\"] + [labelNum]\n else:\n nodeNum = [featureNum, featureNum * 2, labelNum]\n\n # get the mode for initializing the weight\n if \"weightInitMode\" in self.params and self.params[\"weightInitMode\"] is not None:\n weightInitMode = self.params[\"weightInitMode\"]\n else:\n weightInitMode = 
None\n\n # get the momentum factor\n if \"momentumFactor\" in self.params:\n momentumFactor = self.params[\"momentumFactor\"]\n else:\n momentumFactor = 0.0\n\n self.clf = NeuralNetwork(training_data, nodeNum, weightInitMode, momentumFactor)\n iteration = 5\n totalIter = 0\n testSize = 100000\n while iteration > 0:\n\n if iteration < 10:\n self.clf.train(iteration, batchSize)\n totalIter += iteration\n print \"---------- Settings ----------\"\n print \"Examples :\", training_data.shape[0]\n print \"Batch size :\", batchSize\n print \"Alpha :\", self.clf.getAlpha()\n print \"Momentum factor :\", momentumFactor\n print \"# of Nodes in all layers :\", nodeNum\n print \"Training iteration so far:\", totalIter\n self.file.write(\"\\n\")\n self.file.write(\"---------- Settings ----------\" + \"\\n\")\n self.file.write(\"Examples : \" + str(training_data.shape[0]) + \"\\n\")\n self.file.write(\"Batch size : \" + str(batchSize) + \"\\n\")\n self.file.write(\"Alpha : \" + str(self.clf.getAlpha()) + \"\\n\")\n self.file.write(\"Momentum factor : \" + str(momentumFactor) + \"\\n\")\n self.file.write(\"# of Nodes in all layers : \" + str(nodeNum) + \"\\n\")\n self.file.write(\"Training iteration so far: \" + str(totalIter) + \"\\n\")\n self.test(training_data, \"training\")\n self.test(testData, \"testing\")\n iteration = 0\n\n while iteration >= testSize:\n self.clf.train(testSize, batchSize)\n totalIter += testSize\n print \"---------- Settings ----------\"\n print \"Examples :\", training_data.shape[0]\n print \"Batch size :\", batchSize\n print \"Alpha :\", self.clf.getAlpha()\n print \"Momentum factor :\", momentumFactor\n print \"# of Nodes in all layers :\", nodeNum\n print \"Training iteration so far:\", totalIter\n self.file.write(\"\\n\")\n self.file.write(\"---------- Settings ----------\" + \"\\n\")\n self.file.write(\"Examples : \" + str(training_data.shape[0]) + \"\\n\")\n self.file.write(\"Batch size : \" + str(batchSize) + \"\\n\")\n self.file.write(\"Alpha : \" + str(self.clf.getAlpha()) + \"\\n\")\n self.file.write(\"Momentum factor : \" + str(momentumFactor) + \"\\n\")\n self.file.write(\"# of Nodes in all layers : \" + str(nodeNum) + \"\\n\")\n self.file.write(\"Training iteration so far: \" + str(totalIter) + \"\\n\")\n self.test(training_data, \"training\")\n self.test(testData, \"testing\")\n iteration -= testSize\n\n if iteration > 0:\n self.clf.train(iteration, batchSize)\n totalIter += iteration\n print \"---------- Settings ----------\"\n print \"Examples :\", training_data.shape[0]\n print \"Batch size :\", batchSize\n print \"Alpha :\", self.clf.getAlpha()\n print \"Momentum factor :\", momentumFactor\n print \"# of Nodes in all layers :\", nodeNum\n print \"Training iteration so far:\", totalIter\n self.file.write(\"\\n\")\n self.file.write(\"---------- Settings ----------\" + \"\\n\")\n self.file.write(\"Examples : \" + str(training_data.shape[0]) + \"\\n\")\n self.file.write(\"Batch size : \" + str(batchSize) + \"\\n\")\n self.file.write(\"Alpha : \" + str(self.clf.getAlpha()) + \"\\n\")\n self.file.write(\"Momentum factor : \" + str(momentumFactor) + \"\\n\")\n self.file.write(\"# of Nodes in all layers : \" + str(nodeNum) + \"\\n\")\n self.file.write(\"Training iteration so far: \" + str(totalIter) + \"\\n\")\n self.test(training_data, \"training\")\n self.test(testData, \"testing\")\n iteration = 0\n\n print \"\"\n restart = raw_input(\"Do you want to restart? 
(Y/N)\")\n if restart.upper() == \"Y\":\n totalIter = 0\n print \"Current Alpha is\", self.clf.getAlpha()\n alpha = raw_input(\"What alpha ?\")\n self.clf.setAlpha(float(alpha))\n self.clf.initTheta()\n self.file.write(\"\\n\")\n self.file.write(\"*****************************************************\\n\")\n self.file.write(\"Re-initialize trail with alpha = \" + str(alpha) + \"\\n\")\n self.file.write(\"*****************************************************\\n\")\n\n print \"\"\n iteration = raw_input(\"How many iteration do you want to train the model?\")\n try:\n iteration = int(iteration)\n except:\n iteration = raw_input(\"Please input an integer\")\n iteration = 1\n print \"Total training iterations:\", totalIter", "def train():\n\t# 1、make dataloader\n\ttrain_loader, val_loader, num_query, num_class = make_data_loader(cfg)\n\t#print(\"num_query:{},num_class:{}\".format(num_query,num_class))\n\n\t# 2、make model\n\tmodel = build_model(cfg, num_class)\n\n\t# model.eval()\n\t# x = model(img_tensor)\n\t# print(x.shape)\n\t# 3、 make optimizer\n\toptimizer = make_optimizer(cfg, model)\n\n\t# 4、 make lr_scheduler\n\tscheduler = make_lr_scheduler(cfg, optimizer)\n\n\t# 5、 make loss_func\n\tif cfg.MODEL.PCB_NECK:\n\t\t# make loss specificially for pcb \n\t\tloss_func = get_softmax_triplet_loss_fn(cfg, num_class)\n\telse:\n\t\tloss_func = make_loss(cfg, num_class)\n\n\t# get paramters\n\tlog_period = cfg.OUTPUT.LOG_PERIOD \n\tckpt_period =cfg.OUTPUT.CHECKPOINT_PERIOD\n\teval_period = cfg.OUTPUT.EVAL_PERIOD\n\toutput_dir = cfg.OUTPUT.ROOT_DIR\n\tdevice = cfg.MODEL.DEVICE\n\tepochs = cfg.SOLVER.MAX_EPOCHS\n\tuse_gpu = device == \"cuda\"\n\tuse_neck = cfg.MODEL.NECK or cfg.MODEL.LEARN_REGION \n\t# how many batch for each log\n\tbatch_size = cfg.SOLVER.IMGS_PER_BATCH\n\tbatch_num = len(train_loader) \n\t\n\tlog_iters = batch_num // log_period\n\tpretrained = cfg.MODEL.PRETRAIN_PATH != ''\n\tparallel = cfg.MODEL.PARALLEL \t\n\tgrad_clip = cfg.DARTS.GRAD_CLIP \n\n\tfeat_norm = cfg.TEST.FEAT_NORM \n\tckpt_save_path = cfg.OUTPUT.ROOT_DIR + cfg.OUTPUT.CKPT_DIR\n\tif not os.path.exists(ckpt_save_path):\n\t\tos.makedirs(ckpt_save_path)\n\n\n\t# create *_result.xlsx\n\t# save the result for analyze\n\tname = (cfg.OUTPUT.LOG_NAME).split(\".\")[0] + \".xlsx\"\n\tresult_path = cfg.OUTPUT.ROOT_DIR + name\n\n\twb = xl.Workbook()\n\tsheet = wb.worksheets[0]\n\ttitles = ['size/M','speed/ms','final_planes', 'acc', 'mAP', 'r1', 'r5', 'r10', 'loss',\n\t\t\t 'acc', 'mAP', 'r1', 'r5', 'r10', 'loss','acc', 'mAP', 'r1', 'r5', 'r10', 'loss']\n\tsheet.append(titles)\n\tcheck_epochs = [40, 80, 120, 160, 200, 240, 280, 320, 360, epochs]\n\tvalues = []\n\n\tlogger = logging.getLogger('MobileNetReID.train')\n\t\n\t# count parameter\n\tsize = count_parameters(model)\n\tlogger.info(\"the param number of the model is {:.2f} M\".format(size))\n\t\n\tvalues.append(format(size, '.2f'))\n\tvalues.append(model.final_planes)\n\n\tlogger.info(\"Start training\")\n\t\n\t#count = 183, x, y = batch -> 11712 for train\n\tif pretrained:\n\t\tstart_epoch = model.start_epoch\n\n\tif parallel:\n\t\tmodel = nn.DataParallel(model)\n\n\tif use_gpu:\n\t\t# model = nn.DataParallel(model)\n\t\tmodel.to(device)\n\t\n\t# save the best model\n\tbest_mAP, best_r1 = 0., 0.\n\tis_best = False\n\t# batch : img, pid, camid, img_path\n\tavg_loss, avg_acc = RunningAverageMeter(), RunningAverageMeter()\n\tavg_time, global_avg_time = AverageMeter(), AverageMeter()\n\tglobal_avg_time.reset()\n\tfor epoch in range(epochs):\n\t\tscheduler.step()\n\n\t\tif 
pretrained and epoch < start_epoch - 1:\n\t\t\tcontinue\n\t\n\t\tmodel.train()\n\t\t# sum_loss, sum_acc = 0., 0.\n\t\tavg_loss.reset()\n\t\tavg_acc.reset()\n\t\tavg_time.reset()\n\t\tfor i, batch in enumerate(train_loader):\n\n\t\t\tt0 = time.time()\n\t\t\timgs,labels = batch\n\n\t\t\tif use_gpu:\n\t\t\t\timgs = imgs.to(device)\n\t\t\t\tlabels = labels.to(device)\n\n\t\t\tres = model(imgs)\n\t\t\t# score, feat = model(imgs)\n\t\t\t# loss = loss_func(score, feat, labels)\n\t\t\tloss, acc = compute_loss_acc(use_neck, res, labels, loss_func)\n\t\t\t\n\t\t\tloss.backward()\n\t\t\tif grad_clip != 0:\n\t\t\t\tnn.utils.clip_grad_norm(model.parameters(), grad_clip)\n\n\t\t\toptimizer.step()\n\n\t\t\toptimizer.zero_grad()\n\n\t\t\t# acc = (score.max(1)[1] == labels).float().mean()\n\n\t\t\t# sum_loss += loss\n\t\t\t# sum_acc += acc \n\t\t\tt1 = time.time()\n\t\t\tavg_time.update((t1 - t0) / batch_size)\n\t\t\tavg_loss.update(loss)\n\t\t\tavg_acc.update(acc)\n\n\t\t\t#log the info \n\t\t\tif (i+1) % log_iters == 0:\n\n\t\t\t\tlogger.info(\"epoch {}: {}/{} with loss is {:.5f} and acc is {:.3f}\".format(\n\t\t\t\t\t epoch+1, i+1, batch_num, avg_loss.avg, avg_acc.avg))\n\n\t\tlr = optimizer.state_dict()['param_groups'][0]['lr']\n\t\tlogger.info(\"end epochs {}/{} with lr: {:.5f} and avg_time is {:.3f} ms\".format(epoch+1, epochs, lr, avg_time.avg * 1000))\n\t\tglobal_avg_time.update(avg_time.avg)\n\t\t# change the lr \n\n\t\t# eval the model \n\t\tif (epoch+1) % eval_period == 0 or (epoch + 1) == epochs :\n\t\t\t\n\t\t\tmodel.eval()\n\t\t\tmetrics = R1_mAP(num_query, use_gpu = use_gpu, feat_norm = feat_norm)\n\n\t\t\twith torch.no_grad():\n\n\t\t\t\tfor vi, batch in enumerate(val_loader):\n\t\t\t\t\t\n\t\t\t\t\timgs, labels, camids = batch\n\n\t\t\t\t\tif use_gpu:\n\t\t\t\t\t\timgs = imgs.to(device)\n\n\t\t\t\t\tfeats = model(imgs)\n\t\t\t\t\tmetrics.update((feats,labels, camids))\n\n\t\t\t\t#compute cmc and mAP\n\t\t\t\tcmc, mAP = metrics.compute()\n\t\t\t\tlogger.info(\"validation results at epoch:{}\".format(epoch + 1))\n\t\t\t\tlogger.info(\"mAP:{:.2%}\".format(mAP))\n\t\t\t\tfor r in [1,5,10]:\n\t\t\t\t\tlogger.info(\"CMC curve, Rank-{:<3}:{:.2%}\".format(r,cmc[r-1]))\t\n\n\t\t\t\t# determine whether cur model is the best \n\t\t\t\tif mAP > best_mAP:\n\t\t\t\t\tis_best = True\n\t\t\t\t\tbest_mAP = mAP\n\t\t\t\t\tlogger.info(\"Get a new best mAP\")\n\t\t\t\tif cmc[0] > best_r1:\n\t\t\t\t\tis_best = True\n\t\t\t\t\tbest_r1 = cmc[0]\n\t\t\t\t\tlogger.info(\"Get a new best r1\")\n\n\t\t\t\t# add the result to sheet\n\t\t\t\tif (epoch + 1) in check_epochs:\n\t\t\t\t\tval = [avg_acc.avg, mAP, cmc[0], cmc[4], cmc[9]]\n\t\t\t\t\tchange = [format(v * 100, '.2f') for v in val]\n\t\t\t\t\tchange.append(format(avg_loss.avg, '.3f'))\n\t\t\t\t\tvalues.extend(change)\n\n\n\t\t# we hope that eval_period == ckpt_period or eval_period == k* ckpt_period where k is int\t\t\t\n\t\t# whether to save the model\n\t\tif (epoch+1) % ckpt_period == 0 or is_best:\n\n\t\t\tif parallel:\n\t\t\t\ttorch.save(model.module.state_dict(), ckpt_save_path + \"checkpoint_{}.pth\".format(epoch + 1 ))\n\t\t\telse:\n\t\t\t\ttorch.save(model.state_dict(), ckpt_save_path + \"checkpoint_{}.pth\".format(epoch + 1 ))\n\n\t\t\tlogger.info(\"checkpoint {} saved !\".format(epoch + 1))\n\n\t\t\tif is_best:\n\t\t\t\tif parallel:\n\t\t\t\t\ttorch.save(model.module.state_dict(), ckpt_save_path + \"best_ckpt.pth\")\n\t\t\t\telse:\n\t\t\t\t\ttorch.save(model.state_dict(), ckpt_save_path + \"best_ckpt.pth\")\n\t\t\t\tlogger.info(\"best checkpoint was 
saved\")\n\t\t\t\tis_best = False\n\t\n\tvalues.insert(1, format(global_avg_time.avg * 1000, '.2f'))\n\tsheet.append(values)\n\twb.save(result_path)\n\n\tlogger.info(\"training is end, time for per imgs is {} ms\".format(global_avg_time.avg *1000))", "def run_epoch(self, dataset, k=20, training=True):\n start_time = time.time()\n \n # initialize\n losses = []\n recalls = []\n mrrs = []\n ##增加###\n zippers = []\n ##增加###\n optimizer = self.optimizer\n hidden = self.gru.init_hidden()\n if not training:\n self.gru.eval()\n device = self.device\n \n def reset_hidden(hidden, mask):\n \"\"\"Helper function that resets hidden state when some sessions terminate\"\"\"\n if len(mask) != 0:\n hidden[:, mask, :] = 0\n \n return hidden\n\n # Start the training loop\n loader = SessionDataLoader(dataset, batch_size=self.batch_size)\n # 一个bach一个bach的迭代,每次迭代是一个input:tensor([ 31, 26, 27, 29, 24]);一个output:tensor([ 31, 26, 28, 17, 24])\n #\n if training==True:\n n_items = len(dataset.items)\n # sampling 增加额外负样本采样\n if self.n_sample > 0:\n pop = dataset.df.groupby('ItemId').size() # item的流行度supp,数据如下格式\n # ItemId\n # 214507331 1\n # 214507365 1\n # 将sample_alpha设置为1会导致基于流行度的采样,将其设置为0会导致均匀采样\n pop = pop[dataset.itemmap[\n dataset.item_key].values].values ** self.sample_alpha # item选择作为样本的概率为supp ^ sample_alpha\n pop = pop.cumsum() / pop.sum()\n pop[-1] = 1\n if self.sample_store:\n generate_length = self.sample_store // self.n_sample\n if generate_length <= 1:\n sample_store = 0\n print('No example store was used')\n else:\n neg_samples = self.generate_neg_samples(n_items, pop, generate_length)\n sample_pointer = 0\n else:\n print('No example store was used')\n\n for input, target, mask in loader:\n input = input.to(device)\n target = target.to(device)\n # print(input)\n # print(target)\n #额外的 SAMPLING THE OUTPUT\n if self.n_sample>0 and training:\n if self.sample_store:\n if sample_pointer == generate_length:\n neg_samples = self.generate_neg_samples(n_items, pop, generate_length)\n sample_pointer = 0\n sample = neg_samples[sample_pointer]\n sample_pointer += 1\n else:\n sample = self.generate_neg_samples(pop, 1)\n y = torch.LongTensor(np.hstack([target, sample]))\n else:\n y = target #不增加额外采样\n # reset the hidden states if some sessions have just terminated\n hidden = reset_hidden(hidden, mask).detach()\n # Go through the GRU layer\n logit, hidden = self.gru(input, target, hidden)\n # Output sampling #理解,很重要!!!!!!!\n y = y.to(device)\n logit_sampled = logit[:, y]\n # Calculate the mini-batch loss\n loss = self.loss_fn(logit_sampled)\n with torch.no_grad():\n recall, mrr = E.evaluate(logit, target, k)\n losses.append(loss.item()) \n recalls.append(recall)\n mrrs.append(mrr)\n # Gradient Clipping(Optional)\n if self.clip_grad != -1:\n for p in self.gru.parameters():\n p.grad.data.clamp_(max=self.clip_grad)\n # Mini-batch GD\n if training:\n # Backprop\n loss.backward()\n optimizer.step()\n optimizer.zero_grad() # flush the gradient after the optimization\n\n results = dict()\n results['loss'] = np.mean(losses)\n results['recall'] = np.mean(recalls)\n results['mrr'] = np.mean(mrrs)\n \n end_time = time.time()\n results['time'] = (end_time - start_time) / 60\n \n if not training:\n self.gru.train()\n\n return results", "def train(self, mode=True):\n super().train(mode)\n if mode and self.norm_eval:\n for m in self.modules():\n if isinstance(m, _BatchNorm):\n m.eval()", "def tf_mlp_multiclass_clf(X_train , y_train, Gazes_train,\n X_val, y_val, Gazes_val,\n X_test, y_test, Gazes_test,\n SAVE_PATH, MAX_NB_ITER, 
BATCH_SIZE, trainbool, category, category_number):\n with tf.Graph().as_default(): \n batch_X_train, batch_y_train, batch_Gazes_train, batch_X_val, batch_y_val, batch_Gazes_val = \\\n myio.batch_batch(X_train, y_train, Gazes_train, X_val, y_val, Gazes_val, BATCH_SIZE) \n #total number of instances for one example\n instance_number = np.shape(X_test)[1]\n def save_model(saver,sess,save_path):\n path = saver.save(sess, save_path)\n print 'model save in %s'%path\n def model(x, w_h, w_o, b_h, b_o):\n h = tf.nn.relu(tf.matmul(x, w_h)+b_h)\n output = tf.nn.softmax(tf.matmul(h, w_o) + b_o)\n \n return output\n def evaluation(X, y, gaze_pred, sess):\n X = np.reshape(X, [-1, 2048])\n gaze_pred = sess.run(gaze_pred, feed_dict={x:X})\n gaze_pred = np.reshape(gaze_pred, [-1, instance_number*category_number])\n# ap=metric.getAP(zip(y, np.min(gaze_pred,axis=1)))\n label_pred = np.argmax(gaze_pred,axis=1)%category_number\n y = np.where(y==1)[1]\n\n if np.shape(y)[0] == np.shape(label_pred)[0] * instance_number:\n \"\"\"The training dataset needs to reshape label vector,\n ,but the testset (organised as a bag) does not need this.\"\"\"\n y = y[::instance_number]\n return accuracy_score(label_pred, y)\n # lr is just X*w so this model line is pretty simple\n W1 = tf.Variable(tf.random_normal([2048, 1000], stddev=0.01,dtype=tf.float64))\n W2 = tf.Variable(tf.random_normal([1000, category_number],stddev=0.01,dtype=tf.float64))\n b1 = tf.Variable(tf.zeros([1, 1000],dtype=tf.float64))\n b2 = tf.Variable(tf.zeros([1, category_number],dtype=tf.float64))\n # w = tf.get_variable(\"w1\", [28*28, 10])\n x = tf.placeholder(tf.float64, shape=[None, 2048],name=\"input\")\n y_ = tf.placeholder(tf.float64, shape=[None, category_number],name=\"gt_output\")\n gaze_ = tf.placeholder(tf.float64, shape=[None, category_number],name=\"gaze\")\n gaze_pred = model(x, W1, W2, b1, b2)\n \n# regularizers = (tf.nn.l2_loss(W1) + tf.nn.l2_loss(W2) +\n# tf.nn.l2_loss(b1) + tf.nn.l2_loss(b2))\n# cross entropy \n \n loss_mean = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(gaze_pred), reduction_indices=[1]))\n# L2-loss\n# loss_mean = tf.reduce_mean(tf.pow(gaze_-gaze_pred, 2))\n# loss_mean += regularizers*5e-4\n train_op = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(loss_mean)\n\n best_val_acc= -1\n EARLY_STOP_PATIENCE=10\n current_epoch=0\n min_nb_iter=50\n saver = tf.train.Saver()\n init_op = tf.initialize_all_variables()\n # Launch the graph in a session\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)\n with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:\n with tf.device('/cpu:0'):\n if trainbool:\n sess.run(init_op)\n print \"begin training\"\n for i in range(MAX_NB_ITER):\n mean_train_lm=0\n batches = range(np.shape(batch_X_train)[0])\n random.shuffle(batches)\n for j in batches:\n _, loss_train = sess.run([train_op, loss_mean], feed_dict={x:batch_X_train[j],\n gaze_:batch_Gazes_train[j],\n y_:batch_y_train[j]})\n mean_train_lm += loss_train\n \n print \"epoch:%d, mean_train_loss:%f\"%(i, mean_train_lm/np.shape(batch_X_train)[0])\n loss_val = sess.run(loss_mean, feed_dict={x:batch_X_val[0],\n gaze_:batch_Gazes_val[0],\n y_:batch_y_val[0]})\n print \"epoch:%d, mean_val_loss:%f\"%(i, loss_val)\n train_acc = evaluation(X_train, y_train, gaze_pred, sess)\n print \"epoch:%d, acc_train:%f\"%(i, train_acc,)\n val_acc = evaluation(X_val, y_val, gaze_pred, sess)\n print \"epoch:%d, acc_val:%f\"%(i, val_acc)\n test_acc = evaluation(X_test, y_test, gaze_pred, sess)\n print \"epoch:%d, 
acc_test:%f\"%(i, test_acc,)\n \n if val_acc > best_val_acc:\n print \"save model of epoch:%d\"%i\n current_epoch = i\n best_val_acc = val_acc\n save_model(saver, sess, SAVE_PATH)\n elif i > min_nb_iter and (i - current_epoch) >= EARLY_STOP_PATIENCE:\n print 'early stopping at epoch %d'%i\n break \n \n f= open(\"/local/wangxin/results/upmc_food/tf_mlp_multiclass_clf/ap_res.txt\",\"a+\")\n f.write(\" \".join([SAVE_PATH, category, str(train_acc), str(val_acc), str(test_acc)])+'\\n')\n f.close()\n if not trainbool:\n print \"load model from %s\"%SAVE_PATH\n saver.restore(sess, SAVE_PATH)", "def train_on_batch(network, optimizer, loss_fn, metrics_fn, X, targets,\n config):\n optimizer.zero_grad()\n\n # Extract the per primitive features\n F = network.compute_features(X)\n predictions = compute_predictions_from_features(\n F, network, targets, config\n )\n\n # Do the forward pass to predict the primitive_parameters\n batch_loss = loss_fn(predictions, targets, config[\"loss\"])\n metrics_fn(predictions, targets)\n # Do the backpropagation\n batch_loss.backward()\n nn.utils.clip_grad_norm_(network.parameters(), 1)\n # Do the update\n optimizer.step()\n\n return batch_loss.item()", "def train_step(x_batch, y_batch):\n\n feed_dict = {\n cnn.input_x: x_batch,\n cnn.input_y: y_batch,\n cnn.dropout_keep_prob: FLAGS.dropout_keep_prob\n }\n _, step, summaries, loss, accuracy = sess.run(\n [train_op, global_step, train_summary_op, cnn.loss,\n cnn.accuracy],\n feed_dict)\n time_str = datetime.datetime.now().isoformat()\n logger.info(\"{}: step {}, loss {:g}, acc {:g}\".format(\n time_str, step, loss, accuracy))\n train_summary_writer.add_summary(summaries, step)", "def optimize(self, session, batch):\n X_batch, y_batch = zip(*batch) # Unzip batch, each returned element is a tuple of lists\n\n if(self.FLAGS.augment):\n X_batch = augment_batch(X_batch, self.FLAGS.img_H, self.FLAGS.img_W)\n\n input_feed = {}\n\n input_feed[self.X] = X_batch\n input_feed[self.y] = y_batch\n input_feed[self.is_training] = True\n input_feed[self.learning_rate] = self.current_lr\n\n output_feed = []\n\n output_feed.append(self.train_op)\n output_feed.append(self.loss)\n output_feed.append(self.global_norm)\n output_feed.append(self.global_step)\n\n\n if self.FLAGS.tb is True:\n output_feed.append(self.train_loss_tb)\n output_feed.append(self.global_norm_tb)\n output_feed.append(self.learning_rate_tb)\n output_feed.append(self.y_out_stddev_tb)\n output_feed.append(self.y_out_max_tb)\n tr, loss, norm, step, train_tb, norm_tb, lr_tb, y_out_stddev_tb, y_out_max_tb = session.run(output_feed, input_feed)\n self.tensorboard_writer.add_summary(train_tb, step)\n self.tensorboard_writer.add_summary(norm_tb, step)\n self.tensorboard_writer.add_summary(lr_tb, step)\n self.tensorboard_writer.add_summary(y_out_stddev_tb, step)\n self.tensorboard_writer.add_summary(y_out_max_tb, step)\n else:\n tr, loss, norm, step = session.run(output_feed, input_feed)\n\n return loss, norm, step", "def flow(self, batch_size=32):\n nb_batches = int(len(self.image_ids_in_subset) / batch_size) + 1\n while True:\n # Before each epoch we shuffle the images' ids\n random.shuffle(self.image_ids_in_subset)\n\n for i in range(nb_batches):\n # We first get all the image ids for the next batch\n current_bach = self.image_ids_in_subset[i*batch_size:(i+1)*batch_size]\n X_batch = []\n Y_batch = []\n\n for image_id in current_bach:\n # Load the image and resize it. 
We get a PIL Image object\n img = image.load_img(self.get_img_path(int(image_id)), grayscale=False, target_size=(cfg.IMAGE.IMG_SIZE, cfg.IMAGE.IMG_SIZE))\n # Cast the Image object to a numpy array and put the channel has the last dimension\n img_arr = image.img_to_array(img, data_format='channels_last')\n X_batch.append(img_arr)\n # Y_batch.append(self.id_to_label[image_id])\n Y_batch.append(self.get_labels(image_id))\n\n # resize X_batch in (batch_size, IMG_HEIGHT, IMG_WIDTH, 3)\n X_batch = np.reshape(X_batch, (-1, cfg.IMAGE.IMG_SIZE, cfg.IMAGE.IMG_SIZE, 3))\n # resize Y_batch in (None, nb_classes)\n Y_batch = np.reshape(Y_batch, (-1, self.nb_classes))\n\n # substract mean values from imagenet\n X_batch = preprocess_input(X_batch, data_format='channels_last')\n yield(X_batch, Y_batch)", "def batch_fit(self, train_loader: torch.utils.data.DataLoader,\n test_loader: torch.utils.data.DataLoader,\n train_size: int, test_size: int, epochs: int = 1,\n calc_mapk: bool = True):\n\n for epoch in range(epochs):\n stats = {'epoch': epoch+1}\n\n print('Training begins...')\n train_loss = self._training(train_loader, train_size)\n stats['train_loss'] = train_loss\n\n print('Validation begins...')\n if calc_mapk:\n print('validation with mapk')\n val_loss, val_mapk = self._validation(\n test_loader, test_size, calc_mapk)\n stats['val_mapk'] = val_mapk\n else:\n print('validation without mapk')\n val_loss = self._validation(\n test_loader, test_size, calc_mapk)\n stats['val_loss'] = val_loss\n print(stats)\n\n self.metrics.append(stats)", "def forward(self, batch, is_train=False):\n\t\tbatch_tensors = batch[0]\n\t\ttokens, context_word_emb, char_index, text_len, gold_labels = batch_tensors\n\n\t\tn_sentences, max_sentence_length = tokens.shape[0], tokens.shape[1]\n\t\ttext_len_mask = self.sequence_mask(lengths=text_len, maxlen=max_sentence_length)\n\n\t\tcontext_emb_list = []\n\t\tcontext_emb_list.append(context_word_emb)\n\n\t\t#TODO add char_emb\n\t\t# pdb.set_trace()\n\t\tchar_emb = self.char_embbedings(torch.as_tensor(char_index, device=self.device, dtype=torch.int64))\n\t\t_, _, max_char_len, self.char_emb_size = char_emb.shape\n\t\tflattened_char_emb = char_emb.reshape([n_sentences * max_sentence_length, max_char_len, self.char_emb_size]).transpose_(1,2)\t# n_words, max_word_len, char_emb_size (N, L, C)->(N, C, L)\n\t\tflattened_aggregated_char_emb = self.char_emb_cnn(flattened_char_emb)\n\t\taggregated_char_emb = flattened_aggregated_char_emb.reshape(n_sentences, max_sentence_length, flattened_aggregated_char_emb.shape[1])\n\t\tcontext_emb_list.append(aggregated_char_emb)\n\t\t# pdb.set_trace()\n\t\tcontext_emb = torch.cat(context_emb_list, 2)\n\t\tcontext_emb = self.lexical_Dropout(context_emb)\n\n\t\tcandidate_scores_mask = torch.logical_and(torch.unsqueeze(text_len_mask,dim=1),torch.unsqueeze(text_len_mask,dim=2)) \n\t\tcandidate_scores_mask = torch.triu(candidate_scores_mask, diagonal=0)\n\t\tflattened_candidate_scores_mask = candidate_scores_mask.view(-1)\n\n\t\t# pdb.set_trace()\n\t\t#----------through rnn------------\n\t\tpack = pack_padded_sequence(context_emb, text_len, batch_first=True, enforce_sorted=False)\n\t\tpack, _ = self.rnn(pack)\n\t\tcontext_outputs, _ = pad_packed_sequence(pack, batch_first=True, total_length=context_emb.shape[1])\n\n\t\t# context_outputs = self.mlpx(context_emb)\n\t\t#--------biaffine----------------\n\t\tcandidate_starts_emb = self.start_project(context_outputs)\n\t\tcandidate_end_emb = self.end_project(context_outputs)\n\n\t\t\n\t\tcandidate_ner_scores = 
self.bilinear(candidate_starts_emb, candidate_end_emb)\n\t\tcandidate_ner_scores = candidate_ner_scores.reshape(-1,self.num_types+1)[flattened_candidate_scores_mask==True]\n\t\t# pdb.set_trace()\n\t\tif is_train:\n\t\t\tloss = self.criterion(input=candidate_ner_scores, target=gold_labels)\n\t\t\tloss = loss.sum()\n\t\telse:\n\t\t\tloss = 0\n\t\n\t\treturn loss, candidate_ner_scores", "def run_training_batch(self, session, batch):\n feed_dict = self.batch_to_feed(batch)\n feed_dict[self.use_dropout_placeholder] = 1.0\n fetches = [self.loss, self.train_op]\n\n # options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)\n # run_metadata = tf.RunMetadata()\n \n loss, _ = session.run(fetches, feed_dict=feed_dict)\n # loss, _ = session.run(fetches,\n # feed_dict=feed_dict,\n # options=options,\n # run_metadata=run_metadata)\n \n # fetched_timeline = timeline.Timeline(run_metadata.step_stats)\n # chrome_trace = fetched_timeline.generate_chrome_trace_format()\n # with open('timeline.json', 'w') as f:\n # f.write(chrome_trace)\n \n return loss", "def _train_epoch(self, train_batches, dropout_keep_prob, data, batch_size, save_dir, save_prefix):\n pad_id = self.vocab.get_id(self.vocab.pad_token)\n total_num, total_loss = 0, 0\n log_every_n_batch, n_batch_loss = 50, 0\n eval_every_n_batch = (len(data.train_set) - 1) / (8 * batch_size)\n for bitx, batch in enumerate(train_batches, 1): \n feed_dict = {self.p: batch['passage_token_ids'],\n self.q: batch['question_token_ids'],\n self.pc: batch['passage_char_ids'],\n self.qc: batch['question_char_ids'],\n self.p_em: batch['passage_em'],\n self.p_pos: batch['passage_pos'],\n self.q_pos: batch['question_pos'],\n self.p_length: batch['passage_length'],\n self.q_length: batch['question_length'],\n self.start_label: batch['start_id'],\n self.end_label: batch['end_id'],\n self.pr: batch['passage_rank'],\n self.dropout_keep_prob: dropout_keep_prob}\n\n _, loss = self.sess.run([self.train_op, self.loss], \n feed_dict=feed_dict)\n\n total_loss += loss * len(batch['raw_data'])\n total_num += len(batch['raw_data'])\n n_batch_loss += loss\n if log_every_n_batch > 0 and bitx % log_every_n_batch == 0:\n self.logger.info('Average loss from batch {} to {} is {}'.format(\n bitx - log_every_n_batch + 1, bitx, n_batch_loss / log_every_n_batch))\n n_batch_loss = 0\n \n if eval_every_n_batch > 0 and bitx % eval_every_n_batch == 0:\n self.logger.info('Evaluating the model ...')\n if data.dev_set is not None:\n eval_batches = data.gen_mini_batches('dev', batch_size, pad_id, shuffle=False)\n eval_loss, bleu_rouge = self.evaluate(eval_batches)\n self.logger.info('Dev eval loss {}'.format(eval_loss))\n self.logger.info('Dev eval result: {}'.format(bleu_rouge))\n\n if bleu_rouge['ROUGE-L'] > self.max_rouge_l:\n self.save(save_dir, save_prefix)\n self.max_rouge_l = bleu_rouge['ROUGE-L']\n else:\n self.logger.warning('No dev set is loaded for evaluation in the dataset!')\n\n return 1.0 * total_loss / total_num", "def iteration_one(self, epoch, data_loader, train=True):\n str_code = \"train\" if train else \"test\"\n\n avg_loss = 0.0\n total_correct = 0\n total_element = 0\n len_data_loader = len(data_loader)\n\n for i, data in tqdm(enumerate(data_loader)):\n # 0. batch_data will be sent into the device(GPU or cpu)\n data = {key: value.to(self.device) for key, value in data.items()}\n\n # 1. 
forward the next_sentence_prediction and masked_lm model\n # next_sent_output, mask_lm_output = self.model.forward(data[\"bert_input\"], data[\"segment_label\"])\n mask_lm_output = self.model.forward(data[\"bert_input\"], distance_matrix=data[\"dist_mat\"])\n\n # 2. NLLLoss of predicting masked token word\n loss = self.loss_masked(mask_lm_output.transpose(1, 2), data[\"bert_label\"])\n\n # 3. backward and optimization only in train\n if train:\n if self.lr_scheduler == 'cycle':\n self.optim.zero_grad()\n loss.backward()\n self.optim.step()\n self.optim_schedule.step()\n else:\n self.optim_schedule.zero_grad()\n loss.backward()\n self.optim_schedule.step_and_update_lr()\n\n # masked token prediction accuracy\n idx = (data[\"bert_label\"] > 0)\n # print(mask_lm_output.transpose(1, 2).argmax(dim=1)[idx])\n # print(mask_lm_output.transpose(1, 2).argmax(dim=1)[idx].eq(data[\"bert_label\"][idx]))\n correct = mask_lm_output.transpose(1, 2).argmax(dim=1)[idx].eq(data[\"bert_label\"][idx]).sum().item()\n batch_n_element = data[\"bert_label\"][idx].nelement()\n total_correct += correct\n total_element += batch_n_element\n # print(correct, data[\"bert_label\"][idx].nelement())\n\n # next sentence prediction accuracy\n # correct = next_sent_output.argmax(dim=-1).eq(data[\"is_next\"]).sum().item()\n # total_correct += correct\n # total_element += data[\"is_next\"].nelement()\n\n avg_loss += loss.item()\n\n if train:\n # print(\"write train loss\")\n self.writer.add_scalar('Loss/train', loss.item(), epoch*len_data_loader + i)\n self.writer.add_scalar('Accuracy/train', 100.0 * correct / batch_n_element, epoch*len_data_loader + i)\n else:\n self.writer.add_scalar('Loss/test', loss.item(), epoch*len_data_loader + i)\n self.writer.add_scalar('Accuracy/test', 100.0 * correct / batch_n_element, epoch*len_data_loader + i)\n # print(i, loss)\n # self.writer.add_scalar('Loss', loss, epoch*len_data_loader + i)\n # self.writer.add_scalar('Accuracy', 100.0 * correct / batch_n_element, epoch*len_data_loader + i)\n\n # print(\"EP%d_%s, avg_loss=\" % (epoch, str_code), avg_loss / len_data_loader)\n print(\"EP%d_%s, avg_loss=\" % (epoch, str_code), avg_loss / len_data_loader, \"total_acc=\",\n total_correct * 100.0 / total_element)", "def train_one_epoch(self):\n self.model.train()\n for batch_idx, (imgs, labels) in enumerate(self.tr_loader):\n imgs, labels = imgs.to(self.device), labels.to(self.device)\n self.optimizer.zero_grad()\n\n outputs, aux_outputs = self.model(imgs).values()\n loss1 = self.criterion(outputs, labels)\n loss2 = self.criterion(aux_outputs, labels)\n self.loss = loss1 + 0.3*loss2\n\n _, preds = torch.max(outputs, 1)\n acc = preds.eq(labels.view_as(preds)).sum().item() / self.cfg.bs\n\n self.loss.backward()\n self.optimizer.step()\n \n self.summary_writer.add_scalars('scalar_group', \n { 'loss_end' : loss1.item(),\n 'loss_aux' : loss2.item(),\n 'loss_total' : self.loss.item(),\n 'accuracy' : acc},\n self.current_iteration)\n\n if batch_idx % self.cfg.log_interval == 0:\n info_1 = 'Epochs {} [{}/{} ({:.0f}%)] | Loss: {:.6f}' .format(\n self.current_epoch, \n batch_idx * len(imgs), \n len(self.tr_loader.dataset), \n 100. 
* batch_idx / len(self.tr_loader),\n self.loss.item())\n info_2 = 'Batch Accuracy : {:.2f}'.format(acc)\n self.logger.info('{} | {}'.format(info_1, info_2))\n self.save_checkpoint('{}_epoch{}_iter{}.pt'.format(\n self.cfg.exp_name,\n self.current_epoch, \n self.current_iteration)\n )\n self.current_iteration += 1", "def train_step(x_batch, y_batch, batch_idx, epoch_idx):\n feed_dict = {\n cnn.input_x: x_batch,\n cnn.input_y: y_batch,\n cnn.dropout_keep_prob: 0.75\n }\n _, step, summaries, loss, accuracy = sess.run(\n [step_update, global_step, summary_op, cnn.loss, cnn.accuracy],\n feed_dict)\n\n print(\"Epoch {}, Batch_no {} : loss {:g}, acc {:g}\".format(epoch_idx, batch_idx, loss, accuracy))\n train_summary_writer.add_summary(summaries, step)", "def sample_batch(self, batch_type, batch_size):\n\n if batch_type == \"train\":\n folders = self.meta_train_characters\n if batch_type == \"test\":\n folders = self.meta_test_characters\n if batch_type == \"val\":\n folders = self.meta_val_characters\n\n num_batches = len(folders)//batch_size\n folders = folders[:num_batches*batch_size]\n all_image_batches = []\n all_label_batches = []\n\n for batch_idx in range(batch_size):\n sample_classes = random.sample(folders, self.num_classes)\n #sample_classes = folders[batch_idx*self.num_classes : (batch_idx+1)*self.num_classes]\n one_hot_labels = np.identity(self.num_classes)\n\n labels_images = get_images(sample_classes, one_hot_labels, nb_samples=self.num_samples_per_class, shuffle=False)\n train_images = []\n train_labels = [] \n for sample_idx, (labels, images) in enumerate(labels_images):\n train_images.append(image_file_to_array(images, 784))\n train_labels.append(labels)\n\n \n train_images, train_labels = shuffle(train_images, train_labels)\n\n labels = np.vstack(train_labels).reshape((-1, self.num_classes, self.num_classes)) # K, N, N\n images = np.vstack(train_images).reshape((self.num_samples_per_class, self.num_classes, -1)) # K x N x 784\n\n all_label_batches.append(labels)\n all_image_batches.append(images)\n\n all_image_batches = np.stack(all_image_batches).astype(np.float32)\n all_label_batches = np.stack(all_label_batches).astype(np.float32)\n\n return all_label_batches, all_image_batches", "def train(self):\n raise NotImplementedError", "def train(self, dataset):\n \"*** YOUR CODE HERE ***\"\n batch_size = 1\n while True:\n error = False\n for x, y in dataset.iterate_once(batch_size):\n y_pred = self.get_prediction(x)\n y = nn.as_scalar(y)\n if y != y_pred:\n error = True\n nn.Parameter.update(self.get_weights(),x,y)\n if error == False:\n break", "def train(self, data_dict, label_dict):\n loaders = self.init_loaders(data_dict, label_dict)\n best_performance = 1e18\n loss_dict = self.init_loss_dict()\n performance_dict = self.init_performance_dict()\n\n for epoch in range(self.config_dict[\"num_epochs\"]):\n print(\"Epoch {}/{}\".format(epoch, self.config_dict[\"num_epochs\"] - 1))\n print(\"-\" * 10)\n\n if self.scheduler is not None:\n self.scheduler.step()\n\n for phase in [\"train\", \"val\"]:\n self.model.train(phase == \"train\")\n running_loss_dict = self.init_running_loss_dict(\n list(loss_dict[phase].keys())\n )\n output_dict = self.init_output_dict()\n i = 0\n for the_data in loaders[phase]:\n i += 1\n batch_loss_dict = {}\n inputs, labels = self.transform_batch(the_data)\n\n # zero parameter gradients\n self.optimizer.zero_grad()\n\n # forward\n outputs = self.model(inputs)\n\n output_dict = self.update_output_dict(output_dict, outputs, labels)\n\n batch_loss_dict[\"loss\"] 
= self.criterion(outputs, labels)\n if phase == \"train\":\n batch_loss_dict[\"loss\"].backward()\n self.optimizer.step()\n\n for key in batch_loss_dict.keys():\n running_loss_dict[key] += batch_loss_dict[key].item()\n\n # Compute epoch losses and update loss dict\n epoch_loss_dict = {\n key: running_loss_dict[key] / i for key in running_loss_dict.keys()\n }\n loss_dict[phase] = self.update_metric_dict(\n loss_dict[phase], epoch_loss_dict\n )\n\n # Compute epoch performance and update performance dict\n epoch_statistics = self.compute_epoch_statistics(output_dict)\n performance_dict[phase] = self.update_metric_dict(\n performance_dict[phase], epoch_statistics\n )\n\n print(\"Phase: {}:\".format(phase))\n self.print_metric_dict(epoch_loss_dict)\n self.print_metric_dict(epoch_statistics)\n\n if phase == \"val\":\n best_model_condition = epoch_loss_dict[\"loss\"] < best_performance\n if best_model_condition:\n print(\"Best model updated\")\n best_performance = epoch_loss_dict[\"loss\"]\n best_model_wts = copy.deepcopy(self.model.state_dict())\n\n print(\"Best val performance: {:4f}\".format(best_performance))\n self.model.load_state_dict(best_model_wts)\n result_dict = {\n phase: {**performance_dict[phase], **loss_dict[phase]}\n for phase in performance_dict.keys()\n }\n return result_dict", "def after_batch(self):\n if self.trainer._mode == 'train':\n with open(os.path.join(self.root_path, 'loss.txt'), 'a+') as fout:\n fout.write(str(self.trainer._epoch) + '\\t' +\n str(self.trainer._loss.detach().cpu().item()) + '\\n')\n\n if self.trainer._mode == 'test' and (self.f is not None):\n for index in range(len(self.trainer._ids)):\n one_input = self.get_one(self.trainer._input, index)\n one_output = self.get_one(self.trainer._output, index)\n\n res = self.f(one_input, one_output)\n id = self.trainer._ids[index]\n\n self.show(res, id)" ]
[ "0.846066", "0.8322821", "0.77974415", "0.7654369", "0.76157534", "0.7293694", "0.7097855", "0.7097493", "0.7072412", "0.7032535", "0.70106894", "0.70053935", "0.6989753", "0.69883996", "0.69795567", "0.69641083", "0.69641083", "0.69641083", "0.69641083", "0.69641083", "0.6964093", "0.694169", "0.6941236", "0.69259197", "0.68910223", "0.68820727", "0.6873527", "0.6861105", "0.6846473", "0.6840632", "0.6839936", "0.6837611", "0.68354744", "0.68265337", "0.6812581", "0.68035644", "0.67790276", "0.67645556", "0.6755987", "0.6751228", "0.67505693", "0.67250615", "0.67168355", "0.67106634", "0.6709219", "0.6704231", "0.67032236", "0.6695343", "0.66942924", "0.6691164", "0.66829914", "0.667956", "0.6678139", "0.6667639", "0.66609484", "0.6641469", "0.66380256", "0.66355604", "0.66265595", "0.66223454", "0.6618031", "0.6617476", "0.66155344", "0.66127384", "0.66040945", "0.66036963", "0.6600445", "0.65985966", "0.6598505", "0.6590943", "0.6587447", "0.65844995", "0.6580296", "0.658002", "0.6572338", "0.65690935", "0.6565136", "0.6565136", "0.655841", "0.6557631", "0.65561783", "0.6555554", "0.65523934", "0.6551511", "0.6551489", "0.65508354", "0.65442955", "0.6542311", "0.6536797", "0.6536346", "0.6535473", "0.6529004", "0.652438", "0.6521055", "0.65151054", "0.65081316", "0.6501272", "0.65003324", "0.6500292", "0.6500072", "0.6497021" ]
0.0
-1
Sets/clears a software breakpoint. address > the address of the software breakpoint. instruction > the instruction to be programmed (either the software breakpoint opcode or the original instruction the software breakpoint was replacing). flags > One or more of the SWBPFlags listed below. Returns the original/old opcode at address.
def set_sw_bp(address, instruction, flags):
    log.info("Debug:: set/remove bp at address 0x%0x, instructions 0x%0x, flags = 0x%0x" % (
        address, instruction, flags))
    # Accept addressing both from FLASH_START and from 0x0
    addr = address & (FLASH_START-1)

    single_page_access = False
    buffer_size = PAGE_SIZE * 16
    # Canopus: single page read-modify-write is possible within the first 16kb of flash.
    # SAMRH71: single page read-modify-write is possible in whole flash.
    if addr < 16384 or "RH71" in device:
        buffer_size = PAGE_SIZE
        single_page_access = True
    buffer_mask = long(buffer_size-1)

    data_buffer = bytearray(buffer_size)

    # Get the start address to the flash page(es) we need to erase
    start_addr = addr & ~(buffer_mask)
    absolute_start_addr = address & ~(buffer_mask)
    # Get BP address within the buffer
    bp_addr = addr & buffer_mask

    prog_read("pgm", absolute_start_addr, buffer_size, data_buffer)

    org_inst = 0
    n = 0
    # Replace instruction in data_buffer
    while(n < 2):
        org_inst += data_buffer[bp_addr+n] << (n*8)
        data_buffer[bp_addr+n] = ((instruction >> (n*8)) & 0xff)
        n = n+1

    if single_page_access:
        if "RH71" in device:
            # Remove flash offset, if any, and mask away page internal address bits.
            # FARG bitfield in EFC_FCR
            page_number = addr & 0x3fff00  # SAMRH71 has page_size 256
            # Erase and write page (two separate commands on SAMRH71)
            dev.Write32(efc_fcr, efc_cmd_ep | page_number)
            waitForFlashReady()
            dev.Write(start_addr, data_buffer, 0, PAGE_SIZE)
            dev.Write32(efc_fcr, efc_cmd_wp | page_number)
            waitForFlashReady()
        else:
            dev.Write(start_addr, data_buffer, 0, PAGE_SIZE)
            # Remove flash offset, if any, and mask away page internal address bits.
            # Then shift right once to position page_number in the FARG bitfield in EFC_FCR
            page_number = (addr & 0x3ffe00)/2  # Canopus has page_size 512
            # Erase and write page (one single command on Canopus)
            dev.Write32(efc_fcr, efc_cmd_ewp | page_number)
            waitForFlashReady()
    else:
        # Erase 16 pages (16pages == buffer_size). The "0x200" sets the number of pages to erase.
        dev.Write32(efc_fcr, efc_cmd_epa | (start_addr >> 1) | 0x200)
        waitForFlashReady()
        prog_write("Pgm", absolute_start_addr, buffer_size, data_buffer)

    return org_inst
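For illustration, a minimal usage sketch of the routine above; the breakpoint opcode value, the example address offset, and the zero flags argument are assumptions for the sketch, not taken from the record:

    # Hypothetical driver code, assuming a Thumb target where a 16-bit
    # breakpoint instruction is patched into flash.
    SW_BP_OPCODE = 0xBE00               # assumed Thumb BKPT #0 encoding
    bp_address = FLASH_START + 0x120    # assumed breakpoint location

    # Program the breakpoint opcode; the original instruction at that
    # address is returned so it can be restored later.
    original_instruction = set_sw_bp(bp_address, SW_BP_OPCODE, 0)

    # ... let the target run and hit the breakpoint ...

    # Clear the breakpoint by programming the saved instruction back.
    set_sw_bp(bp_address, original_instruction, 0)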
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_breakpoint(self, addr: int) -> Optional[Breakpoint]:\n if not self.enabled:\n self.enable()\n\n if not self.can_support_address(addr):\n LOG.error('Breakpoint out of range 0x%X', addr)\n return None\n\n if self.available_breakpoints == 0:\n LOG.error('No more hardware breakpoints are available, dropped breakpoint at 0x%08x', addr)\n return None\n\n for bp in self.hw_breakpoints:\n if not bp.enabled:\n bp.enabled = True\n comp = 0\n if self.fpb_rev == 1:\n bp_match = (1 << 30)\n if addr & 0x2:\n bp_match = (2 << 30)\n comp = addr & 0x1ffffffc | bp_match | 1\n elif self.fpb_rev == 2:\n comp = (addr & 0xfffffffe) | 1\n self.ap.write32(bp.comp_register_addr, comp)\n LOG.debug(\"BP: wrote 0x%08x to comp @ 0x%08x\", comp, bp.comp_register_addr)\n bp.addr = addr\n self.num_hw_breakpoint_used += 1\n return bp\n return None", "def software_breakpoint_set(self, addr, thumb=False, arm=False, flash=False, ram=False):\n if flash and not ram:\n flags = enums.JLinkBreakpoint.SW_FLASH\n elif not flash and ram:\n flags = enums.JLinkBreakpoint.SW_RAM\n else:\n flags = enums.JLinkBreakpoint.SW\n\n if thumb:\n flags = flags | enums.JLinkBreakpoint.THUMB\n elif arm:\n flags = flags | enums.JLinkBreakpoint.ARM\n\n handle = self._dll.JLINKARM_SetBPEx(int(addr), flags)\n if handle <= 0:\n raise errors.JLinkException('Software breakpoint could not be set.')\n\n return handle", "def remove_breakpoint(self, bp: Breakpoint) -> None:\n for hwbp in self.hw_breakpoints:\n if hwbp.enabled and hwbp.addr == bp.addr:\n hwbp.enabled = False\n self.ap.write_memory(hwbp.comp_register_addr, 0)\n self.num_hw_breakpoint_used -= 1\n return", "def breakpoint_find(self, addr):\n return self._dll.JLINKARM_FindBP(addr)", "def gdb_breakpoint():\n _gdb_python_call_gen('gdb_breakpoint')()", "def set_breakpoint(context, *args):\n\n vars = [arg for arg in locals()['args']] # noqa F841\n\n if settings.DEBUG:\n breakpoint()", "def breakpoint_set(self, addr, thumb=False, arm=False):\n flags = enums.JLinkBreakpoint.ANY\n\n if thumb:\n flags = flags | enums.JLinkBreakpoint.THUMB\n elif arm:\n flags = flags | enums.JLinkBreakpoint.ARM\n\n handle = self._dll.JLINKARM_SetBPEx(int(addr), flags)\n if handle <= 0:\n raise errors.JLinkException('Breakpoint could not be set.')\n\n return handle", "def break_cmd(cmd, cnt, args):\n if cnt == 1:\n log(\"Break command needs an address\")\n return\n log(\"break\"+ \" {:08x}\".format(int(args[1], 16)))\n cpu.set_break(int(args[1],16))", "def hardware_breakpoint_set(self, addr, thumb=False, arm=False):\n flags = enums.JLinkBreakpoint.HW\n\n if thumb:\n flags = flags | enums.JLinkBreakpoint.THUMB\n elif arm:\n flags = flags | enums.JLinkBreakpoint.ARM\n\n handle = self._dll.JLINKARM_SetBPEx(int(addr), flags)\n if handle <= 0:\n raise errors.JLinkException('Hardware breakpoint could not be set.')\n\n return handle", "def _SetBreakpoint(self, mobj, line=-1):\n handle = -1\n if line < 0:\n line = self.GetCurrentLine()\n if not mobj.IsSet(self, line):\n # Clear other set breakpoint marker states on same line\n ed_marker.Breakpoint().Set(self, line, delete=True)\n ed_marker.BreakpointDisabled().Set(self, line, delete=True)\n mobj.Set(self, line, delete=False)\n handle = mobj.Handle\n return handle", "def clear_break_cmd(cmd, cnt, args):\n if cnt == 1:\n log(\"Clear break command needs an address\")\n return \n log(\"clear break\"+\" {:08x}\".format(int(args[1], 16)))\n cpu.clear_break(int(args[1],16))", "def frame_off_savregs(*args):\n return _ida_frame.frame_off_savregs(*args)", "def 
handle_breakpoints(self, bit):\n while bit != self.options.current_breakpoint:\n self.check_required()\n self.options.shift_breakpoint()\n self.arguments = self.options.get_arguments()\n self.current_argument = self.arguments.pop(0)", "def delete_breakpoint():\n raise NotImplementedError()", "def break_code(self, breakpoint):\n\n self.cont = False\n self.pause_reason = \"breakpoint\"\n self.scope_assign = {}\n self.scope_var_id = 0\n handler.send_breakpoint_event(breakpoint)", "def clear_breakpoint(self, id):\r\n bps = self.bpoints.filter( ('id',),(id,) )\r\n if len(bps)==0:\r\n raise Exception('No breakpoint with id '+str(id))\r\n bpdict = bps[0]\r\n\r\n #clear the breakpoint in each engine\r\n console = self.app.toolmgr.get_tool('Console')\r\n engines = console.get_all_engines(active=True)\r\n for eng in engines:\r\n eng.debugger.clear_breakpoint(id)\r\n \r\n #remove from internal breakpoint list\r\n self.bpoints.remove(bpdict)\r\n\r\n #clear any markers from the editor pages\r\n page = self.frame.notebook.GetPageFromPath( bpdict['filename'] )\r\n if page is not None:\r\n page.DeleteBreakpointMarker( id )\r\n\r\n #publish a breakpoint cleared message\r\n self.msg_node.publish_msg( editor_messages.EDITOR_BREAKPOINT_CLEARED,\r\n (id,) )", "def handle_next_breakpoint(self, bit):\n self.check_required()\n self.options.shift_breakpoint()\n self.arguments = self.options.get_arguments()\n self.current_argument = self.arguments.pop(0)", "def breakpoint(g=None, l=0):\n global simulator\n\n if simulator is None:\n print \"Program is not started.\"\n\n try:\n if g is None:\n g = simulator.get_pc()[0]\n l = simulator.get_pc()[1]\n bp = simulator.add_breakpoint(g, l)\n print \"breakpoint set at (0x{:x},{}) with id={}.\".format(g, l, bp[0])\n return bp[0]\n except:\n simulation_error()\n return None", "def avoid_instr(bv: BinaryView, addr: int):\n\n # Highlight the instruction in red\n highlight_instr(bv, addr, HighlightStandardColor.RedHighlightColor)\n\n # Add the instruction to the list associated with the current view\n bv.session_data.mui_avoid.add(addr)", "def rm_avoid_instr(bv: BinaryView, addr: int):\n\n # Remove instruction highlight\n clear_highlight(bv, addr)\n\n # Remove the instruction to the list associated with the current view\n bv.session_data.mui_avoid.remove(addr)", "def breakpoint(self):\n return None", "def set_breakpoint(self, func):\n\n if func == \"\":\n return\n\n if self.bp_func is not None:\n self.debugger.set_breakpoint(self.bp_func, False)\n\n self.bp_func = func\n self.debugger.set_breakpoint(self.bp_func, True)\n\n # Console output\n self.write_console_output_sig.emit(\"[%s] Breakpoint set on \"\n \"function %s.\" % (DEBUG, func))", "def breakpoint_clear(self, handle):\n return not self._dll.JLINKARM_ClrBPEx(handle)", "def pdb_option(args, run):\n run.pdb = True", "def SWP():\n\tglobal pointer, memory, registers\n\ttmp = registers[memory[pointer + 0x02]]\n\tregisters[memory[pointer + 0x02]] = registers[memory[pointer + 0x01]]\n\tregisters[memory[pointer + 0x01]] = tmp\n\tpointer += 0x03", "def delete_breakpoints(l=None):\n global simulator\n if simulator is None:\n return\n if isinstance(l, int):\n l = [l]\n if l is None:\n l = []\n for(id, a) in simulator.get_breakpoints():\n l = l + [id]\n for bp in l:\n if not simulator.del_breakpoint(bp):\n print \"unknown breakpoint\", bp", "def pdb(item, item2=None):\n import pdb # noqa\n pdb.set_trace() # noqa", "def breakpoint_clear_all(self):\n return not self._dll.JLINKARM_ClrBPEx(0xFFFFFFFF)", "def cond(id, e=None):\n global 
simulator\n\n if simulator is None:\n print \"Program is not started; set breakpoint to entrypoint.\"\n return None\n bp = None\n if e is None or isinstance(e, str):\n bp = simulator.set_cond(id, e)\n else:\n raise TypeError(e)\n if bp is None:\n print \"no such breakpoint \", id\n return\n elif e is None:\n print \"making breakpoint\", id, \" unconditional\"\n else:\n print \"making breakpoint\", id, \" conditional\"\n print bp[0], \" : \", bp[1]", "def add_breakpoint():\n raise NotImplementedError()", "def reset(self):\n with self.bkp_lock:\n self.active_breakpoints = set()\n self.stepping = SteppingMode.STEP_NO_STEP\n self.continue_next()", "def frame_off_retaddr(*args):\n return _ida_frame.frame_off_retaddr(*args)", "def xpm(Pdb=Pdb):\n info = sys.exc_info()\n print(traceback.format_exc())\n post_mortem(info[2], Pdb)", "def back_patch(self, *args, **kwargs):\n self.pb[self.ss_i(0)] = \"JPF\", _m(self.ss_i(1)), _m(self.pc)\n self.pop(2)", "def magic_pdb(self, parameter_s=''):\n\n par = parameter_s.strip().lower()\n\n if par:\n try:\n pdb = {'off':0,'0':0,'on':1,'1':1}[par]\n except KeyError:\n print 'Incorrect argument. Use on/1, off/0 or nothing for a toggle.'\n return\n else:\n self.shell.InteractiveTB.call_pdb = pdb \n else:\n self.shell.InteractiveTB.call_pdb = 1 - self.shell.InteractiveTB.call_pdb\n print 'Automatic pdb calling has been turned',\\\n on_off(self.shell.InteractiveTB.call_pdb)", "def instruction_jmp(self, address):\n self.exec_ptr = Vm.filter_mem_address(address)", "def BRK(self, *_):\n # first, increment PC. Awkwardly, this overshoots the instruction after BRK, but that's a c64 bug and not an emulator bug\n self.reg.PC += 1\n # now write the PC to the stack in two steps\n self.push(self.reg.PCH)\n self.push(self.reg.PCL)\n # write processor flags to the stack\n self.push(self.reg.P | 0b00010000) # this logical OR forces the B flag set on write to stack only\n # set PC from the 16 bytes at 0xfffe and 0xffff\n self.reg.PC = self.ram.get(0xffff) | (self.ram.get(0xfffe) << 8)", "def init(self) -> None:\n fpcr = self.ap.read32(self.address + FPB.FP_CTRL)\n self.fpb_rev = 1 + ((fpcr & FPB.FP_CTRL_REV_MASK) >> FPB.FP_CTRL_REV_SHIFT)\n if self.fpb_rev not in (1, 2):\n LOG.warning(\"Unknown FPB version %d\", self.fpb_rev)\n self.nb_code = ((fpcr >> 8) & 0x70) | ((fpcr >> 4) & 0xF)\n self.nb_lit = (fpcr >> 7) & 0xf\n LOG.info(\"%d hardware breakpoints, %d literal comparators\", self.nb_code, self.nb_lit)\n for i in range(self.nb_code):\n self.hw_breakpoints.append(HardwareBreakpoint(self.address + FPB.FP_COMP0 + 4*i, self))\n\n # disable FPB (will be enabled on first bp set)\n self.disable()\n for bp in self.hw_breakpoints:\n self.ap.write_memory(bp.comp_register_addr, 0)", "def stop_handler(self,event):\n if event.breakpoint.location == BREAKPOINT:\n self.get_cache_history_items()", "def add_breakpoint(self, xval: XValue[T], yval: float, squash: bool = True) -> None:\n if squash and self.call(xval) == yval:\n return\n self.breakpoints[xval] = yval", "def setBreakAtEntry():\n\tfile_info=gdb.execute(\"info file\", False, True)\n\tmslines=file_info.split('\\n')\n\tfor s in mslines:\n\t\tif s.find(\"Entry point\") > -1:\n\t\t\taddress = '*'+s.split(': ')[-1]\n\t\t\ttry:\n\t\t\t\tif address not in [ bp.location for bp in gdb.breakpoints() ]: \n\t\t\t\t\tprint 'Setting entry point breakpoint at ' + str(address)\n\t\t\t\t\tgdb.Breakpoint(address, gdb.BP_BREAKPOINT)\n\t\t\texcept TypeError: # no breakpoints set\n\t\t\t\tprint 'Setting entry point breakpoint at ' + 
str(address)\n\t\t\t\tgdb.Breakpoint(address, gdb.BP_BREAKPOINT)", "def set_step(self):\n # Issue #13183: pdb skips frames after hitting a breakpoint and running\n # step commands.\n # Restore the trace function in the caller (that may not have been set\n # for performance reasons) when returning from the current frame.\n if self.frame_returning:\n caller_frame = self.frame_returning.f_back\n if caller_frame and not caller_frame.f_trace:\n caller_frame.f_trace = self.trace_dispatch\n self._set_stopinfo(None, None)", "def setFlag(self, whichFlag, whichValue):\n \n try:\n if self.__debugOn == True:\n print(\"Flags in: %x\" %self.__flags)\n \n # Get temproary flag value that blanks out the flag.\n tFlag = (~whichFlag) & self.__flags\n \n # Set our flag to the given value.\n self.__flags = tFlag | whichValue\n \n if self.__debugOn == True:\n print(\"Flags out: %x\" %self.__flags)\n \n except:\n raise\n \n return", "def jmp(self, addr):\n\n self.reg.ip = addr", "def goto(fixed_pc: int):\n\n def _goto(state: State) -> State:\n return state._replace(pc=fixed_pc)\n\n return _goto", "def breakpoint_info(self, handle=0, index=-1):\n if index < 0 and handle == 0:\n raise ValueError('Handle must be provided if index is not set.')\n\n bp = structs.JLinkBreakpointInfo()\n bp.Handle = int(handle)\n res = self._dll.JLINKARM_GetBPInfoEx(index, ctypes.byref(bp))\n if res < 0:\n raise errors.JLinkException('Failed to get breakpoint info.')\n\n return bp", "def set_stopflag(cobj, stop):\n pass", "def _decr_stack_pointer(self):\n return [\"@SP\", \"M=M-1\"]", "def JMP(self, value):\n self.reg.PC = value", "def setFlag(flagbyte, pos, status):\n if status:\n return flagbyte | 2**pos\n else:\n return flagbyte & ~2**pos", "def set_I_to_address(self):\n self.I = self.opcode & 0xFFF\n logger.info(\"Set I to {}\".format(hex(self.I)))", "def get_reljmp_patch(from_addr, to_addr):\n patch = b\"\\x48\\x8D\\x05\" + struct.pack(\"<i\", to_addr - from_addr - 7)\n patch += b\"\\xFF\\xE0\"\n return patch", "def open_breakpoint_editor(on_open, *args, **kwargs):\n\n def __internal():\n m = GPS.MDI.get('Breakpoints')\n if not m:\n return True # Wait again\n on_open(*(m, ) + args, **kwargs)\n return False\n\n GLib.timeout_add(200, __internal)\n GPS.Menu.get('/Debug/Data/Breakpoints').action.execute_if_possible()", "def SetBreakpoint(self, line=-1, disabled=False):\n if not disabled:\n handle = self._SetBreakpoint(ed_marker.Breakpoint(), line)\n else:\n handle = self._SetBreakpoint(ed_marker.BreakpointDisabled(), line)\n return handle", "def StripPC(addr):\n global ARCH\n if ARCH == \"arm\":\n return addr & ~1\n return addr", "def set_pc(self, value):\n self.regs['PC'].set_value(value & 0xFFFF)\n return value & 0xFFFF", "def fix_program(instructions):\n for index, instruction in enumerate(instructions):\n command = instruction.split(\" \")[0]\n\n if command == JMP_COMMAND:\n new_command = NOP_COMMAND\n elif command == NOP_COMMAND:\n new_command = JMP_COMMAND\n else:\n continue\n\n old_instruction = instructions[index]\n instructions[index] = instruction.replace(command, new_command)\n\n accumulator, last_instructions = run_until_loop(instructions)\n if len(instructions) == last_instructions:\n return accumulator\n\n # Restore\n instructions[index] = old_instruction", "def _force_off(self):\n self._interface.set('fw_wp_vref', self._fw_wp_vref)\n self._interface.set('fw_wp_en', 'on')\n self._interface.set('fw_wp', 'off')", "def set_breakpoint(self, filename,lineno,\r\n condition=None,ignore_count=None,trigger_count=None):\r\n 
#create new id.\r\n id = self.bp_counter\r\n self.bp_counter+=1 \r\n\r\n #store in DictList\r\n bpdata = { 'id':id,'filename':filename, 'lineno':lineno,\r\n 'condition':condition, 'ignore_count':ignore_count,\r\n 'trigger_count':trigger_count }\r\n self.bpoints.append(bpdata)\r\n \r\n #set the breakpoint in each engine.\r\n console = self.app.toolmgr.get_tool('Console')\r\n engines = console.get_all_engines(active=True)\r\n for eng in engines:\r\n eng.debugger.set_breakpoint(bpdata)\r\n\r\n #add a breakpoint marker to the editor page\r\n page = self.frame.notebook.GetPageFromPath(filename)\r\n if page is not None:\r\n page.AddBreakpointMarker( id, lineno )\r\n\r\n #publish a breakpoint set message\r\n self.msg_node.publish_msg( editor_messages.EDITOR_BREAKPOINT_SET,\r\n (bpdata,) ) \r\n return id", "def instruction_jf(self, value, address):\n if Vm.is_register(value):\n value = self.get_register(value)\n\n if value == 0:\n self.exec_ptr = Vm.filter_mem_address(address)\n else:\n self.exec_ptr += 3", "def rm_find_instr(bv: BinaryView, addr: int):\n\n # Remove instruction highlight\n clear_highlight(bv, addr)\n\n # Remove the instruction to the list associated with the current view\n bv.session_data.mui_find.remove(addr)", "def j(*args):\n try:\n pc = int(gdb.selected_frame().pc())\n pwndbg.ida.Jump(pc)\n except Exception:\n pass", "def DeleteBreakpoint(self, line):\n ed_marker.Breakpoint().Set(self, line, delete=True)\n ed_marker.BreakpointDisabled().Set(self, line, delete=True)", "def Spm():\n\n global Asm\n\n if dec.Asm.Parse_Pointer == 0 or not dec.Asm.Optional:\n # No operand, use defalt Z index\n target.CodeWord(dec.Asm.Instructions[dec.Asm.Mnemonic][3])\n else:\n # An operand is given, must be Z or Z+\n value = GetIndex()\n\n if value[1] != 'Z' or value[0] == 2 or value[2] != 0:\n # Illegal index register\n errors.DoError('badoper', False)\n index = 0 # Dummy mode\n else:\n # Legal index register\n index = value[0]\n\n target.CodeWord(dec.Asm.Instructions[dec.Asm.Mnemonic][3] +\n (index << 4))\n\n NoMore()", "def reset_break_points(self):\n\n self._break_points = np.array([-np.inf, np.inf])\n\n explanatory_variables = self.get_explanatory_variables()\n self.set_explanatory_variables(explanatory_variables[0])\n\n self._create_model()", "def togglePWMPower(self):\n # PCPWM1 is located at position 6\n mask = 1 << 6\n self._injectFault(\"PCONP\", 0x400FC0C4, mask)", "def halt(self, addr):\n\n self.reg.run_flag = False", "def jmp_to_addr(self):\n self.pc = self.opcode & 0x0FFF\n logger.info(\"Jumped to address at {}\".format(hex(self.pc)))\n # PC gets incremented after every instruction this counteracts that\n self.pc -= 2", "def set_debug_flag(flag):\n pma._pma_set_debug_flag(flag)", "def replace_instruction(bet_id, new_price):\n args = locals()\n return {\n to_camel_case(k): v for k, v in args.items() if v is not None\n }", "def change_value_api(self, exe_name):\n exe = os.path.join(os.getcwd(), exe_name)\n\n # Create a target by the debugger.\n target = self.dbg.CreateTarget(exe)\n self.assertTrue(target, VALID_TARGET)\n\n # Create the breakpoint inside function 'main'.\n breakpoint = target.BreakpointCreateByLocation('main.c', self.line)\n self.assertTrue(breakpoint, VALID_BREAKPOINT)\n\n # Create the breakpoint inside the function 'main'\n check_breakpoint = target.BreakpointCreateByLocation('main.c', self.check_line)\n self.assertTrue(check_breakpoint, VALID_BREAKPOINT)\n\n # Create the breakpoint inside function 'main'.\n end_breakpoint = target.BreakpointCreateByLocation('main.c', 
self.end_line)\n self.assertTrue(end_breakpoint, VALID_BREAKPOINT)\n\n # Now launch the process, and do not stop at entry point.\n process = target.LaunchSimple(None, None, os.getcwd())\n self.assertTrue(process, PROCESS_IS_VALID)\n\n # Get Frame #0.\n self.assertTrue(process.GetState() == lldb.eStateStopped)\n thread = lldbutil.get_stopped_thread(process, lldb.eStopReasonBreakpoint)\n self.assertTrue(thread.IsValid(), \"There should be a thread stopped due to breakpoint condition\")\n frame0 = thread.GetFrameAtIndex(0)\n self.assertTrue (frame0.IsValid(), \"Got a valid frame.\")\n\n # Get the val variable and change it:\n error = lldb.SBError()\n\n val_value = frame0.FindVariable (\"val\")\n self.assertTrue (val_value.IsValid(), \"Got the SBValue for val\")\n actual_value = val_value.GetValueAsSigned (error, 0);\n self.assertTrue (error.Success(), \"Got a value from val\")\n self.assertTrue (actual_value == 100, \"Got the right value from val\")\n \n result = val_value.SetValueFromCString (\"12345\")\n self.assertTrue (result, \"Setting val returned True.\")\n actual_value = val_value.GetValueAsSigned (error, 0);\n self.assertTrue (error.Success(), \"Got a changed value from val\")\n self.assertTrue (actual_value == 12345, \"Got the right changed value from val\")\n \n # Now check that we can set a structure element:\n\n mine_value = frame0.FindVariable (\"mine\")\n self.assertTrue (mine_value.IsValid(), \"Got the SBValue for mine\")\n \n mine_second_value = mine_value.GetChildMemberWithName (\"second_val\")\n self.assertTrue (mine_second_value.IsValid(), \"Got second_val from mine\")\n actual_value = mine_second_value.GetValueAsUnsigned (error, 0)\n self.assertTrue (error.Success(), \"Got an unsigned value for second_val\")\n self.assertTrue (actual_value == 5555)\n\n result = mine_second_value.SetValueFromCString (\"98765\")\n self.assertTrue (result, \"Success setting mine.second_value.\")\n actual_value = mine_second_value.GetValueAsSigned (error, 0);\n self.assertTrue (error.Success(), \"Got a changed value from mine.second_val\")\n self.assertTrue (actual_value == 98765, \"Got the right changed value from mine.second_val\")\n \n # Next do the same thing with the pointer version.\n ptr_value = frame0.FindVariable (\"ptr\")\n self.assertTrue (ptr_value.IsValid(), \"Got the SBValue for ptr\")\n \n ptr_second_value = ptr_value.GetChildMemberWithName (\"second_val\")\n self.assertTrue (ptr_second_value.IsValid(), \"Got second_val from ptr\")\n actual_value = ptr_second_value.GetValueAsUnsigned (error, 0)\n self.assertTrue (error.Success(), \"Got an unsigned value for ptr->second_val\")\n self.assertTrue (actual_value == 6666)\n\n result = ptr_second_value.SetValueFromCString (\"98765\")\n self.assertTrue (result, \"Success setting ptr->second_value.\")\n actual_value = ptr_second_value.GetValueAsSigned (error, 0);\n self.assertTrue (error.Success(), \"Got a changed value from ptr->second_val\")\n self.assertTrue (actual_value == 98765, \"Got the right changed value from ptr->second_val\")\n \n # gcc may set multiple locations for breakpoint\n breakpoint.SetEnabled(False)\n\n # Now continue, grab the stdout and make sure we changed the real values as well...\n process.Continue();\n\n self.assertTrue(process.GetState() == lldb.eStateStopped)\n thread = lldbutil.get_stopped_thread(process, lldb.eStopReasonBreakpoint)\n self.assertTrue(thread.IsValid(), \"There should be a thread stopped due to breakpoint condition\")\n\n expected_value = \"Val - 12345 Mine - 55, 98765, 55555555. 
Ptr - 66, 98765, 66666666\"\n stdout = process.GetSTDOUT(1000)\n self.assertTrue (expected_value in stdout, \"STDOUT showed changed values.\")\n\n # Finally, change the stack pointer to 0, and we should not make it to our end breakpoint.\n frame0 = thread.GetFrameAtIndex(0)\n self.assertTrue (frame0.IsValid(), \"Second time: got a valid frame.\")\n sp_value = frame0.FindValue (\"sp\", lldb.eValueTypeRegister);\n self.assertTrue (sp_value.IsValid(), \"Got a stack pointer value\")\n result = sp_value.SetValueFromCString(\"1\")\n self.assertTrue (result, \"Setting sp returned true.\")\n actual_value = sp_value.GetValueAsUnsigned (error, 0)\n self.assertTrue (error.Success(), \"Got a changed value for sp\")\n self.assertTrue (actual_value == 1, \"Got the right changed value for sp.\")\n \n # Boundary condition test the SBValue.CreateValueFromExpression() API.\n # LLDB should not crash!\n nosuchval = mine_value.CreateValueFromExpression(None, None)\n\n process.Continue()\n\n self.assertTrue(process.GetState() == lldb.eStateStopped)\n thread = lldbutil.get_stopped_thread(process, lldb.eStopReasonBreakpoint)\n self.assertTrue(thread == None, \"We should not have managed to hit our second breakpoint with sp == 1\")\n \n process.Kill()", "def jmp(self, address, nbr_of_args):\n\n # Set the PC to the address stored in the given register.\n self.pc = self.reg[address]", "def set_exception_trap(flag):\n global CRDS_EXCEPTION_TRAP\n old_flag = CRDS_EXCEPTION_TRAP\n if flag is not None:\n CRDS_EXCEPTION_TRAP = flag\n return old_flag", "def eflags(vdb, line):\n trace = vdb.getTrace()\n argv = shlex.split(line)\n if len(argv) not in (0, 1):\n return vdb.do_help('eflags')\n\n if len(argv) > 0:\n flag = argv[0].upper()\n valid_flags = list(trace.getStatusFlags().keys())\n if flag not in valid_flags:\n raise Exception('invalid flag: %s, valid flags %s' % (flag, valid_flags))\n value = trace.getRegisterByName(flag)\n trace.setRegisterByName(flag, not bool(value))\n # TODO: this is not plumbed through to flags gui due to new gui\n # eventing coming soon.\n vdb.vdbUIEvent('vdb:setflags')\n return\n\n ef = trace.getRegisterByName('eflags')\n vdb.vprint('%16s: %s' % ('Carry', bool(ef & e_i386.EFLAGS_CF)))\n vdb.vprint('%16s: %s' % ('Parity', bool(ef & e_i386.EFLAGS_PF)))\n vdb.vprint('%16s: %s' % ('Adjust', bool(ef & e_i386.EFLAGS_AF)))\n vdb.vprint('%16s: %s' % ('Zero', bool(ef & e_i386.EFLAGS_ZF)))\n vdb.vprint('%16s: %s' % ('Sign', bool(ef & e_i386.EFLAGS_SF)))\n vdb.vprint('%16s: %s' % ('Trap', bool(ef & e_i386.EFLAGS_TF)))\n vdb.vprint('%16s: %s' % ('Interrupt', bool(ef & e_i386.EFLAGS_IF)))\n vdb.vprint('%16s: %s' % ('Direction', bool(ef & e_i386.EFLAGS_DF)))\n vdb.vprint('%16s: %s' % ('Overflow', bool(ef & e_i386.EFLAGS_OF)))", "def removeInstructionAt(self, address: ghidra.program.model.address.Address) -> None:\n ...", "def devirtualize_jumps(instructions):\n indexof = {id(inst): i for i, inst, in enumerate(instructions)}\n jumps = set(dis.hasjabs).union(set(dis.hasjrel))\n\n for inst in instructions:\n if inst.opcode in jumps:\n target = inst.target\n target_index = indexof[id(target)]\n for offset in (1, 2, 3):\n if (\n target_index >= offset\n and instructions[target_index - offset].opcode == dis.EXTENDED_ARG\n ):\n target = instructions[target_index - offset]\n else:\n break\n\n if inst.opcode in dis.hasjabs:\n if sys.version_info < (3, 10):\n inst.arg = target.offset\n else:\n # arg is offset of the instruction line rather than the bytecode\n # for all jabs/jrel since python 3.10\n inst.arg = 
int(target.offset / 2)\n else: # relative jump\n if sys.version_info < (3, 10):\n inst.arg = target.offset - inst.offset - instruction_size(inst)\n else:\n inst.arg = int(\n (target.offset - inst.offset - instruction_size(inst)) / 2\n )\n inst.argval = target.offset\n inst.argrepr = f\"to {target.offset}\"", "def test_get_dynamic_vals(self):\n \"\"\"Get argument vals for the call stack when stopped on a breakpoint.\"\"\"\n self.build(dictionary=self.getBuildFlags())\n exe = self.getBuildArtifact(\"a.out\")\n\n # Create a target from the debugger.\n\n target = self.dbg.CreateTarget(exe)\n self.assertTrue(target, VALID_TARGET)\n\n # Set up our breakpoints:\n\n third_call_bpt = target.BreakpointCreateByLocation(\n 'pass-to-base.cpp', self.main_third_call_line)\n self.assertTrue(third_call_bpt,\n VALID_BREAKPOINT)\n fourth_call_bpt = target.BreakpointCreateByLocation(\n 'pass-to-base.cpp', self.main_fourth_call_line)\n self.assertTrue(fourth_call_bpt,\n VALID_BREAKPOINT)\n fifth_call_bpt = target.BreakpointCreateByLocation(\n 'pass-to-base.cpp', self.main_fifth_call_line)\n self.assertTrue(fifth_call_bpt,\n VALID_BREAKPOINT)\n sixth_call_bpt = target.BreakpointCreateByLocation(\n 'pass-to-base.cpp', self.main_sixth_call_line)\n self.assertTrue(sixth_call_bpt,\n VALID_BREAKPOINT)\n\n # Now launch the process, and do not stop at the entry point.\n process = target.LaunchSimple(\n None, None, self.get_process_working_directory())\n\n self.assertTrue(process.GetState() == lldb.eStateStopped,\n PROCESS_STOPPED)\n\n b = self.frame().FindVariable(\"b\").GetDynamicValue(lldb.eDynamicCanRunTarget)\n self.assertTrue(b.GetNumChildren() == 0, \"b has 0 children\")\n self.runCmd(\"continue\")\n self.assertTrue(b.GetNumChildren() == 0, \"b still has 0 children\")\n self.runCmd(\"continue\")\n self.assertTrue(b.GetNumChildren() != 0, \"b now has 1 child\")\n self.runCmd(\"continue\")\n self.assertTrue(\n b.GetNumChildren() == 0,\n \"b didn't go back to 0 children\")", "def continue_target_dynlink(self):\n\n self.logger.info('continue target')\n func_info = self.debugger.get_func_info(self.bp_func)\n\n if self.state == self.ExecStates.ON_BP_SHOW_PREV_FRAME:\n self.logger.info(\"on bp show prev frame\")\n b = self.debugger.continue_target()\n self.logger.info(\"after on bp show prev frame\")\n\n pc, code = self.debugger.print_frame(1)\n self.write_asm_display_sig.emit(code, pc)\n\n pc, code = self.debugger.print_function(1)\n if pc != -1:\n self.write_c_display_sig.emit(code, pc)\n\n state = self.debugger.get_process_state()\n if state == self.debugger.ProcessState.STOPPED:\n self.state = self.ExecStates.ON_BP_SHOW_CURR_FRAME\n pc = self.debugger.get_pc_from_frame(0)\n #self.debugger.set_breakpoint_on_return()\n\n self.write_console_output_sig.emit(\n \"[%s] Process stopped on breakpoint. 
The current \"\n \"instruction calls the function monitored.\" % DEBUG)\n\n self.write_console_output_sig.emit(\"[%s] The function \"\n \"call is redirected to the .PLT section at address \"\n \"0x%0.7X\" % (DEBUG, pc))\n else:\n self.state = self.ExecStates.EXIT\n\n elif self.state == self.ExecStates.ON_BP_SHOW_CURR_FRAME:\n self.logger.info(\"on bp show curr frame\")\n pc, code = self.debugger.print_frame(0)\n self.write_asm_display_sig.emit(code, pc)\n\n pc, code = self.debugger.print_function(0)\n if pc != -1:\n self.write_c_display_sig.emit(code, pc)\n\n pc = self.debugger.get_pc_from_frame(0)\n\n self.write_console_output_sig.emit(\"[%s] The function %s has \"\n \"a corresponding entry in the .GOT.PLT section at address \"\n \"%s.\" % (DEBUG, func_info.name, func_info.got_entry.addr))\n\n self.write_console_output_sig.emit(\"[%s] We jump to the \"\n \"address indicated by the .GOT.PLT entry: \"\n \" %s\" % (DEBUG, func_info.got_entry.value))\n\n self.state = self.ExecStates.STEP_INST_PLT\n\n elif self.state == self.ExecStates.STEP_INST_PLT:\n\n prev_pc = self.debugger.get_pc_from_frame(0)\n code, _ = self.debugger.step_instruction()\n current_pc = self.debugger.get_pc_from_frame(0)\n\n pc, code = self.debugger.print_frame(0)\n self.write_asm_display_sig.emit(code, pc)\n\n pc, code = self.debugger.print_function(0)\n if pc != -1:\n self.write_c_display_sig.emit(code, pc)\n\n # We are in the PLT, if the got.plt indicates the next plt\n # instruction, then the loader is to be called. Otherwise\n # we have a direct jump to the code in the library.\n if prev_pc + 6 == current_pc:\n self.step = 4\n self.state = self.ExecStates.INVOKE_LOADER\n\n self.write_console_output_sig.emit(\"[%s] It is the first \"\n \"call to %s. Lazy binding takes place. Jump returns to \"\n \"the .PLT stub. The dynamic linker will be \"\n \"called.\" % (DEBUG, func_info.name))\n else:\n self.state = self.ExecStates.CALL_FUNC\n self.write_console_output_sig.emit(\"[%s] It is not the first\"\n \" call to %s. The address indicated by the .GOT.PLT \"\n \"is %s and is the actual routine address \"\n \".\" % (DEBUG, func_info.name, func_info.got_entry.value))\n\n self.write_console_output_sig.emit(\"[%s] In the actual \"\n \"routine for the function.\" % (DEBUG))\n\n elif self.state == self.ExecStates.INVOKE_LOADER:\n self.logger.info(\"step instruction in invoke loader \"\n + str(self.step))\n\n code, _ = self.debugger.step_instruction()\n\n (pc, code) = self.debugger.print_frame(0)\n self.write_asm_display_sig.emit(code, pc)\n\n pc, code = self.debugger.print_function(0)\n if pc != -1:\n self.write_c_display_sig.emit(code, pc)\n\n if self.step == 4:\n self.write_console_output_sig.emit(\"[%s] Program jumps at \"\n \"the beginnig of the .plt section. Here there are \"\n \"a couple of instructions \"\n \"which invoke the dynamic linker.\" % (DEBUG))\n if self.step == 1: \n self.write_console_output_sig.emit(\"[%s] Dynamic linker \"\n \"invoked. It will resolve the address of the function \"\n \"called and set the correct address in the .got.plt. 
\"\n \"It also calls the function.\"% (DEBUG))\n\n self.step -= 1\n if self.step == 0:\n self.state = self.ExecStates.RET\n\n elif self.state == self.ExecStates.RET:\n b = self.debugger.continue_target()\n\n pc, code = self.debugger.print_frame(0)\n self.write_asm_display_sig.emit(code, pc)\n\n pc, code = self.debugger.print_function(0)\n if pc != -1:\n self.write_c_display_sig.emit(code, pc)\n\n self.logger.info(\"Returned from PLT\")\n self.write_console_output_sig.emit(\"[%s] Return to caller \"\n \"context.\" % (DEBUG))\n\n self.state = self.ExecStates.ON_BP_SHOW_PREV_FRAME\n\n elif self.state == self.ExecStates.CALL_FUNC:\n self.logger.info(\"in call_func\")\n\n b = self.debugger.continue_target()\n\n pc, code = self.debugger.print_frame(0)\n self.write_asm_display_sig.emit(code, pc)\n\n pc, code = self.debugger.print_function(0)\n if pc != -1:\n self.write_c_display_sig.emit(code, pc)\n\n self.state = self.ExecStates.ON_BP_SHOW_PREV_FRAME\n self.write_console_output_sig.emit(\"[%s] Return to caller \"\n \"context.\" % (DEBUG))\n\n else:\n self.write_console_output_sig.emit(\"[%s] Execution finished. \"\n \"Process exited normally.\" % (DEBUG))\n self.set_cont_btn_sig.emit(False)\n self.write_c_display_sig.emit(\"\", -1)\n\n # Update got table data\n data = self.debugger.get_got()\n self.update_got_plt_table_data(data)\n\n # Update sections table data\n data = self.debugger.get_sections()\n self.update_sections_table_data(data)\n\n # stdout\n out = self.debugger.get_stdout()\n self.write_stdout_sig.emit(out)", "def __smartdebug__(co,func_globals):\n\n from byteplay import Code,SetLineno,Label,LOAD_GLOBAL,POP_JUMP_IF_FALSE,POP_JUMP_IF_TRUE,JUMP_FORWARD\n code = Code.from_code(co)\n instructions = code.code\n\n # First, find all the \"if DEBUG:\" and \"if not DEBUG\"\n # We collect in reverse order so that we can update\n # in place more easily\n debugs = []\n for offset,op_arg in enumerate(instructions):\n if op_arg == (LOAD_GLOBAL,'DEBUG') and instructions[offset+1][0] in (POP_JUMP_IF_FALSE,POP_JUMP_IF_TRUE):\n debugs.insert(0,offset)\n\n # We want the bounds of the DEBUG true part and DEBUG false part for each\n # most ifs look like\n # LOAD_GLOBAL DEBUG\n # POP_JUMP_IF_FALSE L1 (sense may be reversed with _TRUE)\n # ...\n # JUMP_FORWARD L2\n # L1:\n # ...\n # L2:\n # They look different at the ends of loops, but I'm skipping those\n def back_one(x):\n while x > 0:\n opcode = instructions[x][0]\n if opcode != SetLineno and not isinstance(opcode,Label):\n break\n x -= 1\n return x\n def offset_of(L):\n for off,(op,_) in enumerate(instructions):\n if op is L: return off\n return None\n def true_false(x):\n pop_jump,L1 = instructions[x+1]\n O1 = offset_of(L1)\n if O1 < x: return None # Jumping backward, Loop if\n OJF = back_one(O1)\n jf,L2 = instructions[OJF]\n if jf != JUMP_FORWARD: return None # Not my pattern\n O2 = offset_of(L2)\n if pop_jump == POP_JUMP_IF_FALSE:\n return ((x+2,OJF),(OJF+1,O2),(x,O2))\n return ((OJF+1,O2),(x+2,OJF),(x,O2))\n \n\n while debugs:\n x = debugs[0]\n del debugs[0]\n bounds = true_false(x)\n if not bounds: continue\n (t0,t1),(f0,f1),(a,b) = bounds\n if func_globals.get('DEBUG',False):\n using = instructions[t0:t1]\n else:\n using = instructions[f0:f1]\n instructions[a:b] = using\n\n return code.to_code()", "async def set_breakpoint(self,\n request: debugger.SetBreakpointRequest = None,\n *,\n debuggee_id: str = None,\n breakpoint_: data.Breakpoint = None,\n client_version: str = None,\n retry: retries.Retry = gapic_v1.method.DEFAULT,\n timeout: float = 
None,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> debugger.SetBreakpointResponse:\n # Create or coerce a protobuf request object.\n # Sanity check: If we got a request object, we should *not* have\n # gotten any keyword arguments that map to the request.\n has_flattened_params = any([debuggee_id, breakpoint_, client_version])\n if request is not None and has_flattened_params:\n raise ValueError(\"If the `request` argument is set, then none of \"\n \"the individual field arguments should be set.\")\n\n request = debugger.SetBreakpointRequest(request)\n\n # If we have keyword arguments corresponding to fields on the\n # request, apply these.\n if debuggee_id is not None:\n request.debuggee_id = debuggee_id\n if breakpoint_ is not None:\n request.breakpoint_ = breakpoint_\n if client_version is not None:\n request.client_version = client_version\n\n # Wrap the RPC method; this adds retry and timeout information,\n # and friendly error handling.\n rpc = gapic_v1.method_async.wrap_method(\n self._client._transport.set_breakpoint,\n default_timeout=600.0,\n client_info=DEFAULT_CLIENT_INFO,\n )\n\n # Send the request.\n response = await rpc(\n request,\n retry=retry,\n timeout=timeout,\n metadata=metadata,\n )\n\n # Done; return the response.\n return response", "def test_setFlagsSilently(self):\n self._flagsSilentlyTest('setFlags', b'FLAGS.SILENT')", "def zero_opcodes(self):\n if self.opcode == 0x00E0:\n self.display.clear_display()\n logger.info(\"Cleared display\")\n elif self.opcode == 0x00EE:\n logger.info(\"Returned from subroutine at {}\".format(hex(self.pc)))\n self.pc = self.stack[self.stack_pointer]\n self.stack.pop()\n self.stack_pointer -= 1\n logger.info(\"to address at {}\".format(hex(self.pc)))", "def pause_debugging(self):\n\n body = DAPStoppedEventBody.create(reason=debugger.pause_reason, description=debugger.frame_location_info(),\n thread_id=0, preserve_focus_hint=False,\n all_threads_stopped=True)\n self.next_seq += 1\n DAPStoppedEvent.create(self.next_seq, body).send(self._current_client)", "def set_flag(self, flag_name, value):\n flags = {'C':0, # Carry\n 'Z':1, # Zero\n 'I':2, # Interrupt mask\n 'D':3, # Decimal\n 'B':4, # Break\n 'V':6, # Overflow\n 'N':7} # Negative\n\n flag_reg = self.get_register('P')\n if value == 1:\n new_flag = flag_reg | 1 << flags[flag_name]\n else:\n new_flag = flag_reg & ~(1 << flags[flag_name])\n\n self.set_register('P', new_flag)", "def fl_set_form_hotspot(ptr_flform, xpos, ypos):\n _fl_set_form_hotspot = library.cfuncproto(\n library.load_so_libforms(), \"fl_set_form_hotspot\", \\\n None, [cty.POINTER(xfdata.FL_FORM), xfdata.FL_Coord,\n xfdata.FL_Coord], \\\n \"\"\"void fl_set_form_hotspot(FL_FORM * form, FL_Coord x,\n FL_Coord y) \"\"\")\n library.check_if_flinitialized()\n library.verify_flformptr_type(ptr_flform)\n i_xpos = library.convert_to_FL_Coord(xpos)\n i_ypos = library.convert_to_FL_Coord(ypos)\n library.keep_elem_refs(ptr_flform, xpos, i_xpos, ypos, i_ypos)\n _fl_set_form_hotspot(ptr_flform, i_xpos, i_ypos)", "def set_breakpoint(self, lineno, state):\n found = False\n for line in self.avr.list:\n if line['line'] == lineno:\n found = True\n break\n if line['line'] > lineno:\n break\n if found:\n if state == 0:\n brkpt.remove(lineno)\n elif state == 1:\n brkpt.append(lineno)\n return found", "def get_breakpoint(self, id):\r\n res = self.bpoints.filter( keys=('id',), values=( id,) )\r\n if res==[]:\r\n return None\r\n return res[0]", "def set_system_flags(self, 
sNewVmSystemFlags):\n\t\tcall_sdk_function('PrlVmCfg_SetSystemFlags', self.handle, sNewVmSystemFlags)", "def setHalt(self):\n self.set_register(REG_TYPE, SETPOINT_HALT, 'int')", "def RET(self):\n\t\tself.SP -= 1\n\t\tself.IP = self.stack[self.SP]", "def msg_console_switched(self, msg):\r\n #update the paused/line number markers\r\n self.frame.notebook.UpdatePauseMarkers()\r\n\r\n #update the bp markers in the editor pages\r\n pages = self.frame.notebook.GetAllPages()\r\n for page in pages:\r\n page.UpdateBreakpointSymbols()", "def pdbin(self, pdbin):\n self._pdbin = pdbin", "def settrace_patch(tracefunc: Any) -> None:\n global _is_debugger_active\n _is_debugger_active = bool(tracefunc)\n try:\n _original_settrace(tracefunc)\n except Exception:\n # IDEs, such as PyCharm, may ban calls to settrace().\n # http://pydev.blogspot.com/2007/06/why-cant-pydev-debugger-work-with.html\n # In such cases, do nothing.\n pass", "def wdb_f(self, arg):\n\n global rpdb_started\n if not arg.strip():\n print __doc__\n return\n \n if arg.strip() == 'pass':\n passwd = raw_input('Enter new winpdb session password: ')\n ip.db['winpdb_pass'] = passwd\n print \"Winpdb password changed\"\n if rpdb_started:\n print \"You need to restart IPython to use the new password\"\n return \n \n path = os.path.abspath(arg)\n if not os.path.isfile(path):\n raise UsageError(\"%%wdb: file %s does not exist\" % path)\n if not rpdb_started:\n passwd = ip.db.get('winpdb_pass', None)\n if passwd is None:\n import textwrap\n print textwrap.dedent(\"\"\"\\\n Winpdb sessions need a password that you use for attaching the external\n winpdb session. IPython will remember this. You can change the password later \n by '%wpdb pass'\n \"\"\")\n passwd = raw_input('Enter new winpdb session password: ')\n ip.db['winpdb_pass'] = passwd\n \n print \"Starting rpdb2 in IPython process\"\n rpdb2.start_embedded_debugger(passwd, timeout = 0)\n rpdb_started = True\n \n rpdb2.set_temp_breakpoint(path)\n print 'It is time to attach with WinPdb (launch WinPdb if needed, File -> Attach)'\n ip.magic('%run ' + arg)", "def patchCode(code, value, bits):\n assert(type(code) == type(value) == type(bits) == int)\n mask = 2**bits-1\n return (code & ~mask) | (value & mask)", "def interrupt(v):\n print(\" \" + bcolors.OKBLUE + \"[!] Detected CTRL+C ! 
restoring setting, please wait...\" + bcolors.ENDC)\n bash = \"ip link delete dummy type dummy\"\n os.system(bash)\n if v.spoof:\n restoreSpoof(v)\n if v.ntpStatus:\n ntpToggle(v)\n print(\" \" + bcolors.OKGREEN + \"Done\")\n print(\" --------------------------------------------------------\" + bcolors.ENDC)\n exit()", "def _breakpoint_info(self, index: Optional[int]) -> Tuple[Optional[int], Optional[XValue[T]], float]:\n try:\n breakpoint, value = self.breakpoints.peekitem(index)\n except IndexError:\n index = None\n breakpoint, value = None, self.breakpoints.values()[-1]\n return (index, breakpoint, value)", "def _markForBackpatch(self, index, label, bits, relative):\n assert(type(label) == str)\n assert(type(relative) == bool)\n self.backpatchQueue.append((self.codeptr, label, bits, relative))", "def _injectFault(self, regName, regAddress, mask):\n try:\n self.target.halt()\n current = self.target.readMemory(regAddress)\n logging.debug(\"Current {}: 0x{:X}\".format(regName,current))\n newContent = current ^ mask\n logging.debug(\"New content to be written: 0x%X\" % newContent)\n self.target.writeMemory(regAddress, newContent)\n logging.log(25, \"Successfully injected fault into %s\" % regName)\n except Exception as e:\n logging.error(\"Failed to inject a fault into {}!\\n{}\".format(regName,e))\n finally:\n self.target.resume()", "def instruction_jt(self, value, address):\n if Vm.is_register(value):\n value = self.get_register(value)\n\n if value > 0:\n self.exec_ptr = Vm.filter_mem_address(address)\n else:\n self.exec_ptr += 3" ]
[ "0.5881102", "0.5785291", "0.5218082", "0.5208455", "0.5201885", "0.51153344", "0.5073578", "0.50049704", "0.49999252", "0.4934474", "0.48571247", "0.4838905", "0.48162797", "0.47711107", "0.47597492", "0.4733747", "0.46354747", "0.46129856", "0.46129563", "0.46084633", "0.45189667", "0.45098227", "0.4471261", "0.44446117", "0.44293693", "0.4393159", "0.4339281", "0.43294948", "0.4319227", "0.43135446", "0.43098253", "0.4300976", "0.42877787", "0.4286201", "0.42801777", "0.4270642", "0.42636997", "0.4238033", "0.42280143", "0.4227811", "0.4225579", "0.42038712", "0.41964638", "0.41953373", "0.41764104", "0.41665068", "0.41636136", "0.4162425", "0.41445905", "0.4135007", "0.41336572", "0.41310462", "0.411999", "0.41177157", "0.4116505", "0.40945703", "0.4088557", "0.40878883", "0.40873405", "0.40829152", "0.40521446", "0.40511712", "0.40483215", "0.40459147", "0.40165168", "0.39682502", "0.39656726", "0.3963929", "0.39638814", "0.39457136", "0.39357147", "0.3934939", "0.3926605", "0.3917512", "0.39174688", "0.3910791", "0.3883383", "0.3875219", "0.38584605", "0.38531515", "0.38331085", "0.3831304", "0.38266042", "0.38238344", "0.38229477", "0.38204864", "0.38167965", "0.38055915", "0.38048786", "0.3801732", "0.3800866", "0.3797758", "0.37952736", "0.37900928", "0.3777099", "0.3769775", "0.37669593", "0.37614894", "0.37599376", "0.3759617" ]
0.65059483
0
Wrapper around the Node structure of the tree for inserting and querying
def __init__(self, theta, k, num_buckets, fp_size, bucket_size, max_iter):
    self.root: Optional[Node] = None
    self.theta: float = theta
    self.k: int = k
    self.num_buckets = num_buckets
    self.fp_size = fp_size
    self.bucket_size = bucket_size
    self.max_iter = max_iter
    self.aggregate_size = self.get_insternal_size()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def binary_tree():\n\n class Node(object):\n def __init__(self, data):\n self.left = None\n self.right = None\n self.data = data\n\n # Create a root\n root = Node(data=1)\n root.left = Node(data=2)\n root.right = Node(data=3)\n root.left.left = Node(data=4)\n \"\"\" Structure\n 1 <-- root\n / \\\n 2 3 \n / \n 4\n \"\"\"", "def construct_tree():\n root = TreeNode(5)\n root.left = TreeNode(3)\n root.right = TreeNode(8)\n root.left.left = TreeNode(2)\n root.left.right = TreeNode(4)\n root.right.left = TreeNode(7)\n return root", "def __init__(self):\n self.root = self.get_new_node();", "def build():\n r = TreeNode(1)\n r.left = TreeNode(2)\n r.left.left = TreeNode(4)\n r.left.right = TreeNode(5)\n\n r.right = TreeNode(3)\n\n return r\n return TreeNode(3)", "def __init__(self, tree_node=None):\n self.root = tree_node", "def __init__(self, node):\n self.node = node\n self.parent = None\n self.depth = None", "def test_tree_two_nodes_right(one_t):\n one_t.insert(5)\n assert one_t.depth() == 1", "def build():\n root = TreeNode(3)\n root.left = TreeNode(2)\n root.right = TreeNode(4)\n root.right.right = TreeNode(7)\n root.right.left = TreeNode(5)\n return root", "def insertnode(self, node_path, node_val):\n\t\t# Get to the correct tree\n\t\tcurr_tree = self\n\t\tfor node_name in node_path[1:]:\n\t\t\tcurr_tree = curr_tree[node_name]\n\t\t\n\t\t# Allocate to tree (only once)\n\t\tif curr_tree.name == None:\n\t\t\tcurr_tree.name = node_path[-1]\n\t\t\tcurr_tree.value = node_val\n\t\telse:\n\t\t\tprint curr_tree.name\n\t\t\tprint node_path\n\t\t\tassert(False)", "def __init__(self, value: object) -> None:\n self.value = value # to store node's data\n self.left = None # pointer to root of left subtree\n self.right = None # pointer to root of right subtree", "def __init__(self, value: object) -> None:\n self.value = value # to store node's data\n self.left = None # pointer to root of left subtree\n self.right = None # pointer to root of right subtree", "def __init__(self):\n self.tree = {}", "def tree(self) -> Node:\n return Node(self.to_string())", "def __init__(self):\n self.root = TridNode()", "def __init__(self):\n self.left = None\n self.right = None\n self.depth = 0\n self.val = None\n self.id = None", "def __insert_tree(self, t):\n\t\tif not t:\n\t\t\treturn\n\t\tif t.value > self.value:\n\t\t\tif self.right == None:\n\t\t\t\tself.right = t\n\t\t\telse:\n\t\t\t\tself.right.__insert_tree(t)\n\t\telif t.value < self.value:\n\t\t\tif self.left == None:\n\t\t\t\tself.left = t\n\t\t\telse:\n\t\t\t\tself.left.__insert_tree(t)", "def __init__(self,tree):\n self._tree = tree", "def binary_search_tree_run():\n\n # no need for Tree object as the Tree itself is a concept; its made of connected nodes\n # nodes are the object; connections are self contained\n\n def binary_insert(root, node):\n if root is None:\n root = node\n else:\n if root.data > node.data:\n if root.l_child is None:\n root.l_child = node\n else:\n binary_insert(root.l_child, node)\n else:\n if root.r_child is None:\n root.r_child = node\n else:\n binary_insert(root.r_child, node)\n\n def in_order_print(root):\n if not root:\n return\n in_order_print(root.l_child)\n print(root.data)\n in_order_print(root.r_child)", "def __init__(self, tree):\n self._tree = tree", "def __init__(self, tree):\n self._tree = tree", "def build():\n\n root = Node(9)\n root.left = Node(6)\n root.left.left = Node(3)\n root.left.right = Node(8)\n root.left.right.left = Node(7)\n root.right = Node(14)\n root.right.left = Node(12)\n return root", "def _insert(self, 
key: int) -> TreeNode:\n node = self.root\n while True:\n # Check if a key is greater than node.\n if key > node.val:\n if not node.right:\n # node.right is a leaf\n node.right = TreeNode(val=key)\n node.right.parent = node\n return node\n node = node.right\n elif key < node.val:\n if not node.left:\n # node.left is a leaf\n node.left = TreeNode(val=key)\n node.left.parent = node\n return node\n node = node.left\n else:\n # print(f\"{key}: already in a Tree.\")\n return", "def __init__(self):\n self.root = TreeNode(\"\")", "def __init__(self):\n self.root = TreeNode(\"\")", "def insert(self, key):\n if self.root is None:\n self.root = self.Node(key)\n else:\n self.root = self.root.insert(key)", "def __init__(self):\n self.root = TreeNode(None)", "def insert(self,node,key):\n position=self.find(node,key)\n if position.key==key:\n print(\"node already present\")\n elif position.key>key:\n n=Node(key)\n position.setLeftChild(n)\n n.setParent(position)\n print(n.getParent())\n else:\n n=Node(key)\n position.setRightChild(n)\n n.setParent(position)", "def __init__(self):\n self.root = Node()", "def __init__(self):\n self.root = Node()", "def __init__(self):\n self.root = Node()", "def __init__(self):\n self.root = Node('')", "def __init__(self):\n self.root = self.Node()", "def _insert(self, node, key, value_ref):\n #create a tree if there was none so far\n if node is None:\n #print ('a')\n new_node = RedBlackNode(\n RedBlackNodeRef(), key, value_ref, RedBlackNodeRef())\n elif key < node.key:\n newleft_ref = self._insert(self._follow(node.left_ref), key, value_ref)\n newleft = self.balance(self._follow(newleft_ref))\n new_node = self.balance(RedBlackNode.from_node(\n node,\n left_ref=RedBlackNodeRef(referent=newleft)))\n elif key > node.key:\n newright_ref = self._insert(self._follow(node.right_ref), key, value_ref)\n newright = self.balance(self._follow(newright_ref))\n new_node = self.balance(RedBlackNode.from_node(\n node,\n right_ref=RedBlackNodeRef(referent=newright)))\n else: #create a new node to represent this data\n new_node = RedBlackNode.from_node(node, value_ref=value_ref)\n #new_node = self._blacken(new_node)\n return RedBlackNodeRef(referent=new_node)", "def add_node(self, node):", "def tree():\n return defaultdict(tree)", "def __init__(self):\n self.root = self.Node(None)", "def __init__(self):\n self.root = Node(\"\")", "def __init__(self):\n self.root = Node(\"\")", "def insert(node, key):\n # If the tree is empty, return a new node\n if node is None:\n return Node(key)\n\n # Otherwise recur down the tree\n if key < node.key:\n node.left = insert(node.left, key)\n else:\n node.right = insert(node.right, key)\n\n # return the (unchanged) node pointer\n return node", "def __init__(self):\n self.root = SimpleNode()", "def tree(self):\n this_node = Node(self.name)\n [this_node.add_child(child.tree()) for child in self.children]\n\n return this_node", "def build():\n # root = TreeNode(5)\n # root.left = TreeNode(2)\n # root.right = TreeNode(7)\n # return root\n\n \"\"\"\n 5\n / \\\n 2 6\n / \\\n 1 3\n [5,2,1,3,6]\n \"\"\"\n _5 = TreeNode(5)\n _2 = TreeNode(2)\n _6 = TreeNode(6)\n _1 = TreeNode(1)\n _3 = TreeNode(3)\n _5.left = _2\n _5.right = _6\n _2.left = _1\n _2.right = _3\n return _5", "def test_tree_binary_tree() -> None:\n t = generate_binary_tree_resources(4, 3)\n field(t, (\"root\", \"ds\", \"f1\")).identity = \"email\"\n field(t, (\"root.0.1.0\", \"ds.0.1.0\", \"f1\")).identity = \"ssn\"\n field(t, (\"root.1.1\", \"ds.1.1\", \"f1\")).identity = \"user_id\"\n assert 
generate_traversal({\"email\": \"X\"}, *t)\n assert generate_traversal({\"ssn\": \"X\"}, *t)\n assert generate_traversal({\"user_id\": \"X\"}, *t)", "def _tree():\n return collections.defaultdict(_tree)", "def _gen_test_tree_3():\n tree = BinaryNode(5)\n tree.left = BinaryNode(1)\n tree.left.left = BinaryNode(2)\n tree.left.right = BinaryNode(3)\n tree.right = BinaryNode(7)\n tree.right.left = BinaryNode(8)\n tree.right.right = BinaryNode(9)\n return tree", "def testInsertDeep(self):\n\n #insert\n for i in xrange(randint(50, 180)):\n self.s.insert(randint(-2147483648,2147483647), i)\n\n #walk through the tree\n self.assertIsNotNone(self.s._root)\n self.assertIsNone(self.s._root.parent)\n self.assertIsNotNone(self.s._root.left)\n self.assertIsNotNone(self.s._root.right)\n\n def traversalHelper(n):\n if not n:\n return\n self.assertTrue((n.parent.left is n) or (n.parent.right is n))\n traversalHelper(n.left)\n traversalHelper(n.right)\n\n traversalHelper(self.s._root.left)\n traversalHelper(self.s._root.right)", "def __init__(self):\n self.root = Node(None)", "def __init__(self):\n # use a Trie as a data structure\n self.root = Node()", "def __init__(self): # 用dict模拟字典树即可\n self.root = {}", "def __getitem__(self, i: int) -> 'Tree':\n ...", "def __init__(self):\n Tree.__init__(self, \"\")", "def test_tree_two_nodes_left_has_depth_one(one_t):\n one_t.insert(5)\n assert one_t.depth() == 1", "def _insert(self, root: TreeNode, node: TreeNode):\n if root is None:\n return # Could simply return/\"rebound\" the node parameter up the stack and assign where needed, or return\n\n if node.key < root.key: # First check to determine direction: left\n if root.left is None: # Second check to check if a left child doesn't exist\n root.left = node # If it doesn't simply assign\n else:\n self._insert(root.left, node) # Else, simply recur left\n\n elif node.key > root.key: # Similar for the right subtree\n if root.right is None:\n root.right = node\n else:\n self._insert(root.right, node)", "def __init__(self, root):\n self._root = root\n self._leaves = [root]", "def create_node_tree(self, node_tree):\n # allow it to accept both a list or dict\n if isinstance(node_tree, list):\n created_root_nodes = []\n for item in node_tree:\n created_root_nodes.append(\n self.create_node_tree(item)\n )\n return created_root_nodes\n\n node_type = node_tree['type']\n\n self.comp.Lock()\n node = self.comp.AddTool(node_type)\n self.comp.Unlock()\n\n # attributes\n if 'attr' in node_tree:\n attributes = node_tree['attr']\n for key in attributes:\n value = attributes[key]\n if isinstance(value, dict):\n new_node = self.create_node_tree(value)\n node.Input = new_node\n else:\n node.SetAttrs({key: value})\n\n # input lists\n if 'input_list' in node_tree:\n input_list = node_tree['input_list']\n for key in input_list:\n node_input_list = node.GetInputList()\n for input_entry_key in node_input_list.keys():\n input_entry = node_input_list[input_entry_key]\n input_id = input_entry.GetAttrs()['INPS_ID']\n if input_id == key:\n value = input_list[key]\n input_entry[0] = value\n break\n\n # ref_id\n if 'ref_id' in node_tree:\n node.SetData('ref_id', node_tree['ref_id'])\n\n # connected to\n if 'connected_to' in node_tree:\n connected_to = node_tree['connected_to']\n if 'Input' in connected_to:\n input_node = self.create_node_tree(connected_to['Input'])\n node.Input = input_node\n elif 'ref_id' in node_tree['connected_to']:\n ref_id = node_tree['connected_to']['ref_id']\n print('ref_id: %s' % ref_id)\n # find a node with ref_id equals to 
ref_id that is given in the\n # node tree\n all_nodes = self.comp.GetToolList().values()\n for r_node in all_nodes:\n node_ref_id = r_node.GetData('ref_id')\n print('node_ref_id: %s' % node_ref_id)\n if node_ref_id == ref_id:\n node.Input = r_node\n break\n\n return node", "def __init__(self, val=None):\n self.val = val\n self.parent = None\n if val is not None:\n self.left = BSTree()\n self.right = BSTree()\n else:\n self.left = None\n self.right = None", "def _gen_test_tree_2():\n tree = BinaryNode(5)\n tree.left = BinaryNode(3)\n tree.left.left = BinaryNode(2)\n tree.left.left.left = BinaryNode(1)\n tree.left.right = BinaryNode(4)\n tree.right = BinaryNode(7)\n tree.right.left = BinaryNode(6)\n tree.right.right = BinaryNode(8)\n tree.right.right.right = BinaryNode(9)\n return tree", "def addChild(node):", "def _gen_test_tree_6():\n tree = BinaryNode(20)\n tree.left = BinaryNode(10)\n tree.right = BinaryNode(30)\n tree.left.right = BinaryNode(25)\n return tree", "def __init__(self, data, parent):\n self.left = None\n self.right = None\n self.data = data\n self.parent = parent", "def __init__(self):\n self.__root = Node()", "def insert(self, node):\n if node is None:\n return\n if node.key < self.key:\n if self.left is None:\n node.parent = self\n self.left = node\n else:\n self.left.insert(node)\n else:\n if self.right is None:\n node.parent = self\n self.right = node\n else:\n self.right.insert(node)", "def __init__(self, root_node):\n\n\t\tself.root = root_node\n\t\tself.left_child = None\n\t\tself.right_child = None", "def Insert(root, node):\n target = root.ChooseLeaf(node)\n node.father = target\n target.leaves.append(node)\n target.MBR = merge(target.MBR, node.MBR)\n target.AdjustTree()\n if root.father != None:\n root = root.father\n return root", "def __init__(self, container=[]):\n # Initialize empty tree.\n self.root = None\n # Insert every item from container.\n for item in container:\n self.insert(item)", "def insert(self, key, value=None):\n if key in self.nodes:\n return None\n else:\n new_node = Node(key, value)\n (self.nodes)[key] = new_node \n current = self.root\n last = current\n\n if current is None:\n self.root = self.nodes[key]\n self.root.height = 0\n return new_node\n\n while (current is not None):\n if new_node.key > current.key:\n last = current\n current = current.right\n if (current != None and current.left == None) or (current == self.root):\n current.height += 1\n else:\n last = current\n current = current.left\n if (current != None and current.left == None) or (current == self.root):\n current.height += 1\n\n if new_node.key > last.key:\n last.right = new_node\n new_node.parent = last\n else:\n last.left = new_node\n new_node.parent = last\n\n self.root.height = self.get_height_tree()\n return new_node", "def insert(self, index: int, tree: 'Tree') -> None:\n ...", "def __call__(self, node):\n if node.children:\n if len(node.children) == 1:\n if self.TagEqual(node.children[0], node):\n #print node.ToString()\n node.tag = self.Tag(node, node.children[0]);\n lst = node.children[0].children;\n node.children = lst;", "def insert(self, node):\n if node is None:\n return\n if node.key < self.key:\n if self.left is None:\n node.parent = self\n self.left = node\n else:\n self.left.insert(node)\n else:\n if self.right is None:\n node.parent = self\n self.right = none\n else:\n self.right.insert(node)", "def _store_node(self, item, name=None, args=None, kwargs=None):\n if isinstance(item, NodeDef):\n if item.owner == self._root:\n # Don't re-add nodes already in this 
graph, just return the path\n return item.path\n else:\n # Node definitions from another graph need to be rebased\n return self._store_node_def(item.rebase(self._root, self._prefix))\n elif isinstance(item, functools.partial):\n if args is not None or kwargs is not None:\n raise ValueError(\"Extra arguments and nesting not supported for partial functions.\")\n return self._store_node(item.func, name=name, args=item.args, kwargs=item.keywords)\n elif isinstance(item, NamedFunc):\n return self._store_node(item.func, item.name)\n elif callable(item):\n # Try to extract the node key if it is not known yet\n if name is None:\n if hasattr(item, \"im_func\"):\n name = item.im_func.func_name\n elif hasattr(item, \"func_name\"):\n name = item.func_name\n if name == \"<lambda>\":\n raise ValueError(\"Anonymous lambda functions are unsupported\")\n else:\n raise ValueError(\"Can't deduce name for %s\" % item)\n\n # Ensure args is a tuple and not a mutable list\n if args is not None:\n args = tuple(args)\n\n return self._store_node_def(NodeDef(self._root, item, self._prefix, name, args, kwargs))\n else:\n raise ValueError(\"Unsupported node specification %s\" % item)", "def __setitem__(self, i: int, o: 'Tree') -> None:\n ...", "def deserialize(self, data):\n if len(data) == 0:\n return None\n root = TreeNode(data[0])\n root.left = self.deserialize(data[1]) \n root.right = self.deserialize(data[2])\n return root", "def __init__(self, key, tree=None, parent=None, left=None, right=None):\n super().__init__(key)\n\n self.tree = tree if isinstance(tree, BinaryTree) else None\n self.parent = BinaryNode.or_none(parent)\n self.left = BinaryNode.or_none(left)\n self.right = BinaryNode.or_none(right)", "def __init__(self):\n\n self.root = Node(name='root',children=set())\n\n self.map = {}\n self.map['root'] = self.root\n\n self.jsonStr = \"\"", "def insert_node(self, data):\n\t\tif self.root is None:\n\t\t\tself.root = Node(data)\n\t\telse:\n\t\t\tcurrent_node = self.root\n\t\t\twhile current_node.next is not None:\n\t\t\t\tcurrent_node = current_node.next\n\t\t\tcurrent_node.next = Node(data, current_node)", "def bst_insert(root, data):\n if root is None:\n root = Tree(d=data)\n elif data > root.data:\n root.right = bst_insert(root.right, data)\n else:\n root.left = bst_insert(root.left, data)\n return root", "def _gen_test_tree_1():\n tree = BinaryNode(5)\n tree.left = BinaryNode(5)\n return tree", "def __init__(self):\n\n\t\tself.root = None\n\t\tself.numNodes = 0", "def __init__(self, root: Node = None):\n # this alllows us to initialize by copying an existing tree\n self.root = deepcopy(root)\n if self.root:\n self.root.parent = None\n self.size = 0 if not self.root else self.root.subtree_size()", "def serialize(node, tree=\"\"):\n \n \n if (not node): #Base case\n tree += \"# \"\n return tree\n tree += (str(node.val) + \" \")\n tree = serialize(node.left, tree)\n tree = serialize(node.right, tree)\n\n return tree", "def _gen_test_tree_4():\n tree = BinaryNode(5)\n tree.left = BinaryNode(3)\n tree.left.left = BinaryNode(2)\n tree.left.right = BinaryNode(10)\n tree.right = BinaryNode(9)\n tree.right.left = BinaryNode(6)\n tree.right.right = BinaryNode(8)\n return tree", "def __init__(self, c):\n TreeNode.__init__(self)\n self.c = c", "def __insert(self, node, value):\n #if DEBUG: print('\\t__insert({})'.format(value))\n\n new = Node(value, node.next)\n node.next = new\n return new", "def save_node(self):\n # save node in path2node\n if self.full_path in self.file.path2node:\n print \"** Error, created node 
with path twice:\\n%s\" % self.full_path\n traceback.print_stack()\n sys.exit(1)\n self.file.path2node[self.full_path] = self \n # save node in id_lookups\n id = self.sdef['id']\n ns = self.sdef['ns']\n type = self.sdef['type']\n custom = 'custom' in self.sdef and self.sdef['custom']\n if self.parent is None and self.sdef['df'] and not custom:\n # structure (not custom) created at top level, save in id_lookups\n if id not in self.file.id_lookups[ns]:\n print \"** Error: Unable to find id '%s' in id_lookups when saving node\" % id\n traceback.print_stack()\n sys.exit(1)\n if self.path not in self.file.id_lookups[ns][id]:\n print (\"** Error: Unable to find path '%s' in id_lookups when\"\n \" saving node %s\") % (self.path, id)\n print \"self.sdef['df'] is:\"\n pp.pprint (self.sdef['df'])\n traceback.print_stack()\n sys.exit(1)\n self.file.id_lookups[ns][id][self.path]['created'].append(self)\n # save node in all_nodes, either at top level (if no parent) or inside\n # mstats structure of parent node\n if self.parent is None:\n if self.path in self.file.all_nodes:\n self.file.all_nodes[self.path].append(self)\n else:\n self.file.all_nodes[self.path] = [self, ]\n else:\n if id not in self.parent.mstats:\n if custom:\n # custom node created, add id to mstats of parent\n self.parent.mstats[id] = { 'df': {}, 'type':type, 'ns': ns,\n 'created': [ self, ], 'qty':'?' }\n else:\n print \"** Error: Unable to find key '%s' in parent mstats\" % id\n print \"self.parent.mstats is\"\n pp.pprint (self.parent.mstats)\n traceback.print_stack()\n sys.exit(1)\n else: \n # append node to parent created mstats \n self.parent.mstats[id]['created'].append(self)", "def insert(self, k):\n node = self.klass(None, k)\n if self.root is None:\n # The root's parent is None.\n self.root = node\n else:\n self.root.insert(node)\n return node", "def add_node(self, title='', parent=None, by_title=False):\n\n sel_stmt = []\n conn = self.engine.connect()\n\n # cover cases where id is sent as int, str or row object\n parent_id = parent\n try:\n parent_id = parent_id.id\n except AttributeError:\n pass\n\n if by_title:\n parent_id = self.get_first_id(parent)\n if not parent_id:\n raise Exception('Parent node does not exist.')\n\n if parent_id is not None:\n # check parent exists\n if not self.node_exists(parent_id):\n raise Exception('Parent node does not exist.')\n\n # store new node\n new_node_pk = conn.execute(self.nodes.insert(), {'title': title}).inserted_primary_key[0]\n\n # add new paths for all the ancestors of the parent node\n sel_stmt.append(\n select(\n [self.paths.c.ancestor, bindparam('d1', new_node_pk), self.paths.c.depth + 1]\n ).where(\n self.paths.c.descendant == parent_id\n )\n )\n else:\n # add new node\n new_node_pk = conn.execute(self.nodes.insert(), {'title': title}).inserted_primary_key[0]\n\n # add path to self\n sel_stmt.append(\n select(\n [bindparam('a2', new_node_pk), bindparam('d2', new_node_pk), bindparam('l2', 0)]\n )\n )\n\n # add paths\n conn.execute(self.paths.insert().from_select(['ancestor', 'descendant', 'depth'], union_all(*sel_stmt)))\n\n return new_node_pk", "def insert(self, child, key):\n childNode = BTNode(key)\n if child == \"left\":\n self.left = childNode\n elif child == \"right\":\n self.right = childNode", "def __init__(self, klass = BSTNode):\n self.root = None\n self.klass = klass", "def __init__(self, klass = BSTNode):\n self.root = None\n self.klass = klass", "def _bddnode(root, lo, hi):\n\t# print(\"_bddnode\")\n\tif lo is hi:\n\t\tnode = lo\n\telse:\n\t\tkey = (root, lo, 
hi)\n\t\ttry:\n\t\t\tnode = _NODES[key]\n\t\texcept KeyError:\n\t\t\tnode = _NODES[key] = BDDNode(*key)\n\treturn node", "def __init__(self, word):\n TreeNode.__init__(self)\n self.word = word\n self.children = []", "def __init__(self):\n self.root = TrieNode()\n self.first_row = self.root.children", "def node_create(self, parent, path):\n\n q = (\"insert into nodes (parent, path) \"\n \"values (?, ?)\")\n props = (parent, path)\n return self.execute(q, props).lastrowid", "def __init__(self, p=None, l=None, r=None, d=None):\n self.parent = p\n self.left = l\n self.right = r\n self.data = d", "def add(tree, item):\n # This is a non recursive add method. A recursive method would be cleaner.\n if tree.root == None: # ... Empty tree ...\n tree.root = Node(item, None, None) # ... so, make this the root\n else:\n lst = []\n # Find where to put the item\n child_tree = tree.root\n while child_tree != None:\n parent = child_tree\n lst.append(parent)\n if item < child_tree.item: # If smaller ...\n child_tree = child_tree.left # ... move to the left\n elif item > child_tree.item:\n child_tree = child_tree.right\n\n # child_tree should be pointing to the new node, but we've gone too far\n # we need to modify the parent nodes\n if item < parent.item:\n parent.left = Node(item, None, None)\n elif item > parent.item:\n parent.right = Node(item, None, None)\n # Ignore the case where the item is equal.\n for items in lst[-2::-1]:\n if abs(tree.recurse_height(items.left) - tree.recurse_height(items.right)) > 1:\n return items.item\n\n #\n # Note that you can get the height of a node by calling tree.recurse_height().\n # For example, the height of the root is tree.recurse_height(tree.root)\n #", "def insert(self, data):\n \n def _find_parent(current, node):\n \"\"\"Recursively descend through the tree to find the node that\n should be the parent of the new node. 
Do not allow for duplicates.\n \"\"\"\n \n if node == current:\n raise ValueError(str(node.data) + \" is already in the tree.\")\n if node < current: # Travel left\n if current.left:\n return _find_parent(current.left,node)\n else:\n return current\n else: # Travel right\n if current.right:\n return _find_parent(current.right,node)\n else:\n return current\n \n n = KDTNode(data) # Make a new node\n if len(data) != self.k:\n raise ValueError(\"data must be of length \" + str(self.k))\n if not self.root:\n self.root = n # Case 1: empty tree\n n.axis = 0\n else: # Case 2: use _find_parent\n parent = _find_parent(self.root, n) # Get the parent\n if n < parent: parent.left = n # Insert the node\n else: parent.right = n\n n.prev = parent # Double link\n n.axis = (n.prev.axis + 1) % self.k\n return n", "def build_UNIST_tree():\n root = LinkedBinaryTree()", "def __init__(self, root):\r\n self.root = root\r\n self.nodes = [root]\r\n self.nodes.extend(Node.all_descendants(self.root))\r\n self.node_ids = [ n.id for n in self.nodes ]", "def insert(self, item):\n # Handle the case where the tree is empty\n if self.is_empty():\n # if self.root is None:\n # TODO: Create a new root node\n self.root = ...\n # TODO: Increase the tree size\n self.size ...\n return\n # Find the parent node of where the given item should be inserted\n parent = self._find_parent_node(item)\n # TODO: Check if the given item should be inserted left of the parent node\n if ...:\n # TODO: Create a new node and set the parent's left child\n parent.left = ...\n # TODO: Check if the given item should be inserted right of the parent node\n elif ...:\n # TODO: Create a new node and set the parent's right child\n parent.right = ...\n # TODO: Increase the tree size\n self.size ...", "def __init__(self):\n self.root = TreeNode('#')", "def tree(self):\r\n return self._tree" ]
[ "0.71559274", "0.6676784", "0.6605543", "0.6523582", "0.650738", "0.6498414", "0.64883316", "0.648779", "0.64652824", "0.6465034", "0.6465034", "0.64216363", "0.64207244", "0.6376458", "0.6372433", "0.63648695", "0.6343655", "0.63167363", "0.6310797", "0.6310797", "0.63025945", "0.62941474", "0.62934977", "0.62934977", "0.6274675", "0.6270851", "0.6269986", "0.62642336", "0.62642336", "0.62642336", "0.6255411", "0.62457836", "0.6245747", "0.6234959", "0.6234438", "0.6207895", "0.6202519", "0.6202519", "0.6200633", "0.6199087", "0.6188376", "0.6174654", "0.6173721", "0.61664706", "0.61456543", "0.6141447", "0.6128344", "0.612683", "0.6123582", "0.61135215", "0.61106086", "0.61070156", "0.61035573", "0.61032593", "0.6096938", "0.6096594", "0.60886323", "0.6082853", "0.6081729", "0.6078335", "0.6075237", "0.60725343", "0.6067889", "0.6065609", "0.6064819", "0.6062851", "0.60603726", "0.6056098", "0.605194", "0.60466206", "0.6032479", "0.6028757", "0.6011276", "0.6006423", "0.59894705", "0.5981941", "0.5980279", "0.597026", "0.5967344", "0.5960216", "0.5955494", "0.5948944", "0.5943872", "0.59412676", "0.5940667", "0.5931222", "0.5928138", "0.59273267", "0.59273267", "0.59234357", "0.5919615", "0.59159106", "0.591574", "0.591572", "0.5913737", "0.5912903", "0.59106076", "0.59026086", "0.5902581", "0.58976007", "0.5890211" ]
0.0
-1
Creates a new node from this read and adds it into the tree
def insert(self, dataset: List[Read]) -> bool:
    node_to_insert = Node(self.k, self.num_buckets, self.fp_size, self.bucket_size, self.max_iter)
    node_to_insert.populate_dataset_info(dataset)
    self.aggregate_size += node_to_insert.get_size()
    if self.root is None:
        self.root = node_to_insert
        return True
    parent = None
    current = self.root
    while current:
        if current.num_children() == 0:
            """
            current is a leaf representing a dataset, so create a new parent
            that contains node_to_insert and current as children
            """
            new_parent = Node(self.k, self.num_buckets, self.fp_size, self.bucket_size, self.max_iter)
            self.aggregate_size += new_parent.get_size()
            new_parent.parent = parent
            # Kmers from existing and new leaf
            new_parent.filter = deepcopy(current.filter)
            new_parent.insert_kmers_from_dataset(dataset)
            # Set appropriate parent/child pointers
            current.parent = new_parent
            node_to_insert.parent = new_parent
            new_parent.children.append(current)
            new_parent.children.append(node_to_insert)
            # Special case where root is a leaf
            if parent is None:
                # current is root -> new_parent is now root
                self.root = new_parent
                return True
            # Set new_parent as child of old parent
            idx = parent.children.index(current)
            parent.children[idx] = new_parent
            return True
        elif current.num_children() == 1:
            # insert kmers
            current.insert_kmers_from_dataset(dataset)
            # we found an empty slot to insert into
            current.children.append(node_to_insert)
            return True
        elif current.num_children() == 2:
            # insert kmers
            current.insert_kmers_from_dataset(dataset)
            # select "best" child
            score_0 = current.children[0].score(dataset)
            score_1 = current.children[1].score(dataset)
            best_child = 0 if score_0 < score_1 else 1
            # recur
            parent = current
            current = current.children[best_child]
    raise Exception("Did not insert successfully!")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_node(self, node):", "def add_node(self, name):\n for node in self.get_children():\n if node.read_name() == name:\n break\n else:\n root = self.get_sobj()\n sobj = self._bld.NewObject(root)\n node = self.__class__(self._std, self._bld, sobj.GetID(), self)\n node.write_name(name)\n return node", "def create_node(self, name, parent):\n\n try:\n node = self.map[name]\n return node\n except:\n node = Node(name,parent=parent.name)\n parent.children.add(node)\n\n node.parent = parent.name\n\n self.map[name] = node\n\n return node", "def _create_node(\n self,\n name,\n ):\n pass", "def _new_node(self):\n self._size += 1\n return self._node_factory()", "def add_node(self, metadata, pos):\n node = Node(metadata, pos)\n self.addItem(node)\n self.nodes[node.id] = node\n return node", "def _read_node(self, offset):\n self.fh.seek(offset)\n node = _unpack_struct_from_file(B_LINK_NODE_V1, self.fh)\n assert node['signature'] == b'TREE'\n\n keys = []\n addresses = []\n for _ in range(node['entries_used']):\n key = struct.unpack('<Q', self.fh.read(8))[0]\n address = struct.unpack('<Q', self.fh.read(8))[0]\n keys.append(key)\n addresses.append(address)\n # N+1 key\n keys.append(struct.unpack('<Q', self.fh.read(8))[0])\n node['keys'] = keys\n node['addresses'] = addresses\n return node", "def new_node(self, offset):\n # First we get the name of the node\n nameidx = self.string[offset:].find(b'\\0')\n name = self.string[offset: offset + nameidx]\n string_offset = offset + calc_length_word_align(nameidx + 1)\n node = FDTNode(name)\n return string_offset, node", "def add_node(self) -> Node:\n new_node = Node(self.__next_id)\n self.__nodes[self.__next_id] = new_node\n self.__next_id += 1\n return new_node", "def create_node(self, data):\n node = RealNode(data, layer=self)\n self.append_node(node)\n return node", "def new_node(name):\n\n return name, []", "def add_node(self, name, state):\n if self.has_node(name):\n raise ValueError('Node {} already exists'.format(name))\n self.source_net.add_node(name, attr_dict=state)", "def _read_node(self, offset):\n self.fh.seek(offset)\n node = _unpack_struct_from_file(B_LINK_NODE_V1, self.fh)\n assert node['signature'] == b'TREE'\n assert node['node_type'] == 1\n\n keys = []\n addresses = []\n for _ in range(node['entries_used']):\n chunk_size, filter_mask = struct.unpack('<II', self.fh.read(8))\n fmt = '<' + 'Q' * self.dims\n fmt_size = struct.calcsize(fmt)\n chunk_offset = struct.unpack(fmt, self.fh.read(fmt_size))\n chunk_address = struct.unpack('<Q', self.fh.read(8))[0]\n\n keys.append(OrderedDict((\n ('chunk_size', chunk_size),\n ('filter_mask', filter_mask),\n ('chunk_offset', chunk_offset),\n )))\n addresses.append(chunk_address)\n node['keys'] = keys\n node['addresses'] = addresses\n return node", "def createNode(self, name):\n return Node(name)", "def add_node (self, node):\n raise NotImplementedError", "def add_node(self, node):\n self.nodes[node.name] = node\n self.dirty = True", "def get_new_node(self):\n return TrieNode()", "def copy(self, new_tree):\n new_node = new_tree.new_tree_node(parent=self.parent, node_id=self.id)\n new_node.name = self.name\n new_node.branch = self.branch\n new_node.support = self.support\n new_node.support_type = self.support_type\n new_node.comment = self.comment\n new_node.children = self.children[::]\n new_node._been_processed = self._been_processed\n return new_node", "def add_node(self, data):\n new_node = Node(data)\n if self.cur_node is not None:\n new_node.next, self.cur_node.next = self.cur_node.next, new_node\n 
self.cur_node = new_node\n self.length += 1\n self.cur_pos += 1\n if self.start_node is None:\n self.start_node = self.cur_node\n # print(\"Node({}) added to {}\".format(new_node.data, self.cur_pos-1))", "def __init__(self):\n self.root = self.get_new_node();", "def add_node(self, id, document, source):\n raise NotImplementedError()", "def add_node(self, state, other):\n\t\tnew_node = Node()\n\t\tnew_node.state = state\n\t\tnew_node.info = other\n\n\t\tif self.head == None:\n\t\t\tself.current = new_node\n\t\t\tself.head = new_node\n\t\telse:\n\t\t\tself.current.next = new_node\n\t\t\tself.current = self.current.next", "def _new_tree_node(board, current_turn, side):\n global visited\n node_key = (board.get_hash_value(), current_turn)\n if (node_key not in visited):\n visited.add(node_key)\n treenodes[node_key] = TreeNode(board, current_turn, side)\n return treenodes[node_key]", "def add_node(self, id, info, parent_id):\r\n assert id not in self.node_ids, \"Node id already exists in tree\"\r\n\r\n parent_node = self.get_node_by_id(parent_id)\r\n assert parent_id in self.node_ids, \"Parent does not exist in tree\"\r\n\r\n new_node = Node(id, info, parent_node)\r\n self.node_ids.append(id)\r\n self.nodes.append(new_node)", "def add(self, d):\n new_node = Node(d)\n self.root = new_node\n self.size += 1\n return d", "def addNode(self, parent, names, data, level = 0):\n\t\t# Gets the names composing the path\n\t\ttry:\n\t\t\t# Gets the name of the current node\n\t\t\tname = names[0]\n\t\t\t\n\t\t\t# Search a given node\n\t\t\tcurrent = parent.getSoon(name)\n\t\t\t\n\t\t\t# If the name of the current node already exists\n\t\t\tif parent.getSoon(name) == None:\n\t\t\t\t# adds a new node\n\t\t\t\tcurrent = parent.setSoon(name, self.NodeClass(parent = parent, name = name))\n\t\t\t\n\t\t\t# Browse by node\n\t\t\tself.addNode(current, names[1:], data, level + 1)\n\t\texcept:\n\t\t\t# There are no more sub node, store the information of node\n\t\t\tparent.setData(data)", "def _add_node(self, node_name, node_type):\n q = 'MATCH (r:' + node_type + ') WHERE r.name=\"' \\\n + node_name + '\" RETURN r'\n results = self.db.query(q, returns=(client.Node, str, client.Node))\n res = self.db.labels.create(node_type)\n\n if (len(results) == 0):\n r = self.db.nodes.create(name=node_name)\n res.add(r)\n else:\n r = results[0][0]\n return r", "def add_node(self, node):\n temp = self.head.post\n self.head.post = node\n node.pre = self.head\n node.post = temp\n temp.pre = node", "def load_node(self, meta, parent=None):\n # Create the node\n node = Node(name=meta.name)\n self.scene.nodes.append(node)\n\n if meta.matrix is not None:\n node.matrix = Matrix44(value=meta.matrix)\n\n if meta.mesh is not None:\n # Since we split up meshes with multiple primitives, this can be a list\n # If only one mesh we set it on the node as normal\n if len(self.meshes[meta.mesh]) == 1:\n node.mesh = self.meshes[meta.mesh][0]\n # If multiple meshes we add them as new child node\n elif len(self.meshes[meta.mesh]) > 1:\n for mesh in self.meshes[meta.mesh]:\n node.add_child(Node(mesh=mesh))\n\n if meta.camera is not None:\n # FIXME: Use a proper camera class\n node.camera = self.gltf.cameras[meta.camera]\n\n if parent:\n parent.add_child(node)\n\n # Follow children\n if meta.has_children:\n for node_id in meta.children:\n self.load_node(self.gltf.nodes[node_id], parent=node)\n\n return node", "def _create_node(self, index: int, name: str, external_id: Optional[str] = None) -> Node:\r\n return Node(graph=self._graph, index=index, name=name, 
external_id=external_id)", "def add_node(self, key: str) -> bpy.types.Node:\n\t\treturn self.group.nodes.new(key)", "def create_node(self, hx, data):\n return Node(hx, data)", "def addChild(node):", "def add_node(self, new_node):\n current = self.root\n\n while True:\n\n if current is None:\n current = new_node\n return\n\n if new_node.data < current.data:\n current = current.left\n else:\n current = current.right", "def _add_node(data, entry, flux, name=None, show_compound_img=False):\r\n\r\n entry_type = entry.type\r\n graphics = entry.graphics[0]\r\n\r\n node_name = name or graphics.name\r\n\r\n node_data = {'id': entry.id,\r\n 'name': node_name[:10] + '...',\r\n 'full_name': node_name,\r\n 'label': node_name,\r\n 'content': node_name,\r\n 'size': 5,\r\n 'x': graphics.x,\r\n 'y': graphics.y,\r\n 'flux': flux,\r\n 'cumflux': abs(flux)}\r\n\r\n # not applicable yet\r\n if entry_type == 'compound' and show_compound_img:\r\n node_data.update({\r\n 'type': 'rectangle',\r\n 'backgroundImage': ''.join([Kegg.BASE_URL, 'get/',\r\n entry.name[4:], '/image']),\r\n 'borderWidth': 0,\r\n\r\n })\r\n\r\n data['nodes'].append(node_data)", "def add_node(self, state):\n state_as_string = Node.state_as_string(state)\n\n if not self.nodes.get(state_as_string):\n node = Node(state, self.final_state)\n\n self.nodes[state_as_string] = node\n\n return self.nodes.get(state_as_string)", "def insertnode(self, node_path, node_val):\n\t\t# Get to the correct tree\n\t\tcurr_tree = self\n\t\tfor node_name in node_path[1:]:\n\t\t\tcurr_tree = curr_tree[node_name]\n\t\t\n\t\t# Allocate to tree (only once)\n\t\tif curr_tree.name == None:\n\t\t\tcurr_tree.name = node_path[-1]\n\t\t\tcurr_tree.value = node_val\n\t\telse:\n\t\t\tprint curr_tree.name\n\t\t\tprint node_path\n\t\t\tassert(False)", "def addnode(self, uid, **attrs):\n\n raise NotImplementedError", "def add_node_field(self,name,data,on_exists='fail'):\n if name in np.dtype(self.node_dtype).names:\n if on_exists == 'fail':\n raise GridException(\"Node field %s already exists\"%name)\n elif on_exists == 'pass':\n return\n elif on_exists == 'overwrite':\n self.nodes[name] = data\n else:\n self.nodes=recarray_add_fields(self.nodes,\n [(name,data)])\n self.node_dtype=self.nodes.dtype", "def add_node(self, properties=None, **kwargs):\r\n\t\tif properties is None:\r\n\t\t\tproperties={}\r\n\t\t# may change method sig of Node since we can always combine arguments\r\n\t\t# here\r\n\t\tnode = Node(self._nextid, properties, **kwargs)\r\n\t\tself._nodes[self._nextid] = node\r\n\t\tself._nextid += 1\r\n\t\treturn node", "def _add_node(self, parent, model, relation, reverse, related_name,\n accessor_name, nullable, depth):\n # Reverse relationships\n if reverse and '+' in related_name:\n return\n\n node_hash = self._nodes.get(model, None)\n\n # don't add node if a path with a shorter depth exists. this is applied\n # after the correct join has been determined. generally if a route is\n # defined for relation, this will never be an issue since there would\n # only be one path available. 
if a route is not defined, the shorter\n # path will be found\n if not node_hash or node_hash['depth'] > depth:\n if node_hash:\n node_hash['parent'].remove_child(model)\n\n node = ModelTreeNode(model, parent, relation, reverse,\n related_name, accessor_name, nullable, depth)\n\n self._nodes[model] = {\n 'parent': parent,\n 'depth': depth,\n 'node': node,\n }\n\n node = self._find_relations(node, depth)\n parent.children.append(node)", "def add_node_to_mptt(self, key, tree_tag, parent_node=None):\n try:\n (key, eq_tree_tag_slug) = key.split('>')\n except:\n eq_tree_tag_slug = None\n\n if eq_tree_tag_slug:\n # add a node with null place reference,\n # and non-null equivalent_to reference\n equivalent_node = ClassificationTreeNode.objects.get(\n place__slug=key, tag__slug=eq_tree_tag_slug\n )\n n = ClassificationTreeNode(\n tag=tree_tag,\n equivalent_to=equivalent_node\n )\n else:\n # add a standard node with non-null place reference\n try:\n place = Place.objects.get(slug=key)\n except:\n raise Exception(\"Place with slug {0} must exist\".format(\n key\n ))\n n = ClassificationTreeNode(\n place=place, tag=tree_tag,\n )\n\n ClassificationTreeNode.objects.insert_node(n, parent_node)\n n.save()\n return n", "def add_node(nodeL, nodeR, city):\n new_node = Node(city)\n new_node.set_right(nodeR)\n new_node.set_left(nodeL)\n node_count[0] = node_count[0] + 1\n return new_node", "def _create_new(self, key):\n return AVLTreeNode(key)", "def add(self, data):\n root_copy= self\n while (root_copy.right):\n root_copy = root_copy.right\n new_node = DoublyLinkedList(data, root_copy, None)\n root_copy.right = new_node\n return new_node", "def add_node(self, node) -> None:\n\t\tnode.nested = True\n\t\tsuper(Node, self).add_node(node)", "def do_add_node(self, line=''):\n self.fibbing.add_node()", "def addNode(self, arg):\n if type(arg) is str:\n node = self.findNode(arg)\n if node is None:\n node = self.createNode(arg)\n elif isinstance(arg, Node):\n node = arg\n else:\n raise ValueError(\"Illegal node specification\")\n self._nodes[node.getName()] = node\n return node", "def add_node(graph, node_name, label, shape='record', style='filled', fillcolor='lightgrey'):\n node = Node(name=node_name, shape=shape, style=style, fillcolor=fillcolor, label=label)\n graph.add_node(node)\n return node", "def add_node(self, nnode, value):\n new_node = Node(nnode, value)\n self.vert_dict[nnode] = new_node\n return new_node", "def append_node(self, new_data):\n\n #create a new node and put new data.\n new_node = Node(new_data)\n\n if self.head is None:\n self.head = new_node\n return\n\n end = self.head\n while end.next:\n end = end.next\n\n end.next = new_node", "def __init__(self):\r\n super(AppendNode, self).__init__()", "def create_new_node(subgraph, prev_node, label, bb):\n return add_node(subgraph, update_node_name(prev_node.get_name(), bb-1), label=update_bb_string(label, bb-1))", "def addNode(self, appendIt=False, nodeId=None, childId=None, sublist=None, label=''):\n node = super().addNode(appendIt=appendIt, nodeId=nodeId, childId=childId,\n sublist=sublist, label=label)\n self.save(node)\n if self.atHead():\n self.saveHeadId(node.nodeId)\n return node", "def add_node(self, **kwargs):\n self._content.append(Node(**kwargs))", "def add_node(self, node) -> None:\n\t\tnode.nested = True\n\t\tsuper(__class__, self).add_node(node)", "def add_node (self, node):\n self.network.add_node(node.id)\n self.network.node[node.id] = node", "def newnode(self, name=None, num_states=0):\n # (const char* name, int num_states, net_bn* 
net)\n if num_states == 0:\n print(\"Warning: Set the number of states when using newnode() \" +\n \"or adding discrete levels won't work.\")\n\n cnetica.NewNode_bn.argtypes = [c_char_p, c_int, c_void_p]\n cnetica.NewNode_bn.restype = c_void_p\n return cnetica.NewNode_bn(ccharp(name), num_states, self.net)", "def add_node(self, node: Node):\n prop_str = \",\\n\".join([\"n.%s = '%s'\" % (k, v) for k, v in node.data.items()])\n query = \"\"\"\n MERGE (n:%s {id: '%s'})\n SET %s\n \"\"\" % (\n node.labels,\n norm_id(node.db_ns, node.db_id),\n prop_str,\n )\n return self.create_tx(query)", "def save_node(self):\n # save node in path2node\n if self.full_path in self.file.path2node:\n print \"** Error, created node with path twice:\\n%s\" % self.full_path\n traceback.print_stack()\n sys.exit(1)\n self.file.path2node[self.full_path] = self \n # save node in id_lookups\n id = self.sdef['id']\n ns = self.sdef['ns']\n type = self.sdef['type']\n custom = 'custom' in self.sdef and self.sdef['custom']\n if self.parent is None and self.sdef['df'] and not custom:\n # structure (not custom) created at top level, save in id_lookups\n if id not in self.file.id_lookups[ns]:\n print \"** Error: Unable to find id '%s' in id_lookups when saving node\" % id\n traceback.print_stack()\n sys.exit(1)\n if self.path not in self.file.id_lookups[ns][id]:\n print (\"** Error: Unable to find path '%s' in id_lookups when\"\n \" saving node %s\") % (self.path, id)\n print \"self.sdef['df'] is:\"\n pp.pprint (self.sdef['df'])\n traceback.print_stack()\n sys.exit(1)\n self.file.id_lookups[ns][id][self.path]['created'].append(self)\n # save node in all_nodes, either at top level (if no parent) or inside\n # mstats structure of parent node\n if self.parent is None:\n if self.path in self.file.all_nodes:\n self.file.all_nodes[self.path].append(self)\n else:\n self.file.all_nodes[self.path] = [self, ]\n else:\n if id not in self.parent.mstats:\n if custom:\n # custom node created, add id to mstats of parent\n self.parent.mstats[id] = { 'df': {}, 'type':type, 'ns': ns,\n 'created': [ self, ], 'qty':'?' 
}\n else:\n print \"** Error: Unable to find key '%s' in parent mstats\" % id\n print \"self.parent.mstats is\"\n pp.pprint (self.parent.mstats)\n traceback.print_stack()\n sys.exit(1)\n else: \n # append node to parent created mstats \n self.parent.mstats[id]['created'].append(self)", "def add_node(self, parent_node, new_node_name, attributes={}, position=0):\n for key in attributes:\n attributes[key] = format(attributes[key])\n\n if position == -1:\n count_children = len(list(parent_node))\n position = count_children\n\n new_node = ET.Element(new_node_name, attributes)\n parent_node.insert(position, new_node)\n\n return new_node", "def addNode(self, new_data):\r\n curr = self.head\r\n\r\n # Add new Node\r\n if curr is None:\r\n n = Node(new_data) \r\n self.head = n\r\n return\r\n \r\n # Sort Nodes \r\n if curr.data > new_data:\r\n n = Node(new_data) \r\n n.next = curr\r\n self.head = n\r\n return\r\n\r\n while curr.next is not None:\r\n if curr.next.data > new_data:\r\n break\r\n curr = curr.next\r\n n = Node(new_data) \r\n n.next = curr.next\r\n curr.next = n\r\n return", "def to_model(self):\r\n node = Node.objects.get_or_create(\r\n name=self.name,\r\n description=self.description\r\n )[0]\r\n \r\n return node", "def push(self, new_data):\n new_node = Node(new_data)\n new_node.next = self.head\n self.head = new_node", "def createNode(self):\n\n for i in range(self.nobjects):\n # Create a new array\n self.fileh.create_array('/', 'array' + str(i), self.a1)\n # Put a mark\n self.fileh.mark()\n # Unwind all marks sequentially\n for i in range(self.niter):\n t1 = clock()\n for i in range(self.nobjects):\n self.fileh.undo()\n if verbose:\n print(\"u\", end=' ')\n if verbose:\n print()\n undo = clock() - t1\n # Rewind all marks sequentially\n t1 = clock()\n for i in range(self.nobjects):\n self.fileh.redo()\n if verbose:\n print(\"r\", end=' ')\n if verbose:\n print()\n redo = clock() - t1\n\n print(\"Time for Undo, Redo (createNode):\", undo, \"s, \", redo, \"s\")", "def add_node(self, name, node):\n self.nodes.setdefault(name, node)", "def add_node(self, node_data):\n self.__rtags.append(True)\n self.__nodedata.append(data)\n self.__ltags.append(True)", "def createNode(*args, name: AnyStr=\"\", parent: AnyStr=\"\", shared: bool=True, skipSelect:\n bool=True, **kwargs)->AnyStr:\n pass", "def addNode(self, val):\n\t\tnode = self.createNode(val)\n\t\tif self.head is None:\n\t\t\tself.head = node\n\t\t\treturn node\n\t\tcur = self.head\n\t\twhile cur.getNext() is not None:\n\t\t\tcur = cur.getNext()\n\t\tcur.setNext(node)\n\t\treturn node", "def push(self, new_data):\n new_node = Node(new_data)\n new_node.next = self.head\n self.head = new_node", "def node_create(self, parent, path):\n\n q = (\"insert into nodes (parent, path) \"\n \"values (?, ?)\")\n props = (parent, path)\n return self.execute(q, props).lastrowid", "def add_new_node(self):\n\n\n new_node = str(self.form.newnode_text.toPlainText())\n if not new_node:\n self.form.newnode_text.clear()\n self.show_dialog(\"Empty argument.\")\n return\n \n self.form.newnode_text.clear()\n \n if self.G.has_node(new_node):\n self.show_dialog(f\"{new_node} is already constructed.\")\n \n else:\n self.G.add_node(new_node)\n self.form.plot_canvas.plot(self.G)", "def tree(self) -> Node:\n return Node(self.to_string())", "def add_child(self, node):\n if self is node:\n parent_id = \"\"\n _nodeid=\"N_\"+str(0)\n else:\n if not issubclass(node.__class__, Node):\n raise TypeError(\"{}.add_child: arg «node»=«{}», type {} not 
valid.\".format(self.__class__.__name__, node, type(node)))\n self.childs.append(node)\n node.parent = self\n parent_id = self.TV.selection()[0]\n _nodeid=\"N_\"+str(self.node_count)\n # parent = self.rootnode.get_node_by_id(parent_id)\n # if parent is None:\n # return None\n\n # print(\"self.TV.insert node._nodeid\", node._nodeid)\n # print(\"self.TV.insert node.data\", node.data)\n \n self.TV.insert(parent_id, 'end', _nodeid, text=node.name)\n\n # parent_id = self.TreeView.selection()[0]\n # node_name = askstring(\"New Child\", prompt=\"Enter the node name\", initialvalue=\"\")\n # if not node_name:\n # node_name = \"no-name-node\"\n # # self.TV.insert(item, 'end', 'LC_'+str(self.TVleafref), \n # # text='Load case '+str(self.TVleafref))\n # #self.node_count += 1\n \n # self.TreeView.insert(parent_id, 'end', self._nodeid, text=self.name)\n\n return node", "def addtree(self, dct) -> None:\n namelst = dct['name'].split('\\\\')\n # print('nlst {}'.format(namelst))\n n_n = self\n for curname in namelst:\n nextlevel = n_n.child_dct.get(curname, None)\n if nextlevel is None:\n nextlevel = n_n.child_dct[curname] = LocNode(curname)\n n_n = nextlevel\n n_n.setval(dct)", "def deserialize(self, data):\n data = data.split(\",\")\n # print(data)\n self.idx = 0\n \n def dfs():\n if data[self.idx] == 'N':\n self.idx += 1\n return None\n node = TreeNode(int(data[self.idx]))\n self.idx += 1\n node.left = dfs()\n node.right = dfs()\n return node\n return dfs()", "def create_node_tree(self, node_tree):\n # allow it to accept both a list or dict\n if isinstance(node_tree, list):\n created_root_nodes = []\n for item in node_tree:\n created_root_nodes.append(\n self.create_node_tree(item)\n )\n return created_root_nodes\n\n node_type = node_tree['type']\n\n self.comp.Lock()\n node = self.comp.AddTool(node_type)\n self.comp.Unlock()\n\n # attributes\n if 'attr' in node_tree:\n attributes = node_tree['attr']\n for key in attributes:\n value = attributes[key]\n if isinstance(value, dict):\n new_node = self.create_node_tree(value)\n node.Input = new_node\n else:\n node.SetAttrs({key: value})\n\n # input lists\n if 'input_list' in node_tree:\n input_list = node_tree['input_list']\n for key in input_list:\n node_input_list = node.GetInputList()\n for input_entry_key in node_input_list.keys():\n input_entry = node_input_list[input_entry_key]\n input_id = input_entry.GetAttrs()['INPS_ID']\n if input_id == key:\n value = input_list[key]\n input_entry[0] = value\n break\n\n # ref_id\n if 'ref_id' in node_tree:\n node.SetData('ref_id', node_tree['ref_id'])\n\n # connected to\n if 'connected_to' in node_tree:\n connected_to = node_tree['connected_to']\n if 'Input' in connected_to:\n input_node = self.create_node_tree(connected_to['Input'])\n node.Input = input_node\n elif 'ref_id' in node_tree['connected_to']:\n ref_id = node_tree['connected_to']['ref_id']\n print('ref_id: %s' % ref_id)\n # find a node with ref_id equals to ref_id that is given in the\n # node tree\n all_nodes = self.comp.GetToolList().values()\n for r_node in all_nodes:\n node_ref_id = r_node.GetData('ref_id')\n print('node_ref_id: %s' % node_ref_id)\n if node_ref_id == ref_id:\n node.Input = r_node\n break\n\n return node", "def add(self, inp, out):\n self.curr_node.input_frequencies[inp] += 1\n if inp not in self.curr_node.children.keys() or out not in self.curr_node.children[inp].keys():\n node = Node(out)\n self.curr_node.children[inp][out] = node\n\n self.curr_node = self.curr_node.children[inp][out]\n self.curr_node.frequency += 1", "def 
_to_node(self, data):\n return Node(\n id = data['ps'],\n name = data['ps'],\n state = NodeState.UNKNOWN,\n public_ip = [data['ip']],\n private_ip = [],\n driver = self.connection.driver,\n extra = {\n 'current_size' : data['memory_mb'],\n 'account_id' : data['account_id'],\n 'type' : data['type']\n }\n )", "def _create_copy(self, node, direction):\n new_node = copy.copy(node)\n new_node.set_uri(direction)\n new_location = get_location(direction)\n # Insert into db \n self.sm.create_node(new_node.tostring(), direction, NODETYPES[node.TYPE], location = new_location)\n return new_location", "def add_node(self, node: Node) -> None:\n with scandir(node.path) as it:\n for entry in it:\n if entry.name.startswith('.') or entry.name.startswith('__'):\n continue\n if entry.is_dir():\n if len(node.children) > 50:\n pass\n else:\n node.children.append(Node(node, entry))\n else:\n node.files.append(entry)\n for child in node.children:\n self.add_node(child)\n if child.depth > self.depth:\n self.depth = child.depth", "def add_child(self, state):\n child = RRT.Node(state=state, parent=self)\n self.children.append(child)\n return child", "def _add_node(self, node_entry, idx):\n if node_entry[\"op\"].name not in relay_to_onnx_op_mapping:\n raise NotImplementedError(\n f\"Currently the operator '{node_entry['op'].name}' is \" \"not supported.\"\n )\n converter = relay_to_onnx_op_mapping[node_entry[\"op\"].name]()\n\n return converter.convert(node_entry, self._mc, self._node_dict)", "def createnode(node, content):\n for c in content:\n if isinstance(c, SpanNode):\n # Sub-node\n node.nodelist.append(c)\n c.pnode = node\n elif c[0] == 'span':\n node.eduspan = (c[1], c[2])\n elif c[0] == 'relation':\n node.relation = c[1]\n elif c[0] == 'leaf':\n node.eduspan = (c[1], c[1])\n node.nucspan = (c[1], c[1])\n node.nucedu = c[1]\n elif c[0] == 'text':\n node.text = c[1]\n else:\n raise ValueError(\"Unrecognized property: {}\".format(c[0]))\n return node", "def add_node(self, node):\n\n # Add node only if it does not exist yet\n if node.id() in self.__nodes:\n return\n\n labels = node.labels()\n for label in labels:\n break\n\n if label not in self.__labels:\n self.__labels[label] = len(self.__labels)\n\n js = \"nodes.push({index: \" + str(node.id()) + \", \" +\\\n \"name: \\\"\" + str(node.id()) + \"\\\", \" +\\\n \"group: \" + str(self.__labels[label]) + \\\n \" });\"\n\n d3_node_id = self.frame.evaluateJavaScript(js) - 1\n self.__nodes[node.id()] = str(d3_node_id)\n logger.info(\"node id %s - > d3 id: %s\", node.id(), d3_node_id)", "def add(self, h, name, parent=None, action=None, observation=None,\n particle=None, budget=None, cost=None):\n history = h[:]\n\n # instantiate node\n if action is not None:\n n = ActionNode(self.counter, name, history, parent=parent, action_index=action, cost=cost)\n else:\n n = BeliefNode(self.counter, name, history, parent=parent, obs_index=observation, budget=budget)\n\n if particle is not None:\n n.add_particle(particle)\n\n # add the node to belief tree\n self.nodes[n.id] = n\n self.counter += 1\n\n # register node as parent's child\n if parent is not None:\n parent.add_child(n)\n return n", "def AddNode(self, node):\n self.nodes.append(node)\n return node", "def add_node(parent_name, child_name, node):\n if node.name == parent_name:\n return node.add(Node(child_name))\n else:\n for child in node.children:\n add_node(parent_name, child_name, child)", "def add_node(self, node_class: rc.Node, location: Tuple[int, int] = (0, 0), **kwargs) -> rc.Node:\n ryven_node = 
self.script.flow.create_node(node_class, data=kwargs)\n x, y = location\n self.flow_view.node_items[ryven_node].setX(x)\n self.flow_view.node_items[ryven_node].setY(y)\n data = ryven_node.complete_data(ryven_node.data())\n\n super().add_node(key=ryven_node.GLOBAL_ID, attr_dict={\"ryven_data\": data})\n return ryven_node", "def add_node(self, new_node: 'GraphNode'):\n self.operator.add_node(new_node)", "def make_node(self, node_property):\n # Try except because Ubigraph is old as hell!\n try: n = self.G.new_vertex()\n except: pass\n for prop, val in node_property.items():\n try: self.G.set_vertex_attribute(n, prop, val)\n except: return make_node(node_property)\n return n", "def buildTree(self,newick):\n\t\tfor i in range(len(newick)):\n\t\t\tif newick[i] == \"(\":\n\t\t\t\tself.currNode.children.append(node.node(self.currNode))\n\t\t\t\tself.currNode=self.currNode.children[0]\n\t\t\t#polytomy support enabled\n\t\t\telif newick[i] == \",\":\n\t\t\t\tself.currNode=self.currNode.parent\n\t\t\t\tself.currNode.children.append(node.node(self.currNode))\n\t\t\t\tself.currNode=self.currNode.children[-1]\n\t\t\telif newick[i] == \")\":\n\t\t\t\tself.currNode=self.currNode.parent\n\t\t\telse:\n\t\t\t\tself.currNode.info+=newick[i]", "def create_node(self, node_class, *args, **kwds):\n assert isinstance(node_class, str)\n cls = nodelist.all_nodes[node_class]\n node = cls(*args, **kwds)\n self.add_node(node)\n return node", "def __call__(self):\n new_node = Node()\n new_node.op = self\n return new_node", "def add_node(graph, node, parent, label):\n neg = node['neg']\n pos = node['pos']\n total = str(neg + pos)\n neg = str(neg)\n pos = str(pos)\n samples_info = total + ' samples\\n' + neg + ' of class 0, ' + pos + ' of class 1'\n if 'final_class' in node:\n legend = str(node['id']) + '. final class is ' + str(node['final_class'])\n new_node = pydot.Node(legend)\n else:\n legend = str(node['id']) + '. ' + node['split_attr'] + \\\n ' < ' + str(node['split_value']) + '\\n' + samples_info\n new_node = pydot.Node(legend)\n graph.add_node(new_node)\n if parent:\n graph.add_edge(pydot.Edge(parent, new_node, label=str(label),labelfontcolor=\"#009933\", fontsize=\"10.0\", color=\"blue\"))\n if 'left_child' in node:\n add_node(graph, node['left_child'], new_node, True)\n if 'right_child' in node:\n add_node(graph, node['right_child'], new_node, False)", "def __init__(self):\n self.root = Node('')", "def __init__(self):\n self.root = Node(\"\")", "def __init__(self):\n self.root = Node(\"\")", "def addNode( self, n, **attr ):\n self._G.add_node(n, attr)", "def create_node(name, node_type):\n if node_type in NODE_REGISTRY:\n return Node(name, NODE_REGISTRY[node_type])\n raise TypeError('The specified node type \\'%s\\' could not be found within imagegen.' % node_type)", "def create_node(self, name, img, size):\n if self._create_needed(name):\n node = self._driver.create_node(\n name=name,\n image=self.get_ami_by_id(img),\n size=self.get_size_by_id(size))\n self._driver.wait_until_running([node])\n return node\n else:\n logger.debug(\"node %s already created, no action\" % name)\n return None" ]
[ "0.69235027", "0.68231606", "0.6610997", "0.66002446", "0.6579251", "0.6456872", "0.6415634", "0.6410044", "0.63237524", "0.63021314", "0.62788886", "0.6263254", "0.6262798", "0.62442696", "0.6242336", "0.62385815", "0.62191033", "0.6210085", "0.62071997", "0.61693835", "0.6157834", "0.6150335", "0.6136132", "0.61325586", "0.6129944", "0.61206603", "0.6118609", "0.61124414", "0.6108114", "0.6105767", "0.6089306", "0.60847783", "0.6083147", "0.6062471", "0.6039787", "0.60312533", "0.6019529", "0.6007155", "0.59990907", "0.5985071", "0.5979165", "0.5967598", "0.59657705", "0.5964508", "0.59547305", "0.59496635", "0.59443927", "0.59435785", "0.59431297", "0.5932957", "0.58940864", "0.58734363", "0.5863229", "0.5856945", "0.5856551", "0.58361745", "0.58350307", "0.5829982", "0.58292043", "0.5822091", "0.5808569", "0.57992727", "0.57842034", "0.5781701", "0.5759564", "0.5749266", "0.57437366", "0.5741894", "0.57378477", "0.5737223", "0.5726377", "0.5723971", "0.5722073", "0.5718729", "0.5717422", "0.5713264", "0.57075423", "0.5703914", "0.5703914", "0.56958395", "0.5692631", "0.56882364", "0.56875324", "0.56870997", "0.5682469", "0.5675536", "0.5674606", "0.56715083", "0.56710017", "0.566602", "0.56619126", "0.56603307", "0.5631927", "0.5622371", "0.56197757", "0.5615785", "0.56134254", "0.56134254", "0.56044734", "0.56037", "0.56004846" ]
0.0
-1
Perform a DFS of the tree and collect reads that pass the similarity test.
def query(self, query: str) -> List[str]:
    nodes_to_explore: Deque[Node] = deque()
    nodes_to_explore.append(self.root)
    out: List[str] = []
    while nodes_to_explore:
        current = nodes_to_explore.popleft()
        total_kmers_found = 0
        total_kmers = 0
        for kmer in kmers_in_string(query, self.k):
            if current.filter.contains(kmer):
                total_kmers_found += 1
            total_kmers += 1
        if total_kmers_found >= self.theta * total_kmers:
            for child in current.children:
                nodes_to_explore.append(child)
            if current.num_children() == 0:
                out.append(current.dataset_id)
    return out
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_scan_recursive(self):\n self.run_scan(self.tempdir, self.root_fcount + self.nest_fcount + 1)", "def _analyze(self, node, visited = set([])):\n for ch in self._get_children(node):\n if ch not in visited:\n visited.add(ch)\n # In this first sweep I want to gather data\n # like the number of core types\n # How many values are present?\n if type(ch) == pfp.fields.Dom or \\\n self._base_name(ch) == 'Struct':\n pass\n elif self._base_name(ch) == 'Array':\n self.nr_array_types += 1\n else:\n self.nr_core_types += 1\n\n self._analyze(ch, visited)", "def _dfs_for_nearest(self, root, dist):\n if dist > self.best_dist:\n return\n if root.element:\n self._register_best_element(dist, root.element)\n return\n for child in root.edges.values():\n for c,next_child in child.edges.items(): \n self._dfs_for_nearest(next_child, dist + c*c)", "def dfs( self ):\n\n #print self.state; \n #print self.visited;\n SearchProblem.stateVisited= SearchProblem.stateVisited+1 \n \n if self.stop: # check class variable and stop searching...\n return;\n\n for action in self.edges(): # consider each edge leading out of this node\n\n action.destination.path = self.path + str(action.label); \n # get the label associated with the\n # action and append it to the path\n # string\n\n action.destination.visited = self.visited.copy();\n # make copy of source node's visited set\n # and use it as destination node's\n # visited set\n\n action.destination.visited.add( repr(action.destination.state) );\n\n if action.destination.is_target(): \n # check if destination of edge is target node\n action.destination.target_found(); # perform target found action\n if not self.continue_search(): # stop searching if not required\n SearchProblem.stop = True; # set class variable to record that we\n break; # are done\n\n if repr(action.destination.state) in self.visited:\n continue; # skip if we've visited this one before\n\n action.destination.dfs(); # resume recursive search ", "def dfs(self):\n return self.__dfs(self.document, self.root)", "def _measure_distance_single(seppresults, err=sys.stderr, verbose=True):\n if verbose:\n err.write('read tree...')\n tree = TreeNode.read(StringIO(seppresults))\n if verbose:\n err.write('OK: ')\n results = []\n treesize = tree.count(tips=True)\n for j, fragment in enumerate(tree.tips()):\n if fragment.name.startswith('seqIDs:'):\n # seqIDs:2789969,2491172,4462991,4456388;\n # otuIDs:4462991;\n # num_pointmutations:6;\n # num_non-representative-seqs:3;\n # only_repr._sequences:False\n seq_data = {}\n for field in fragment.name.split(';'):\n kv = field.split(':')\n if kv[0] in ['seqIDs', 'otuIDs']:\n seq_data[kv[0]] = list(map(str, kv[1].split(',')))\n else:\n seq_data[kv[0]] = kv[1]\n trueOTUids = seq_data['otuIDs']\n\n # metric 'lca':\n try:\n node_lca = tree.lca(trueOTUids)\n try:\n dist_lca = node_lca.distance(fragment)\n except NoLengthError:\n dist_lca = np.nan\n except MissingNodeError:\n dist_lca = np.nan\n\n # metric 'closest':\n dists = []\n for trueOTU in trueOTUids:\n try:\n dists.append(tree.find(trueOTU).distance(fragment))\n except NoLengthError:\n dists.append(np.nan)\n except MissingNodeError:\n dists.append(np.nan)\n dist_closest = min(dists)\n\n seq_data['distance_lca'] = dist_lca\n seq_data['distance_closest'] = dist_closest\n results.append(seq_data)\n if verbose:\n if j % max(1, int(treesize/100)) == 0:\n err.write('.')\n if verbose:\n err.write(' done.\\n')\n return pd.DataFrame(results)", "def run(self):\n for i in range(len(self.edges)):\n if self.tracks[i] == -1:\n 
self._dfs(i)\n self.cnt += 1\n return self.tracks", "def dfs(node):\n nonlocal ans\n if not node: return []\n if node.left is node.right is None: return [0]\n left,right = dfs(node.left), dfs(node.right)\n ans += sum(2 + x + y <= distance for x in left for y in right)\n return [1 + x for x in left + right]", "def dfs(node: TreeNode):\n if not node:\n return\n helper(node, 0, sum)\n dfs(node.left)\n dfs(node.right)", "def test_scan_dir_files(self):\n self.run_scan(self.subdir, self.nest_fcount + 1)", "def dfs(g):\n global time\n time = 0\n\n for v in g:\n v.discovery = 0\n v.finish_time = 0\n v.color = 'white'\n\n for v in g:\n if v.color == 'white':\n dfs_visit(v)", "def compare_all_levels(self, weights=_BEST_WEIGHTS, save=False, validate=False):\n\n # Generate distances between each relevant levels pair\n scores = []\n for left in range(251):\n if left in self.labels:\n for right in range(left, 251):\n if right in self.labels:\n scores.append(self.compare_vectors(left, right, weights))\n\n # Create the dataframe\n scores_df = pd.DataFrame(scores, columns=[\"left\", \"right\", \"cosine\"])\n\n # Normalize cosine scores\n distances = scores_df[\"cosine\"].values\n distances = (((distances + 1) * (4 - 1)) / 2) + 1\n scores_df[\"cosine\"] = distances\n\n # Sort the dataframe\n scores_df.sort_values(\"cosine\", inplace=True, ascending=False)\n\n # Save the csv\n if save:\n self.save_df(scores_df)\n\n # Validate results\n if validate:\n self.validate_results(scores_df)\n\n return scores_df", "def test_filtered_scan(self):\n self.run_scan(self.tempdir, self.root_fcount + self.nest_fcount, ext=\".txt\")", "def testTreeF(node, test):\n total = len(test)\n success = 0\n for d in test:\n i = searchTreeF(node, d)\n if i == d[-1]:\n success += 1\n return success / total", "def _dfs(self, i):\n self.tracks[i] = self.cnt\n for j in self.edges[i]:\n if self.tracks[j] == -1:\n self._dfs(j)", "def greedy_learn_search(self,db,labels):\n queue = PriorityQueue()\n dolowmem = (self.lowmem == True)\n numidsets = 0\n root_ids = range(len(labels))\n queue.push((self.root,root_ids),len(labels))\n numnodes = 1\n deepest = 0\n err = 0\n while len(queue) > 0 and numnodes+2 <= self.maxnodes:\n #print \"%d nodes, priority %d\"%(numnodes,queue.nextkey())\n nerr = queue.nextkey()\n (node,trainingset) = queue.pop()\n #print \"Greedy learn\",len(trainingset)\n if trainingset is None:\n trainingset = self.identify_examples(db,labels,node)\n if node.depth >= self.maxdepth or len(trainingset) <= self.minexamples:\n #print \" Hit depth or training set limit\"\n node.pick_best_label(db,labels,trainingset)\n err += misclassification_error([labels[id] for id in trainingset])\n continue\n features = self.feature_subset(node,db,labels,trainingset)\n cost = node.pick_best_split(db,labels,trainingset,features)\n numidsets -= len(trainingset)\n #do a split\n if node.type == 'v':\n continue\n elif node.type == 's':\n #discrete split\n node.children = dict()\n #select sub-indices\n Eids = defaultdict(list)\n noneids = []\n for id in trainingset:\n v = db[node.feature,id]\n if v is None:\n #item doesn't exist, it's a missing value\n noneids.append(id)\n else:\n Eids[v].append(id)\n #determine whether to switch to low-memory mode\n if not dolowmem and self.lowmem=='auto':\n for v,vids in Eids.iteritems():\n numidsets += len(vids)+len(noneids)\n if numidsets > self.lowmem_threshold:\n print \"Decision tree learner switching to low-memory mode\"\n dolowmem = True\n trainingset = None\n\n\n numnodes += len(Eids)\n #print \"Split 
sizes\",[len(v) for v in Eids.itervalues()]\n #print \"None size\",len(noneids)\n for v,vids in Eids.iteritems():\n #print \"->\",len(vids),\"+\",len(noneids)\n #recurse\n c = DecisionTreeNode(node)\n node.children[v] = c\n err = misclassification_error([labels[id] for id in vids+noneids])\n cids = (None if dolowmem else vids+noneids)\n queue.push((c,cids),err)\n if c.depth > deepest:\n deepest = c.depth\n print \"Decision tree learner: Reached node with depth\",deepest\n else:\n #do an inequality split\n assert node.type == 'i',\"Got a weird type? \"+str(node.type)\n leftids = []\n rightids = []\n for id in trainingset:\n val = db[node.feature,id]\n if val is not None:\n if val <= node.value: leftids.append(id)\n else: rightids.append(id)\n else:\n leftids.append(id)\n rightids.append(id)\n if len(leftids)==0 or len(rightids)==0:\n print \"node feature \"+str(node.feature)+\" doesn't have a valid split value \"+str(node.value)\n vals = [db[node.feature,id] for id in trainingset if db[node.feature,id]!=None]\n print \"min,max of training set:\",min(vals),max(vals)\n print \"cost is\",cost\n raw_input()\n assert len(leftids) > 0 and len(rightids) > 0\n if not dolowmem and self.lowmem=='auto':\n numidsets += len(leftids) + len(rightids)\n if numidsets > self.lowmem_threshold:\n print \"Decision tree learner switching to low-memory mode\"\n dolowmem = True\n trainingset = None\n numnodes += 2\n c1 = DecisionTreeNode(node)\n c2 = DecisionTreeNode(node)\n node.children = {0:c1,1:c2}\n #print \"->\",len(leftids)\n #print \"->\",len(rightids)\n err1 = misclassification_error([labels[id] for id in leftids])\n err2 = misclassification_error([labels[id] for id in rightids])\n if dolowmem:\n leftids = None\n rightids = None\n queue.push((c1,leftids),err1)\n queue.push((c2,rightids),err2)\n if c1.depth > deepest:\n deepest = c1.depth\n print \"Decision tree learner: Reached node with depth\",deepest\n #end of recursion. 
for the rest of the nodes still in the queue, make them leaf nodes\n if len(queue) > 0:\n print \"%d nodes remaining in queue, setting to leaves\"%(len(queue),)\n for (node,trainingset) in queue:\n node.pick_best_label(db,labels,trainingset)\n err += misclassification_error([labels[id] for id in trainingset])\n return err", "def eval_tree(tree: GPTree, dataset: Iterable) -> list:\n results = []\n for data in zip(*dataset):\n try:\n output = tree.compute_tree(data[0])\n results.append(\n 0 if output == data[1] else 1\n ) # right or wrong, but no error.\n except Exception:\n results.append(2) # Fails to run.\n\n return results", "def dfs(node, traversal):\n if traversal.terminated: return\n\n g = traversal.graph\n node_key = g.key_func(node)\n traversal.node_state[node_key] = DISCOVERED\n traversal.entry_times[node_key] = traversal.curr_time\n traversal.curr_time += 1\n\n if traversal.should_process_node(node) is not False:\n # Now go through all children\n children = list(traversal.select_children(node, reverse = True))\n # print \"Node, Children: \", g.key_func(node), children\n for n,edge in children:\n child_key = g.key_func(n)\n if traversal.node_state[child_key] != None:\n traversal.process_edge(node, n, edge)\n else: # Node has not even been discovered yet\n traversal.parents[child_key] = node\n traversal.process_edge(node, n, edge)\n dfs(n, traversal)\n\n traversal.node_state[node_key] = PROCESSED\n traversal.curr_time += 1\n traversal.exit_times[node_key] = traversal.curr_time\n traversal.node_processed(node)", "def compare_functional_loci(con):\n cur = con.cursor()\n sql = \"select dnds_testid, fscore_testid from Compare_DNDS_Fscores\"\n cur.execute(sql)\n x = cur.fetchall()\n\n outl = \"\"\n for ii in x:\n \"\"\"For each test\"\"\"\n dnds_testid = int(ii[0])\n fscore_testid = int(ii[1])\n\n \"\"\"Get the context -- which ancestors? alignment method? phylo model? 
etc...\"\"\"\n almethod = None\n phylomodel = None\n anc1id = None\n anc2id = None\n sql = \"select almethod, phylomodel, anc1, anc2 from DNDS_Tests where id=\" + \\\n dnds_testid.__str__()\n cur.execute(sql)\n x = cur.fetchall()\n if x.__len__() > 1:\n write_error(\n con, \"There are multiple entires for the DNDS test \" + dnds_testid.__str__())\n exit()\n almethod = int(x[0][0])\n phylomodel = int(x[0][1])\n anc1id = int(x[0][2])\n anc2id = int(x[0][3])\n\n site_anc1mlstate = {}\n site_anc1mlpp = {}\n site_anc2mlstate = {}\n site_anc2mlpp = {}\n\n tups = get_site_ml(con, anc1id, skip_indels=False)\n for site in tups:\n state = tups[site][0]\n pp = tups[site][1]\n site_anc1mlstate[site] = state\n site_anc1mlpp[site] = pp\n tups = get_site_ml(con, anc2id, skip_indels=False)\n for site in tups:\n state = tups[site][0]\n pp = tups[site][1]\n site_anc2mlstate[site] = state\n site_anc2mlpp[site] = pp\n\n \"\"\"Get scores for sites.\"\"\"\n site_nebppcat2 = {}\n site_nebppcat3 = {}\n site_nebppcat4 = {}\n site_nebsigflag = {}\n site_nebmut = {}\n\n site_bebppcat2 = {}\n site_bebppcat3 = {}\n site_bebppcat4 = {}\n site_bebsigflag = {}\n site_bebmut = {}\n\n site_df = {}\n site_k = {}\n site_p = {}\n\n sql = \"select site, ppcat1, ppcat2, ppcat3, ppcat4, ancmu, significant from NEB_scores where testid=\" + dnds_testid.__str__()\n cur.execute(sql)\n qq = cur.fetchall()\n for jj in qq:\n site = jj[0]\n site_nebppcat2[site] = jj[2]\n site_nebppcat3[site] = jj[3]\n site_nebppcat4[site] = jj[4]\n site_nebmut[site] = jj[5]\n site_nebsigflag[site] = jj[6]\n\n sql = \"select site, ppcat1, ppcat2, ppcat3, ppcat4, ancmu, significant from BEB_scores where testid=\" + dnds_testid.__str__()\n cur.execute(sql)\n qq = cur.fetchall()\n for jj in qq:\n site = jj[0]\n site_bebppcat2[site] = jj[2]\n site_bebppcat3[site] = jj[3]\n site_bebppcat4[site] = jj[4]\n site_bebmut[site] = jj[5]\n site_bebsigflag[site] = jj[6]\n\n sql = \"select site, df, k, p from FScore_Sites where testid=\" + fscore_testid.__str__()\n cur.execute(sql)\n qq = cur.fetchall()\n for jj in qq:\n site = jj[0]\n site_df[site] = jj[1]\n site_k[site] = jj[2]\n site_p[site] = jj[3]\n\n \"\"\"Resolve differences between NEB and BEB\"\"\"\n for s in site_nebppcat2:\n if s not in site_bebppcat2:\n site_bebppcat2[s] = None\n site_bebppcat3[s] = None\n site_bebppcat4[s] = None\n site_bebmut[s] = None\n site_bebsigflag[s] = None\n for s in site_bebppcat2:\n if s not in site_nebppcat2:\n site_nebppcat2[s] = None\n site_nebppcat3[s] = None\n site_nebppcat4[s] = None\n site_nebmut[s] = None\n site_nebsigflag[s] = None\n\n dnds_sites = site_nebppcat2.keys()\n df_sites = site_df.keys()\n sites = []\n for s in dnds_sites:\n if s in df_sites:\n sites.append(s)\n\n print \"\\n. \" + sites.__len__().__str__() + \" sites have scores for both Df and dN/dS.\"\n print \"\\n. 
\" + (dnds_sites.__len__() - sites.__len__()).__str__() + \" do not match.\"\n sites.sort()\n for s in sites:\n line = dnds_testid.__str__()\n line += \"\\t\" + fscore_testid.__str__()\n line += \"\\t\" + anc1id.__str__()\n line += \"\\t\" + anc2id.__str__()\n\n line += \"\\t\" + s.__str__()\n line += \"\\t\" + site_nebppcat2[s].__str__()\n line += \"\\t\" + site_nebppcat3[s].__str__()\n line += \"\\t\" + site_nebppcat4[s].__str__()\n line += \"\\t\" + site_nebmut[s].__str__()\n line += \"\\t\" + site_nebsigflag[s].__str__()\n\n line += \"\\t\" + site_bebppcat2[s].__str__()\n line += \"\\t\" + site_bebppcat3[s].__str__()\n line += \"\\t\" + site_bebppcat4[s].__str__()\n line += \"\\t\" + site_bebmut[s].__str__()\n line += \"\\t\" + site_bebsigflag[s].__str__()\n\n line += \"\\t\" + site_df[s].__str__()\n line += \"\\t\" + site_k[s].__str__()\n line += \"\\t\" + site_p[s].__str__()\n\n line += \"\\t\" + site_anc1mlstate[s]\n line += \"\\t\" + site_anc1mlpp[s].__str__()\n line += \"\\t\" + site_anc2mlstate[s]\n line += \"\\t\" + site_anc2mlpp[s].__str__()\n\n outl += line + \"\\n\"\n\n if outl.__len__() > 1:\n fout = open(\"compare_dnds_Df.txt\", \"w\")\n fout.write(outl)\n fout.close()", "def traverseTree(mdsnode,dead_branches=False,depth=float('Nan'),current_depth=0,noisy=False,strict=False,tags=False):\n tagdict={}\n if isinstance(mdsnode,mds.tree.Tree): \n mdsnode=mdsnode.getNode(\"\\\\TOP\")\n \n name = get_mds_shortname(mdsnode) \n me = Branch(mdsnode)#put node information here if you like\n if noisy: print (\" \"*current_depth + name)\n\n #Members are data/signals, put them directly the current Node object\n #if they are arrays\n if mdsnode.getNumMembers()>0:\n leaves=mdsnode.getMembers()\n for leaf in leaves:\n leafname=get_mds_shortname(leaf)\n leafshape=get_mds_shape(leaf)\n if dead_branches or not len(leafshape) ==0:\n if noisy: print (\" \"*(current_depth+1) + leafname +\": array%s\"%str(leafshape))\n setattr(me,leafname,Leaf(leaf,strict))\n tagdict[leafname]=getattr(me,leafname)\n else:\n if noisy: print(\" \"*(current_depth+1) + leafname)\n #Children contain no immediate data, just links to more nodes. 
If depth is\n #not beyond limit, go down these 'branches' and add contents to the current\n #Node object\n if not depth <= current_depth and mdsnode.getNumChildren()>0:\n branches = mdsnode.getChildren()\n for b in branches:\n subname,subnode,subtags=traverseTree(b, dead_branches,depth,current_depth+1,noisy,strict)\n if len(subnode.__getDescendants__())>0:\n setattr(me,subname,subnode)\n tagdict[subname]=getattr(me,subname)\n for k,v in subtags.items(): #merge tags in\n tagdict[k]=v\n \n if current_depth==0:#we are done, returning to user\n if tags: \n for tag,obj in tagdict.items():\n setattr(me,tag,obj)\n else:\n tagbranch=Branch(mdsnode)\n for tag,obj in tagdict.items():\n setattr(tagbranch,tag,obj)\n setattr(me,'tags',tagbranch) \n return me\n return (name, me,tagdict) #else, we are still recursing back down the tree", "def _mutate_file(self, node, visited = set([])):\n for ch in self._get_children(node):\n\n if ch not in visited:\n visited.add(ch)\n\n try:\n self._mutate_node(ch)\n except Exception as e:\n print(e)\n\n # Recursion is a bitch\n self._mutate_file(ch, visited)", "def test_run(self, *args, **kwargs):\n\n self.b_relativeDir = True\n d_test = self.tree_process(\n inputReadCallback = self.inputReadCallback,\n analysisCallback = self.inputAnalyzeCallback,\n outputWriteCallback = self.outputSaveCallback,\n persistAnalysisResults = False\n )\n return d_test", "def assemble(self):\n\n # Calculate overlaps between each pair of reads.\n\n for r1, r2 in combinations(self.reads, 2):\n self.calculate_overlap(r1, r2)\n\n # If there are equal reads, they overlap too\n\n for read in self.reads:\n if self.reads[read].visit_limit > 1:\n self.reads[read].overlaps[read] = 0\n\n # Find the read to start the DFS algorithm,\n # The good candidate is a read that can't be glued\n # to any other read on the right side.\n\n start_candidates = self.reads.copy()\n\n for read in self.reads:\n r = self.reads[read]\n for other_read in r.overlaps:\n if other_read in start_candidates:\n del start_candidates[other_read]\n\n if len(start_candidates):\n for read in start_candidates:\n if len(self.reads[read].overlaps):\n self.find_path(1, read)\n break\n else:\n\n # If there no good candidates where to start\n # the DFS algorithm, try each node.\n\n for read in self.reads:\n if len(self.reads[read].overlaps):\n self.find_path(1, read)\n if len(self.path) == self.num_reads:\n break\n\n # Assemble the original sequence:\n # start from the first node in the path,\n # glue subsequent reads, according to how\n # much they are supposed to protrude.\n\n self.sequence = self.path[0]\n\n if len(self.path) > 1:\n for i in range(len(self.path)-1):\n r = self.reads[self.path[i]]\n overlap = r.overlaps[self.path[i+1]]\n if overlap > 0:\n self.sequence += self.path[i+1][-overlap:]\n elif overlap < 0:\n self.sequence = self.sequence[:overlap]", "def test_dfs():\r\n assert DFS(valid_graph, sorted(list(valid_graph.get_graph().nodes))[0]) == \\\r\n list(nx.dfs_preorder_nodes(valid_graph.get_graph(), sorted(list(valid_graph.get_graph().nodes))[0]))", "def dfs(graph, root, method='dfs', max_depth=10000):\n \n # Get node object from node ID\n root = graph.getnodes(root)\n \n # Define the search method\n stack_pop = -1\n if method == 'bfs':\n stack_pop = 0\n \n visited = []\n stack = [root.nid]\n depth = 0\n \n while stack or depth == max_depth:\n node = stack.pop(stack_pop)\n \n if node not in visited:\n visited.append(node)\n stack.extend(\n [x for x in node_neighbors(graph, node) if x not in visited])\n depth += 1\n \n return 
visited", "def __walk_tree(self):\n for root, dirnames, files in os.walk(self.path, topdown=True):\n self.dirCount += 1\n # Create a tuple with the file size, the file name and the files inode (for tracking hard links).\n files = [\n (os.lstat(os.path.join(root, fi)).st_size, os.path.join(root, fi), os.lstat(os.path.join(root, fi)).st_ino) for fi\n in files if (os.lstat(os.path.join(root, fi)).st_size > self.size)]\n self.fileList.extend(files)\n if len(self.excludeList) > 0:\n dirnames[:] = [dir for dir in dirnames if dir not in self.excludeList]\n if not self.cross_mount_points:\n dirnames[:] = [dir for dir in dirnames if not os.path.ismount(os.path.join(root, dir))]", "def _dfs(node, cb, parent=None):\n parent = cb(node, parent)\n for child in _children(node):\n _dfs(child, cb, parent)", "def dfs_visit(self, node):\n super(MutantGenerator, self).generic_visit(node)", "def process_samples(self, itr, paths):\n samples_data = dict()\n critic_rewards = self.critic.critique(itr, paths)\n for level in self.hierarchy:\n samples_data[level.depth] = level.process_samples(itr, paths, critic_rewards)\n return samples_data", "def test_find_path_dfs():\n g = Graph()\n node_1 = Node({'A':['B','C']})\n g.add(node_1)\n node_2 = Node({'B':['C','D']})\n g.add(node_2)\n node_3 = Node({'C':['D']})\n g.add(node_3)\n node_4 = Node({'D':['C']})\n g.add(node_4)\n node_5 = Node({'E':['C']})\n g.add(node_5)\n\n # zero path between node_1 and node_5\n path_0 = g.find_path_dfs(node_1, node_5)\n assert path_0 == None\n\n # only one path between node_5 and node_4\n path_1 = g.find_path_dfs(node_5, node_4)\n assert [ node.name for node in path_1 ] == [ node_5.name, node_3.name, node_4.name ]\n\n # three paths between node_1 and node_3, verify anyone of the three is returned\n path_3 = g.find_path_dfs(node_1, node_3)\n assert [ node.name for node in path_3 ] == [ node_1.name, node_2.name, node_3.name ] or \\\n [ node.name for node in path_3 ] == [ node_1.name, node_2.name, node_4.name, node_3.name ] or \\\n [ node.name for node in path_3 ] == [ node_1.name, node_3.name ]", "def Trees__LCA_LowestCommonDenominator():\n # Python2 ported to Python3 via 2to3-3.7\n # URL:# URL:https://www.hackerrank.com/challenges/binary-search-tree-lowest-common-ancestor/problem\n '''\n class Node:\n def __init__(self,info): \n self.info = info \n self.left = None \n self.right = None \n // this is a node of the tree , which contains info as data, left , right\n '''\n def lca(root, v1, v2):\n # Find a and b. Link child nodes to parent to be able to backtrack.\n # (1) Note, we add 'parent' attribute to node dynamically via node.parent = ...\n root.parent = None\n node_stack = []\n node_stack.append(root)\n v1_node, v2_node = None, None\n while node_stack:\n node = node_stack.pop()\n if not v1_node and node.info == v1:\n v1_node = node\n if not v2_node and node.info == v2:\n v2_node = node\n for child_node in [node.left, node.right]:\n if child_node:\n child_node.parent = node # (1)\n node_stack.append(child_node)\n\n # Generate path from A to root.\n curr = v1_node\n a_to_root = set()\n while curr:\n a_to_root.add(curr.info)\n curr = curr.parent\n\n # traverse up b until you come across an element in a's path to parent.\n curr = v2_node\n while curr:\n if curr.info in a_to_root:\n return curr\n else:\n curr = curr.parent\n\n print(\"Shouldn't be here, Something went wrong\")\n\n # # Recursive. (Iterative is better, but did recursive for practice.) 
~15 min.\n # # Main idea is that we count the number of v1/v2's found of the subnodes.\n # # If a node has sum of 2, we know it's the lca.\n # def lca(root, v1, v2):\n # def lca_helper(node):\n # ret_node = None\n # if not node:\n # return 0, None\n # v_match_counter = 0\n # if node.info in [v1, v2]:\n # v_match_counter += 1\n # left_count, left_node_ret = lca_helper(node.left)\n # right_count, right_node_ret = lca_helper(node.right)\n # v_match_counter += left_count + right_count\n # if v_match_counter == 2:\n # ret_node = node\n # if left_node_ret:\n # ret_node = left_node_ret\n # if right_node_ret:\n # ret_node = right_node_ret\n # return v_match_counter, ret_node\n\n # _, node = lca_helper(root)\n # return node", "def sample(self, root, tree, sample_num, for_d):\n\n # all_score = self.sess.run(self.generator.all_score)\n # all_score is a matrix with shape [n_node, n_node]\n all_score = self.generator.all_score\n samples = []\n paths = []\n n = 0\n\n while len(samples) < sample_num:\n current_node = root\n previous_node = -1\n paths.append([])\n is_root = True\n paths[n].append(current_node)\n while True:\n node_neighbor = tree[current_node][1:] if is_root else tree[current_node]\n # print(\"////\", tree[current_node])\n is_root = False\n if len(node_neighbor) == 0: # the tree only has a root\n return None, None\n if for_d: # skip 1-hop nodes (positive samples)\n if node_neighbor == [root]:\n # in current version, None is returned for simplicity\n return None, None\n if root in node_neighbor:\n node_neighbor.remove(root)\n\n # we retrieve embeddings corresponding to current node's neighbors\n # the multiply of g_v with shape (1, 50) and g_vi with shape(1, 50) is a scala\n # to calculate the multiply of g_v and g_vi: we calculate the \"multiplication\" (inner product) between embedding_matrix with shape(n_node, 50) and its transpose\n # then saved the result in self.score with shape (n_node, n_node) in dis_torch.py\n # all_score has the shape = (5254, 5254), each row is a list of scala, each scala is the \"multiplication\" (inner product) between a particular node to an other node in the graph\n # due to for each current_node, we have a list of its neighbors, saved in [node_neighbor]\n # we can retrieve a list of scalas that equal to the \"multiplications\" (inner product) between g_v(current node) to its neighbor g_vi\n # to do that, we have:\n relevance_probability = all_score[current_node][node_neighbor]\n\n # convert tensor to numpy array\n relevance_probability = relevance_probability.cpu().detach().numpy()\n\n # finally, applying softmax function, we get the relevance probability of current_node and its neighbors, as formed in the paper\n relevance_probability = utils.softmax(relevance_probability)\n \n # pick a random node from its neighbors based on relevance_probability\n next_node = np.random.choice(node_neighbor, size=1, p=relevance_probability)[0] # select next node\n # print(\"???\", next_node)\n paths[n].append(next_node)\n if next_node == previous_node: # terminating condition\n samples.append(current_node)\n break\n previous_node = current_node\n current_node = next_node\n n = n + 1 # n equal to sample_num\n return samples, paths # for each sample, we get one path from root to that sample", "def iter_dfs(self, depth=0):\n yield self, depth\n yield from self.left.iter_dfs(depth=depth + 1)\n yield from self.right.iter_dfs(depth=depth + 1)", "def compare_tree(self):\n result = []\n \n pathA = os.path.join(self.testpath,'A')\n pathB = os.path.join(self.testpath,'B')\n\n 
filesA = [os.path.relpath(f,pathA) for f in self.tree(pathA)]\n filesB = [os.path.relpath(f,pathB) for f in self.tree(pathB)]\n\n filesAB = set(filesA).union(filesB)\n for fileAB in sorted(list(filesAB)):\n\n fileA = os.path.join(self.testpath,'A',fileAB)\n fileB = os.path.join(self.testpath,'B',fileAB)\n try:\n fileAtxt = open(fileA).read()\n except IOError:\n result.append( ('missing_inA',fileAB) )\n continue\n \n try:\n fileBtxt = open(fileB).read()\n except IOError:\n result.append( ('missing_inB',fileAB) )\n continue\n\n if not fileAtxt == fileBtxt:\n result.append( ('disagree',fileAB))\n \n return result", "def greedy_learn(self,node,db,labels,ids):\n if node.depth >= self.maxdepth or len(ids) <= self.minexamples:\n #terminate recursion\n node.pick_best_label(db,labels,ids)\n err = misclassification_error([labels[id] for id in ids])\n if err > 0:\n print \"Reached a leaf and had to make some sacrifices, cost\",err\n print \" depth\",node.depth\n print \" labels\",[labels[id] for id in ids]\n return err\n\n features = self.feature_subset(node,db,labels,ids)\n cost = node.pick_best_split(db,labels,ids,features)\n \n #do a split\n if node.type == 'v':\n #base case: no misclassifications\n \"\"\"\n if cost>0:\n print \"greedy_learn: Warning, pick_best_split indicates a leaf but the cost is nonzero\"\n print \"cost=\",cost,\"misclassification=\",misclassification_error([labels[id] for id in ids])\n print \"# of ids:\",len(ids)\n for i in ids:\n print \"id\",i,\",\",\n for k in range(db.numFeatures()):\n if db[k,i] != None:\n print k,\"=\",db[k,i],\",\",\n print \"label\",labels[i]\n raw_input()\n \"\"\"\n return 0\n elif node.type == 's':\n #print \"Picked feature\",node.feature,\"split\"\n #do a discrete split\n node.children = dict()\n #select sub-indices\n Eids = defaultdict(list)\n noneids = []\n for id in ids:\n v = db[node.feature,id]\n if v is None:\n #item doesn't exist, it's a missing value\n noneids.append(id)\n else:\n Eids[v].append(id)\n #print \" split sizes:\",[len(x) for x in Eids.values()]\n #print \" None ids:\",len(noneids)\n ids = None\n errors = 0\n for v,vids in Eids.iteritems():\n #recurse\n c = DecisionTreeNode(node)\n #print \"Recursing on value\",v\n #print \" ids:\",vids\n errors += self.greedy_learn(c,db,labels,vids+noneids)\n node.children[v] = c\n if c.depth > self.deepest:\n self.deepest = c.depth\n print \"Decision tree learner: Reached node with depth\",self.deepest\n return errors\n else:\n #do an inequality split\n assert node.type == 'i'\n #print \"Picked feature\",node.feature,\"inequality value\",node.value,\"cost\",cost\n leftids = []\n rightids = []\n for id in ids:\n if db[node.feature,id] is not None:\n if db[node.feature,id] <= node.value: leftids.append(id)\n else: rightids.append(id)\n else:\n leftids.append(id)\n rightids.append(id)\n if len(rightids) == len(ids) or len(leftids) == len(ids):\n #due to missing values, this split is useless\n errors = misclassification_error([labels[id] for id in ids])\n print \"useless split on feature\",node.feature,\"value\",node.value,\"misclassification error\",errors\n print \"Left size\",len(leftids),\"right size\",len(rightids)\n raw_input()\n node.pick_best_label(db,labels,ids)\n return errors\n #clear memory associated with ids list\n del ids[:]\n ids = None\n #print \"Left size\",len(leftids),\"right size\",len(rightids)\n c1 = DecisionTreeNode(node)\n c2 = DecisionTreeNode(node)\n #left side\n errors = self.greedy_learn(c1,db,labels,leftids)\n #right side\n errors += 
self.greedy_learn(c2,db,labels,rightids)\n #restore index\n node.children = {0:c1,1:c2}\n if c1.depth > self.deepest:\n self.deepest = c1.depth\n print \"Decision tree learner: Reached node with depth\",self.deepest\n return errors", "def idfs(start_node, goal_state, improved_descendants = False):\t\n\tnumber_nodes_expanded = 0\n\tt0 = time.time()\n\n\tfor lim in range(21): #from depth 0 to 20\n\t\tsolution, number_nodes_expanded_iter = dfs(start_node, goal_state, lim, iterative= True, improved_descendants= improved_descendants)\n\t\tnumber_nodes_expanded += number_nodes_expanded_iter\n\n\t\tt1 = time.time()\n\t\tif (t1 - t0) > 900:\n\t\t\tprint(\"It took more than 15 min\")\n\t\t\treturn False\n\n\t\tif solution:\n\t\t\tprint(\"Expanded nodes: \" + str(number_nodes_expanded))\n\t\t\treturn True\n\t\t\n\treturn False", "def DFS(self, nDepth, treenode, state):\n \n visited = []\n visited.insert(0, (state, treenode))\n \n for index in range(0, nDepth-1): \n actions = self.priorProb(state)\n treenode.expansion(actions)\n treenode.updateU_value(actions)\n treenode, action = treenode.selection() \n state = state.do_move(action).copy()\n visited.insert(0, (state, treenode)) \n \n for index in range(0, len(visited)-1): \n if(visited[index][1].isLeaf() == True):\n value = self.leafEvaluation(visited[index][0])\n else: \n value = visited[index][1].backUp(value)\n visited[-1][1].updateQ_value(value)\n visited[-1][1].updateVisits()\n return visited[-1][1]", "def test_tree_search(self) -> None:\n class DumbModel(GymNet):\n count: int = 0\n\n def initial_inference(self, observations: np.ndarray) -> typing.Tuple[np.ndarray, np.ndarray, float]:\n s, pi, v = super().initial_inference(observations)\n self.count += 1\n return np.ones_like(s) * self.count, np.array([6/8 - 1e-8, 2/8 + 1e-8]), 0\n\n def recurrent_inference(self, latent_state: np.ndarray, action: int) -> typing.Tuple[float, np.ndarray]:\n r, s, pi, v = super().recurrent_inference(latent_state, action)\n self.count += 1\n return 0, np.ones_like(latent_state) * self.count, np.array([6/8 - 1e-8, 2/8 + 1e-8]), action\n\n memory_net = self.net\n memory_search = self.mcts\n\n # Swap class variables\n self.net = DumbModel(self.g, self.config.net_args)\n self.mcts = MuZeroMCTS(self.g, self.net, self.config.args)\n\n # No discounting and no exploration to ensure deterministic behaviour.\n self.config.args.gamma = 1\n self.config.args.exploration_fraction = 0\n\n # Experiment 1\n self.config.args.numMCTSSims = 4\n pi_1, v_1 = self.mcts.runMCTS(np.zeros(4), np.ones(2))\n np.testing.assert_array_almost_equal(pi_1, [1/2, 1/2])\n np.testing.assert_almost_equal(v_1, 1/4)\n self.mcts.clear_tree()\n\n # Experiment 2\n self.config.args.numMCTSSims = 8\n pi_2, v_2 = self.mcts.runMCTS(np.zeros(4), np.ones(2))\n np.testing.assert_array_almost_equal(pi_2, [5/8, 3/8])\n np.testing.assert_almost_equal(v_2, 1/4)\n self.mcts.clear_tree()\n\n # Undo class variables swap\n self.net = memory_net\n self.mcts = memory_search", "def traverse_tree(file, tree):\n\n\tfor node in tree.get_children():\n\t\tpass", "def test_tree_collection_read_write_file(self):\n def eval_klass(coll):\n coll.writeToFile('sample.trees')\n read = LoadTrees('sample.trees')\n self.assertTrue(type(read) == type(coll))\n \n eval_klass(LogLikelihoodScoredTreeCollection(self.scored_trees))\n \n # convert lnL into p\n eval_klass(WeightedTreeCollection([(exp(s), t) \n for s,t in self.scored_trees]))\n remove_files(['sample.trees'], error_on_missing=False)", "def _explore(self, node, visited, 
skip_father=None):\n if node in visited:\n return\n\n visited = visited + [node]\n\n fathers_context = AbstractState()\n fathers_context.merge_fathers(node, skip_father, self)\n\n # Exclude path that dont bring further information\n if node in self.visited_all_paths:\n if self.visited_all_paths[node].does_not_bring_new_info(fathers_context):\n return\n else:\n self.visited_all_paths[node] = AbstractState()\n\n self.visited_all_paths[node].add(fathers_context)\n\n node.context[self.KEY] = fathers_context\n\n contains_call = fathers_context.analyze_node(node, self)\n node.context[self.KEY] = fathers_context\n\n sons = node.sons\n if contains_call and node.type in [NodeType.IF, NodeType.IFLOOP]:\n if _filter_if(node):\n son = sons[0]\n self._explore(son, visited, node)\n sons = sons[1:]\n else:\n son = sons[1]\n self._explore(son, visited, node)\n sons = [sons[0]]\n\n for son in sons:\n self._explore(son, visited)", "def main():\n with read_std_files(OUT_FILE) as (qrys_file, docs_file, out_file):\n doc_count, token_count, word_map = map_docs(docs_file)\n avg_doc_len = token_count / float(doc_count)\n for doc_id, doc_tokens in tokenize(docs_file):\n doc_len = len(doc_tokens)\n doc_dct = dictify(doc_tokens)\n for query_id, query_tokens in tokenize(qrys_file):\n query_dct = dictify(query_tokens)\n similarity = tfidf(query_dct, doc_dct, doc_len, doc_count, avg_doc_len, word_map)\n log(out_file, query_id, doc_id, similarity)", "def buildTreePandas(rows, res, min_ppl = None, maxDepth=None, scoref=entropy, depth=0):\n minimum_ppl = deepcopy(min_ppl)\n num_ppl = len(rows)\n \n if min_ppl is not None and num_ppl <= min_ppl:\n #Extra protection to stop the recursion\n return decisionNode(results=__uniqueCountsPandas(rows, res)) \n if num_ppl==0: \n return decisionNode( )\n newDepth = depth + 1\n if (maxDepth == 0 or maxDepth) and (newDepth > maxDepth):\n #print \"Hooray I got here.\"\n return decisionNode(results=__uniqueCountsPandas(rows, res))\n current_score=scoref(rows, resCol = res)\n # Set up some variables to track the best criteria\n best_gain=0.0\n best_criteria=None\n best_sets=None\n \n featColumns=rows.columns.tolist()\n featColumns.remove(res)\n for col in featColumns:\n # Generate the list of different values in\n # this column\n column_values=rows.loc[:,col].unique()\n # Now try dividing the rows up for each value\n # in this column\n copy = rows.sort(columns = col)\n for value in column_values:\n (set1,set2)=__dividePandas(copy,col,value)\n # Information gain\n p=float(len(set1))/len(rows)\n gain=current_score-p*scoref(set1, resCol = res)-(1-p)*scoref(set2, resCol = res)\n size_min = 0 if minimum_ppl is None else minimum_ppl - 1\n if gain>best_gain and len(set1)>size_min and len(set2)>size_min:\n best_gain=gain\n best_criteria=(col,value)\n best_sets=(set1,set2)\n # Create the subbranches\n if best_gain>0:\n trueBranch=buildTreePandas(best_sets[0], res, min_ppl = minimum_ppl, maxDepth = maxDepth, depth=newDepth)\n falseBranch=buildTreePandas(best_sets[1], res, min_ppl = minimum_ppl, maxDepth = maxDepth, depth=newDepth)\n return decisionNode(col=best_criteria[0],value=best_criteria[1],\n tb=trueBranch,fb=falseBranch)\n else:\n return decisionNode(results=__uniqueCountsPandas(rows, res))", "def _analyze(self):\n for _, self.subdirs, files in os.walk(self.path):\n if self.p.sort:\n self.subdirs.sort()\n files.sort()\n for f in files:\n self._analyze_file(fileextlow(f), f)\n break # stop walk() from entering subdirectories\n\n self.p.nr_dirs += 1\n if self.lossless or self.compressed or 
self.videos:\n if self.lossless or self.compressed:\n if not self.images:\n if self.p.warn_covers:\n print(f\"{W}{self.path}{R}: no cover file\")\n self.p.nr_no_cover += 1\n elif not have_valid_cover_name(self.images):\n if self.p.warn_covers:\n print(f\"{W}{self.path}{R}: wrong cover names\")\n self.p.nr_wrong_cover_name += 1\n if self.lossless:\n if self.compressed:\n self.p.nr_mixed_lossless_compressed += 1\n else:\n self.p.nr_lossless_dirs += 1\n\n if self.cue:\n if not self.lossless:\n if self.p.warn_cue:\n print(f\"{W}{self.path}{R}: cue but no lossless files\")\n self.p.nr_lossy_cue += 1\n elif not self.compressed:\n if len(self.cue) == 1:\n self.p.nr_cue += 1\n else:\n if self.p.warn_cue:\n print(f\"{W}{self.path}{R}: {len(self.cue)} cue files\")\n self.p.nr_multiple_cue += 1\n\n self.p.nr_media_dirs += 1\n self.p.nr_lossless += len(self.lossless)\n self.p.nr_compressed += len(self.compressed)\n self.p.nr_video_files += len(self.videos)\n self.p.nr_ignored += self.ignored\n self.p.unknown.update(self.unknown)\n else:\n if self.images and not self.subdirs:\n self.p.nr_only_images += 1\n else:\n self.p.nr_no_media_dirs += 1", "def process_tree_nodes(self):\n self.leaves, self.internal = set(), set()\n _is_cladogram = True\n for node in self.nodes:\n if not node._been_processed:\n if not node.name:\n node.name = node.id\n elif self._remove_name_quotes and (node.name[0] == node.name[-1] == \"'\" or node.name[0] == node.name[-1] == '\"'):\n node.name = node.name[1:-1].strip()\n if node.branch != '' and node.branch != None:\n node.branch = float(node.branch)\n _is_cladogram = False\n else:\n node.branch = 0.0\n if not node.children:\n self.leaves.add(node)\n else:\n self.internal.add(node)\n if not node._been_processed and node.support:\n try:\n node.support = float(node.support)\n if not node.support_type:\n node.support_type = self._support_label\n except ValueError:\n if not node.comment:\n node.comment = node.support\n node.support = None\n if self._is_cladogram == None:\n self._is_cladogram = _is_cladogram\n self.node_names = {}\n for node in self.nodes:\n if node != self.root:\n if self._is_cladogram:\n node.branch = self._cladogram_branch\n if node.name in self.node_names:\n i = 2\n name = '{}_{}'.format(node.name, i)\n while name in self.node_names:\n i += 1\n name = '{}_{}'.format(node.name, i)\n if verbose:\n print('Warning: non-unique node \"{}\" was renamed to \"{}\"'.format(node.name, name))\n node.name = name\n self.node_names[node.name] = node\n node._been_processed = True\n self.calculate_paths()", "def loop_nonThreaded():\n nonlocal index, total\n nonlocal d_tree\n nonlocal fn_inputReadCallback\n nonlocal fn_analysisCallback\n nonlocal fn_outputWriteCallback\n nonlocal dret_inputSet\n nonlocal dret_analyze\n nonlocal dret_outputSet\n nonlocal str_desc\n\n b_analyzeStatusHist: bool = False\n b_inputStatusHist: bool = False\n b_outputStatusHist: bool = False\n\n if int(self.verbosityLevel) and self.toConsole():\n iterator = tqdm( self.d_inputTree.items(),\n desc = str_desc)\n else:\n iterator = self.d_inputTree.items()\n\n for path, data in iterator:\n dret_inputSet = {}\n dret_analyze = {}\n dret_outputSet = {}\n # Read (is sometimes skipped) / Analyze / Write (also sometimes skipped)\n if fn_inputReadCallback:\n dret_inputSet = inputSet_read(path, data)\n try:\n b_inputStatusHist = b_inputStatusHist or dret_inputSet['status']\n except:\n pass\n if fn_analysisCallback:\n try:\n dret_analyze = analysis_do(path, d_tree[path], index)\n except:\n dret_analyze['status'] = 
False\n self.dp.qprint(\"Analysis failed\", comms = 'error')\n try:\n b_analyzeStatusHist = b_analyzeStatusHist or dret_analyze['status']\n except:\n pass\n if fn_outputWriteCallback:\n if 'status' in dret_analyze.keys():\n if dret_analyze['status']:\n dret_outputSet = outputSet_write(path, d_tree[path])\n try:\n b_outputStatusHist = b_outputStatusHist or dret_outputSet['status']\n except:\n pass\n index += 1\n dret_inputSet['status'] = b_inputStatusHist\n dret_analyze['status'] = b_analyzeStatusHist\n dret_outputSet['status'] = b_outputStatusHist\n tree_removeDeadBranches()", "def mcts_search(self, state):\n assert state.current_player() == self.player\n root = SearchNode(None, 1)\n for _ in range(self.max_simulations):\n visit_path, working_state = self._apply_tree_policy(root, state)\n if working_state.is_terminal():\n node_value = working_state.player_return(self.player)\n else:\n node_value = self.evaluator.evaluate(\n working_state, self.player, self._random_state)\n\n for node in visit_path:\n node.total_reward += node_value * node.player_sign\n node.explore_count += 1\n\n most_visited = root.most_visited_child()\n\n if self.verbose:\n print(\"Root:\", root.to_str())\n print(\"Children:\")\n print(root.children_str(working_state))\n print(\"Children of chosen:\")\n chosen_state = state.clone()\n chosen_state.apply_action(most_visited.action)\n print(most_visited.children_str(chosen_state))\n\n return most_visited.action", "def test5(self):\n tgts = [ ('CCC(O)C(=O)O',\n ('O[CH-][CH2-]','O[CH-][C-]=O')),\n ]\n for smi,matches in tgts:\n m = Chem.MolFromSmiles(smi)\n fp1 = Chem.RDKFingerprint(m,2,7,9192,4,1)\n obs = fp1.GetOnBits()\n for match in matches:\n m2 = Chem.MolFromSmiles(match)\n fp2 = Chem.RDKFingerprint(m2,2,7,9192,4,1)\n v1,v2 = DataStructs.OnBitProjSimilarity(fp2,fp1)\n assert feq(v1,1.0000),'substruct %s not properly contained in %s'%(match,smi)", "def buildTree(rows, maxDepth = None, scoref=entropy, depth = 0):\n #A base condition for the recursion. 
Check if this branch of a split has no data\n if len(rows)==0:\n return decisionNode( )\n newDepth = depth + 1 #Calculate the depth of the next split.\n #Check if the depth at the next split is greater than a maximum specified depth\n if (maxDepth == 0 or maxDepth) and (newDepth > maxDepth): \n return decisionNode(results=__uniqueCounts(rows)) #If so, stop splitting.\n current_score=scoref(rows) #Calculate the current value of the score function.\n # Set up some variables to track the best criteria\n best_gain=0.0 #Initialize a value for the best gain from all possible splits\n best_criteria=None #Initialize a variable for the best column to split on\n best_sets=None #Initialize a variable for the best split's true and false data.\n\n #Count the number of columns in the row, minus the results column \n column_count=len(rows[0])-1\n for col in range(0,column_count): #Iterate over all the columns of the data\n #Generate the list of different values in this column\n column_values={} #Initialize a dictionary to store the column values\n for row in rows: \n #Iterate over each row, adding a key in the dict for each observed value\n column_values[row[col]]=1\n # Divide the dataset on each value in this column.\n for value in column_values.keys( ):\n (set1,set2)=__divideset(rows,col,value)\n #Calculate the fraction of data in the true branch\n p=float(len(set1))/len(rows) \n #Calculate the gain on the chosen score function using this split.\n gain=current_score-p*scoref(set1)-(1-p)*scoref(set2) \n #Check if this split provides a better gain than the best previous split\n if gain>best_gain and len(set1)>0 and len(set2)>0:\n best_gain=gain\n best_criteria=(col,value)\n best_sets=(set1,set2)\n # Recursively create the subbranches\n if best_gain>0:\n trueBranch=buildTree(best_sets[0], maxDepth = maxDepth, depth = newDepth)\n falseBranch=buildTree(best_sets[1], maxDepth = maxDepth, depth = newDepth)\n return decisionNode(col=best_criteria[0],value=best_criteria[1],\n tb=trueBranch,fb=falseBranch)\n else:\n return decisionNode(results=__uniqueCounts(rows))", "def test4(self):\n tgts = [ ('CCC(O)C(=O)O',\n ('CCC','OCC','OCC=O','OCCO','CCCC','OC=O','CC(O)C')),\n ]\n for smi,matches in tgts:\n m = Chem.MolFromSmiles(smi)\n fp1 = Chem.RDKFingerprint(m,2,7,9192,4,0)\n obs = fp1.GetOnBits()\n for match in matches:\n m2 = Chem.MolFromSmiles(match)\n fp2 = Chem.RDKFingerprint(m2,2,7,9192,4,0)\n v1,v2 = DataStructs.OnBitProjSimilarity(fp2,fp1)\n assert feq(v1,1.0000),'substruct %s not properly contained in %s'%(match,smi)", "def spdfs(self, match):\n\n if self.flag == 1:\n print match\n # print \"Match : \", match\n\n if self.center == 0 and self.left == 0 and self.right == 0:\n return\n\n if self.center != 0:\n self.center.spdfs(match + self.center.ch)\n\n if self.right != 0:\n self.right.spdfs(match[:-1] + self.right.ch)\n\n if self.left != 0:\n self.left.spdfs(match[:-1]+self.left.ch)", "def legitimate_mark_changes(self, verbose=False):\n if self._undirected:\n raise ValueError('Only defined for DMAGs')\n\n disc_paths = self.discriminating_paths()\n\n mark_changes_dir = set()\n for i, j in self._directed:\n if verbose: print(f'{i}->{j} => {i}<->{j} ?')\n # FIRST CONDITIONS\n parents_condition = all(self.has_directed(parent, j) for parent in self._parents[i])\n if not parents_condition:\n if verbose: print('Failed parents condition')\n continue\n spouses_condition = all(self.has_any_edge(spouse, j) for spouse in self._spouses[i])\n if not spouses_condition:\n if verbose: print('Failed spouses condition')\n 
continue\n\n # SECOND CONDITION\n disc_paths_for_i = [path for path in disc_paths.keys() if path[-2] == i]\n disc_paths_condition = all(path[-1] != j for path in disc_paths_for_i)\n if not disc_paths_condition:\n if verbose: print('Failed discriminating path condition')\n continue\n\n # FINAL CONDITION\n if i in self.ancestors_of(j, exclude_arcs={(i, j)}):\n if verbose: print('Failed ancestral condition')\n continue\n\n if verbose: print('Passed')\n mark_changes_dir.add((i, j))\n\n mark_changes_bidir = set()\n for i, j in self._bidirected | set(map(reversed, self._bidirected)):\n if verbose: print(f'{i}<->{j} => {i}->{j} ?')\n # FIRST CONDITIONS\n parents_condition = all(self.has_directed(parent, j) for parent in self._parents[i])\n if not parents_condition:\n if verbose: print('Failed parents condition')\n continue\n spouses_condition = all(self.has_any_edge(spouse, j) for spouse in self._spouses[i] if spouse != j)\n if not spouses_condition:\n if verbose: print('Failed spouses condition')\n continue\n\n # SECOND CONDITION\n disc_paths_for_i = [path for path in disc_paths.keys() if path[-2] == i]\n disc_paths_condition = all(path[-1] != j for path in disc_paths_for_i)\n if not disc_paths_condition:\n if verbose: print('Failed discriminating path condition')\n continue\n\n # FINAL CONDITION\n if i in self.ancestors_of(j):\n if verbose: print('Failed ancestral condition')\n continue\n\n if verbose: print('Passed')\n mark_changes_bidir.add((i, j))\n\n return mark_changes_dir, mark_changes_bidir", "def calcSccs(N):\n pass2N = []\n numNodes = len(N)\t\t\t# for sanity check\n \n if Globals.debug: \n\tprint('DEBUG list after loading')\n\tprintNodeList(N)\n\t \n if Globals.debug: print('BEGIN 1ST PASS: order by finishing times')\n \n while len(N) != 0:\t\t\t# first pass to get finishing times, outer loop to make sure catch all nodes\n\tn = N.pop()\n\tif Globals.debug: printNode(n)\n\t\n\tif not n.visitedThisPass:\n\t #dfsFirstPassRevNotRecursive(n, pass2N)\n\t dfsFirstPassRev(n, N, pass2N)\t# start the dfs for this particular node, N will lose other nodes in here, they're added in order to pass2N\n \n if len(pass2N) != numNodes:\n\traise ValueError('node counts differ from after first pass, before', numNodes, 'after', len(pass2N))\n \n if Globals.debug: print('BEGIN 2ND PASS: make sccs')\n \n for n in pass2N: \t\t\t# reset visited flags\n\tn.visitedThisPass = False\n \n leaderSets = []\n while len(pass2N) != 0:\t\t# second pass to get sets, outer loop to make sure catch all nodes\n\tn = pass2N.pop()\n\tif n.visitedThisPass:\n\t continue\n\t\n\tif Globals.debug: printNode(n)\n\tleaderSet = [n]\t\t\t# each starting node is a new leader and the first in its set\n\tleaderSets.append(leaderSet)\n\tdfsSecondPass(n, pass2N, leaderSet)\n\t#dfsSecondPassNotRecursive(n, pass2N, leaderSet)\n \n print('RESULTS: sccs')\n \n leaderSets.sort(cmp=bySize)\t\t# sort the results by size\n \n results = ''\n for i in range (0, 5):\n\tif len(leaderSets) > i:\n\t ls = leaderSets[i]\n\t results = results + str(len(ls))\n\t if Globals.debug: \n\t\totherNodes = ''\n\t\tfor nn in ls[1:]: otherNodes = otherNodes + ', ' + str(nn.label)\n\t\tprint('set leader node', ls[0].label, 'size', len(ls), 'scc', otherNodes)\n\telse:\n\t results = results + '0'\n\t\n\tif i < 4:\n\t results = results + ','\n\t \n print(results)", "def get_stats(sents, **dummy_args):\n from collections import Counter\n statsmode_results = Counter() \n # first, put the relevant trees into temp file\n\n for sent in sents:\n statsmode_results['Sentences'] += 
1\n deps = get_deps(sent, dep_type)\n numpass = len([x for x in deps.links if x.type.endswith('pass')])\n statsmode_results['Passives'] += numpass\n statsmode_results['Tokens'] += len(sent.tokens)\n words = [w.word for w in sent.tokens if w.word is not None and w.word.isalnum()]\n statsmode_results['Words'] += len(words)\n statsmode_results['Characters'] += len(''.join(words))\n\n to_open = '\\n'.join(s.parse_string.strip() for s in sents)\n\n from corpkit.dictionaries.process_types import processes\n from corpkit.other import as_regex\n tregex_qs = {'Imperative': r'ROOT < (/(S|SBAR)/ < (VP !< VBD !< VBG !$ NP !$ SBAR < NP !$-- S !$-- VP !$ VP)) !<< (/\\?/ !< __) !<<- /-R.B-/ !<<, /(?i)^(-l.b-|hi|hey|hello|oh|wow|thank|thankyou|thanks|welcome)$/',\n 'Open interrogative': r'ROOT < SBARQ <<- (/\\?/ !< __)', \n 'Closed interrogative': r'ROOT ( < (SQ < (NP $+ VP)) << (/\\?/ !< __) | < (/(S|SBAR)/ < (VP $+ NP)) <<- (/\\?/ !< __))',\n 'Unmodalised declarative': r'ROOT < (S < (/(NP|SBAR|VP)/ $+ (VP !< MD)))',\n 'Modalised declarative': r'ROOT < (S < (/(NP|SBAR|VP)/ $+ (VP < MD)))',\n 'Open class': r'/^(NN|JJ|VB|RB)/ < __',\n 'Closed class': r'__ !< __ !> /^(NN|JJ|VB|RB)/',\n 'Clauses': r'/^S/ < __',\n 'Interrogative': r'ROOT << (/\\?/ !< __)',\n 'Mental processes': r'VP > /^(S|ROOT)/ <+(VP) (VP <<# /%s/)' % \\\n as_regex(processes.mental, boundaries='w'),\n 'Verbal processes': r'VP > /^(S|ROOT)/ <+(VP) (VP <<# /%s/)' % \\\n as_regex(processes.verbal, boundaries='w'),\n 'Relational processes': r'VP > /^(S|ROOT)/ <+(VP) (VP <<# /%s/)' % \\\n as_regex(processes.relational, boundaries='w'),\n 'Verbless clause': r'/^S/ !<< /^VB.?/'}\n\n for name, q in sorted(tregex_qs.items()):\n res = tregex_engine(query=q, \n options=['-o', '-C'], \n corpus=to_open, \n root=root\n )\n statsmode_results[name] += int(res)\n if root:\n root.update()\n return statsmode_results, []", "def run(args):\n # Load the pedigree file\n print('Parsing pedigree...', file=sys.stderr)\n pedigree = Pedigree.parse(args.input_ped)\n for member in pedigree.members:\n print(member, file=sys.stderr)\n # load the HLA calls for each donor\n print('Loading HLA calls...', file=sys.stderr)\n all_calls = {}\n calls = {}\n for donor, fcalls in zip(args.donor_name, args.donor_calls):\n print(donor, fcalls.name, file=sys.stderr)\n all_calls[donor] = []\n calls[donor] = {'A': [], 'B': [], 'C': []}\n for hla_type in sorted(HLAType.parse(line.strip())\n for line in fcalls):\n calls[donor][hla_type.gene_name].append(hla_type)\n all_calls[donor].append(hla_type)\n print('Checking for consistency...', file=sys.stderr)\n # get IDs of parents\n index_member = pedigree.by_name[args.index_donor]\n father = index_member.father\n mother = index_member.mother\n # check for 4 digit consistency\n mm4 = check_consistency(4, calls[args.index_donor], calls.get(father),\n calls.get(mother))\n # check for 2 digit consistency\n mm2 = check_consistency(2, calls[args.index_donor], calls.get(father),\n calls.get(mother))\n num_parents = len({father, mother} - {'0'})\n # print result line\n print('\\t'.join(map(str, [\n args.index_donor, num_parents, mm2 or 'OK', mm4 or 'OK'\n ])))", "def compare_trees(tree1, tree2):\n \tresponse = {}\n \tstart_time = time.time()\n \ttry:\t\n \t\ttns = dendropy.TaxonNamespace() \t\n \t\n \t\ttree_obj1 = dendropy.Tree.get(data=tree1, schema=\"newick\",taxon_namespace=tns)\n \t\ttree_obj2 = dendropy.Tree.get(data=tree2, schema=\"newick\",taxon_namespace=tns)\n\n \t\ttree_obj1.encode_bipartitions()\n \t\ttree_obj2.encode_bipartitions()\n\n 
\t\t#-----------------------------------------------------------\n \t\t#This method returns the symmetric distance between two trees. \n \t\t#The symmetric distance between two trees is the sum of the number of splits found in one of the trees but not the other. \n \t\t#It is common to see this statistic called the Robinson-Foulds distance\n\n \t\tareSame = True if treecompare.symmetric_difference(tree_obj1, tree_obj2) == 0 else False\n \t\tstatus = 200\n \t\tmessage = \"Success\"\n \t\tresponse['are_same_tree'] = areSame\n \n \texcept Exception, e:\n \t\tif \"Incomplete or improperly-terminated tree statement\" in str(e): #invalid: \"((A,B),C,D));\" valid: ((A,B),(C,D)); \n \t\t\tmessage = \"NewickReaderIncompleteTreeStatementError: \" + str(e)\n \t \t\tstatus = 400\n \t\telif \"Unbalanced parentheses at tree statement\" in str(e): #invalid: \"((A,B),(C,D);\" valid: ((A,B),(C,D)); \n \t\t\tmessage = \"NewickReaderMalformedStatementError: \"+str(e) \n \t \t\tstatus = 400\n \t\telif \"Multiple occurrences of the same taxa\" in str(e): #invalid: \"((A,B),(C,C));\" valid: ((A,B),(C,D));\n \t\t\tmessage = \"NewickReaderDuplicateTaxonError: \"+str(e)\n \t \t\tstatus = 400\n \t\telif \"Unexpected end of stream\" in str(e): # invalid: \"((A,B),(C,D))\" valid: ((A,B),(C,D));\n \t\t\tmessage = \"UnexpectedEndOfStreamError: \"+str(e)\n \t \t\tstatus = 400\n \t\telse:\n \t\t\tmessage = \"Error: Failed to compare trees. \"+str(e)\n \t \t\tstatus = 500\n \t \t\n \tresponse['status_code'] = status\n \tresponse['message'] = message\n\n \tend_time = time.time()\n \texecution_time = end_time-start_time\n #service result creation time\n \tcreation_time = datetime.datetime.now().isoformat()\n \tmeta_data = {'creation_time': creation_time, 'execution_time': float('{:4.2f}'.format(execution_time)), 'source_urls':[\"http://dendropy.org/library/treecompare.html#module-dendropy.calculate.treecompare\"] }\n\n \tresponse['meta_data'] = meta_data\n \tprint response\n \treturn response", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n fringe = util.Stack()\n return GraphSearch(problem, 'dfs').search(fringe)", "def _dfs_assign(self, filetree):\n stack = [filetree]\n while stack:\n node = stack.pop()\n if isinstance(node, tuple) and node[0][\"packmode\"] is None:\n # all children have been seen already, assing packmode\n node = node[0] # unpack the actual node\n weights = defaultdict(int)\n for child in node[\"children\"].values():\n weights[child[\"packmode\"]] += child[\"weight\"]\n packmode, weight = max(weights.items(), key=lambda x: x[1])\n node[\"weight\"] = weight\n node[\"packmode\"] = packmode\n elif node[\"children\"]:\n # schedule that node for computation\n stack.append((node,))\n # visit all children first\n for child in node[\"children\"].values():\n stack.append(child)", "def __deep_count_errors(node, testSet, res):\n if node.results is not None: #Check if this node is a leaf node\n return __count_errors(node, testSet, res) #If so, return the test set classification errors made by this node.\n else:\n tbSet = testSet[testSet[node.col] >= node.value] #find which test observations belong to this tree's true branch\n fbSet = testSet[testSet[node.col] < node.value] #find which test observations belong to this tree's false branch\n \n if node.tb.results is None: #Check if the true branch is a branch node\n #If so, get the count of all misclassifications made by this branch's descendent leaf nodes on the test observations\n term1 = __deep_count_errors(node.tb, tbSet, res)\n else: #If the true 
branch is a leaf node, return the count of all test set classification errors made by the leaf.\n term1 = __count_errors(node.tb, tbSet,res)\n if node.fb.results is None: #Check if the false branch is a branch node\n #If so, get the count of all misclassifications made by this branch's descendent leaf nodes on the test observations\n term2 = __deep_count_errors(node.fb, fbSet, res)\n else: #If the false branch is a leaf node, return the count of all test set classification errors made by the leaf.\n term2 = __count_errors(node.fb, fbSet, res) \n return term1 + term2 #Sum the classification errors made by this nodes descendant leaves.", "def crawl(root):\n global emptyindex\n sizes = {}\n inodes = []\n\n def crawl_thread(root, top, depth, maxdepth, sizes, inodes):\n global total_doc_count\n global scan_paths\n thread = current_thread().name\n\n crawl_start = time.time()\n docs = []\n with crawl_thread_lock:\n scan_paths.append(top)\n logger.debug('[{0}] starting crawl {1} (depth {2}, maxdepth {3})...'.format(thread, top, depth, maxdepth))\n if options.verbose or options.vverbose:\n logger.info('[{0}] starting crawl {1} (depth {2}, maxdepth {3})...'.format(thread, top, depth, maxdepth))\n size, size_du, file_count, dir_count = get_tree_size(thread, root, top, top, docs, sizes, inodes, depth, maxdepth)\n doc_count = len(docs)\n if doc_count > 0:\n start_bulk_upload(thread, root, docs)\n with crawl_thread_lock:\n total_doc_count[root] += doc_count\n docs.clear()\n # Add sizes of subdir to root dir \n if depth > 0:\n with crawl_thread_lock:\n sizes[top] = {\n 'size': size,\n 'size_du': size_du,\n 'file_count': file_count,\n 'dir_count': dir_count\n }\n if size > 0:\n with crawl_thread_lock:\n sizes[root]['size'] += sizes[top]['size']\n sizes[root]['size_du'] += sizes[top]['size_du']\n sizes[root]['dir_count'] += sizes[top]['dir_count']\n sizes[root]['file_count'] += sizes[top]['file_count']\n \n crawl_time = get_time(time.time() - crawl_start)\n logger.info('[{0}] finished crawling {1} ({2} dirs, {3} files, {4}) in {5}'.format(\n thread, top, dir_count, file_count, convert_size(size), crawl_time))\n with crawl_thread_lock:\n scan_paths.remove(top)\n\n\n scandir_walk_start = time.time()\n\n # find all subdirs at level 1\n subdir_list = []\n for entry in os.scandir(root):\n if entry.is_symlink():\n pass\n elif entry.is_dir() and not dir_excluded(entry.path):\n subdir_list.append(entry.path)\n if len(subdir_list) > 0:\n logger.info('found {0} subdirs at level 1, starting threads...'.format(len(subdir_list)))\n else:\n logger.info('found 0 subdirs at level 1')\n \n with futures.ThreadPoolExecutor(max_workers=maxthreads) as executor:\n # Set up thread to crawl rootdir (not recursive)\n future = executor.submit(crawl_thread, root, root, 0, 0, sizes, inodes)\n try:\n data = future.result()\n except Exception as e:\n logmsg = 'FATAL ERROR: an exception has occurred: {0}'.format(e)\n logger.critical(logmsg, exc_info=1)\n if logtofile: logger_warn.critical(logmsg, exc_info=1)\n close_app_critical_error()\n \n # Set up threads to crawl (recursive) from each of the level 1 subdirs\n futures_subdir = {executor.submit(crawl_thread, root, subdir, 1, options.maxdepth, sizes, inodes): subdir for subdir in subdir_list}\n for future in futures.as_completed(futures_subdir):\n try:\n data = future.result()\n except Exception as e: \n logmsg = 'FATAL ERROR: an exception has occurred: {0}'.format(e)\n logger.critical(logmsg, exc_info=1)\n if logtofile: logger_warn.critical(logmsg, exc_info=1)\n 
close_app_critical_error()\n\n scandir_walk_time = time.time() - scandir_walk_start\n end_time = datetime.utcnow().strftime(\"%Y-%m-%dT%H:%M:%S\")\n \n # check if directory is empty or all files/dirs excluded\n if not root in sizes:\n emptyindex = True\n logger.info('*** finished walking {0} ***'.format(root))\n logger.info('*** directory is empty or all files/dirs excluded ***')\n # delete index if no file/dir docs in index\n es.indices.refresh(options.index)\n res = es.count(index=options.index, body={'query':{'query_string':{'query':'type:(file OR directory)'}}})['count']\n if res == 0:\n logger.info('*** deleting empty index {0} ***'.format(options.index))\n es.indices.delete(index=options.index, ignore=[400, 404])\n # upload the directory doc for the root top level directory to ES\n else:\n es.index(options.index, sizes[root])\n total_doc_count[root] += 1\n\n # add data to info index\n \n index_info_crawlend(es, options.index, root, sizes[root]['size'], \n sizes[root]['size_du'], filecount[root], dircount[root], \n end_time, scandir_walk_time)\n\n logger.info('*** finished walking {0} ***'.format(root))\n logger.info('*** walk files {0}, skipped {1} ***'.format(filecount[root], skipfilecount[root]))\n logger.info('*** walk size {0} ***'.format(convert_size(sizes[root]['size'])))\n logger.info('*** walk du size {0} ***'.format(convert_size(sizes[root]['size_du'])))\n logger.info('*** walk dirs {0}, skipped {1} ***'.format(dircount[root], skipdircount[root]))\n logger.info('*** walk took {0} ***'.format(get_time(scandir_walk_time)))\n logger.info('*** walk perf {0:.3f} inodes/s ***'.format(inodecount[root] / scandir_walk_time))\n logger.info('*** docs indexed {0} ***'.format(total_doc_count[root]))\n logger.info('*** indexing perf {0:.3f} docs/s ***'.format(total_doc_count[root] / scandir_walk_time))\n logger.info('*** bulk uploads took {0} ***'.format(get_time(bulktime[root])))\n logger.info('*** warnings/errors {0} ***'.format(warnings))", "def parse_reads_and_select_candidates(self, reads):\n st_time = time.time()\n # read_id_list = []\n total_reads = 0\n read_unique_id = 0\n for read in reads:\n # check if the read is usable\n if read.mapping_quality >= DEFAULT_MIN_MAP_QUALITY and read.is_secondary is False \\\n and read.is_supplementary is False and read.is_unmapped is False and read.is_qcfail is False:\n\n read.query_name = read.query_name + '_' + str(read_unique_id)\n if self.find_read_candidates(read=read):\n # read_id_list.append(read.query_name)\n total_reads += 1\n read_unique_id += 1\n\n if total_reads == 0:\n return []\n\n selected_allele_list = []\n postprocess_read_id_list = set()\n for pos in self.positional_allele_dictionary:\n if pos < self.region_start_position or pos > self.region_end_position:\n continue\n ref = self.reference_dictionary[pos]\n\n all_allele_dictionary = self.positional_allele_dictionary[pos]\n all_mismatch_count = 0\n for allele in all_allele_dictionary:\n all_mismatch_count += all_allele_dictionary[allele]\n\n # pick the top 2 most frequent allele\n allele_frequency_list = list(sorted(all_allele_dictionary.items(), key=operator.itemgetter(1, 0),\n reverse=True))[:PLOIDY]\n allele_list = self._filter_alleles(pos, allele_frequency_list)\n alt1 = allele_list[0] if len(allele_list) >= 1 else None\n alt2 = allele_list[1] if len(allele_list) >= 2 else '.'\n if alt1 is None:\n continue\n mq_rms = round(math.sqrt(self.rms_mq[pos]/self.coverage[pos]), 3) if self.coverage[pos] > 0 else 0\n dp = self.coverage[pos]\n ref_count = self.coverage[pos] - 
all_mismatch_count\n candidate_record = [self.chromosome_name] + self._get_record(pos, alt1, alt2, ref, ref_count) + [mq_rms] + [dp]\n postprocess_read_id_list.update(self.read_id_by_position[pos])\n selected_allele_list.append(candidate_record)\n\n postprocess_read_id_list = list(postprocess_read_id_list)\n if len(selected_allele_list) > 0:\n self.postprocess_reference()\n self.postprocess_reads(postprocess_read_id_list)\n\n return selected_allele_list", "def _run():\n matching_terms = {'a', 'b'}\n source_counts = {'a': 10, 'b': 50, 'c': 25}\n target_counts = {'a': 4, 'b': 73, 'c': 15}\n source_chunk = ['a', 'b']\n target_chunk = ['a', 'c', 'b']\n source_distance = score.find_distance(\n matching_terms, source_chunk, source_counts)\n target_distance = score.find_distance(\n matching_terms, target_chunk, target_counts)\n match_score = score.vanilla(\n matching_terms, source_distance, target_distance, source_counts,\n target_counts)\n print('Calculated score:', match_score)", "def run(self, scheduler=\"single-threaded\"):\n _ = dask.compute(self.leaves, scheduler=scheduler)\n # when dask goes thru the tree, it knows the full sequence of ops\n # needed to compute each leaf, so this gives dask full authority in\n # determining the best dispatch path.", "def det_eval(gt_dir, dt_parent,save_parent):\r\n # load all groundtruths into a dict of {<image-name>: <list-of-polygons>}\r\n\r\n res_flag = False\r\n n_gt = 0\r\n all_gt = {}\r\n gt_files = glob.glob(join(gt_dir, 'image_*.txt'))\r\n # assert(len(gt_files) == N_TEST)\r\n print('Number of GT files: %d' % len(gt_files))\r\n for gt_file in gt_files:\r\n with open(gt_file, 'r') as f:\r\n gt_lines = f.readlines()\r\n polygons = [polygon_from_str(o) for o in gt_lines]\r\n n_gt += len(polygons)\r\n #找文件名 basename 取/后面 splitext去掉扩展名\r\n fname = splitext(basename(gt_file))[0]\r\n all_gt[fname] = polygons\r\n\r\n # scores and match status of all dts in a single list\r\n all_dt_match = []\r\n all_dt_scores = []\r\n\r\n def _recursive_find_sub_dirs(curr_dir):\r\n for root, subdirs, files in os.walk(curr_dir):\r\n for s in files:\r\n if s.endswith(\".txt\"):\r\n #dt_files.append(os.path.join(root, s ))\r\n #sub_id_sub_dir_pairs.append(os.path.join(root, s))\r\n return root\r\n else:\r\n _recursive_find_sub_dirs(s)\r\n return ''\r\n dt_dir = _recursive_find_sub_dirs(dt_parent)\r\n if not dt_dir:\r\n s = 'file type not consistent'\r\n return [res_flag, s,0,0,0,0,'']\r\n # for every detection, calculate its match to groundtruth\r\n dt_files = glob.glob(join(dt_dir, '*.txt'))\r\n print('Number of DT files: %d' % len(dt_files))\r\n p = re.compile(r'.*(image_\\d+)\\.txt')\r\n print('Calculating matches')\r\n try:\r\n for dt_file in tqdm(dt_files):\r\n # find corresponding gt file\r\n fname = basename(dt_file)\r\n key = p.match(fname).group(1)\r\n\r\n if key not in all_gt:\r\n print('Result %s not found in groundtruths! 
This file will be ignored')\r\n continue\r\n\r\n # calculate matches to groundtruth and append to list\r\n gt_polygons = all_gt[key]\r\n with open(dt_file, 'r') as f:\r\n dt_lines = [o.strip() for o in f.readlines()]\r\n dt_polygons = [polygon_from_str(o) for o in dt_lines]\r\n # dt_match = []\r\n # gt_match = [False] * len(gt_polygons)\r\n # for dt_poly in dt_polygons:\r\n # match = False\r\n # for i, gt_poly in enumerate(gt_polygons):\r\n # if gt_match[i] == False and polygon_iou(dt_poly, gt_poly) >= IOU_THRESH:\r\n # gt_match[i] = True\r\n # match = True\r\n # break\r\n # dt_match.append(match)\r\n # all_dt_match.extend(dt_match)\r\n\r\n #####################################\r\n # match scheme by YMK\r\n dt_match = [False] * len(dt_polygons)\r\n gt_match = [False] * len(gt_polygons)\r\n all_ious = defaultdict(tuple)\r\n for index_gt, gt_poly in enumerate(gt_polygons):\r\n for index_dt, dt_poly in enumerate(dt_polygons):\r\n iou = polygon_iou(dt_poly, gt_poly)\r\n if iou >= IOU_THRESH:\r\n all_ious[(index_gt, index_dt)] = iou\r\n sorted_ious = sorted(all_ious.items(), key=operator.itemgetter(1), reverse=True)\r\n sorted_gt_dt_pairs = [item[0] for item in sorted_ious]\r\n for gt_dt_pair in sorted_gt_dt_pairs:\r\n index_gt, index_dt = gt_dt_pair\r\n if gt_match[index_gt] == False and dt_match[index_dt] == False:\r\n gt_match[index_gt] = True\r\n dt_match[index_dt] = True\r\n all_dt_match.extend(dt_match)\r\n #####################################\r\n\r\n # calculate scores and append to list\r\n dt_scores = [float(o.split(',')[8]) for o in dt_lines]\r\n all_dt_scores.extend(dt_scores)\r\n # calculate precision, recall and f-measure at all thresholds\r\n all_dt_match = np.array(all_dt_match, dtype=np.bool).astype(np.int)\r\n all_dt_scores = np.array(all_dt_scores)\r\n\r\n sort_idx = np.argsort(all_dt_scores)[::-1] # sort in descending order\r\n all_dt_match = all_dt_match[sort_idx]\r\n all_dt_scores = all_dt_scores[sort_idx]\r\n\r\n n_pos = np.cumsum(all_dt_match)\r\n n_dt = np.arange(1, len(all_dt_match)+1)\r\n precision = n_pos.astype(np.float) / n_dt.astype(np.float)\r\n recall = n_pos.astype(np.float) / float(n_gt)\r\n eps = 1e-9\r\n fmeasure = 2.0 / ((1.0 / (precision + eps)) + (1.0 / (recall + eps)))\r\n\r\n\r\n rec = n_pos / float(n_gt)\r\n prec = n_pos / np.maximum(n_dt, np.finfo(np.float64).eps)\r\n ap = voc_ap(rec, prec)\r\n\r\n except Exception as e:\r\n # s = str(e)\r\n s = repr(e)\r\n return [res_flag, s,0,0,0,0,'']\r\n else:\r\n # find maximum fmeasure\r\n max_idx = np.argmax(fmeasure)\r\n\r\n eval_results = {\r\n 'fmeasure': fmeasure[max_idx],\r\n 'precision': precision[max_idx],\r\n 'recall': recall[max_idx],\r\n 'ap': ap,\r\n 'threshold': all_dt_scores[max_idx],\r\n 'all_precisions': precision,\r\n 'all_recalls': recall\r\n }\r\n\r\n # # evaluation summary\r\n # print('=================================================================')\r\n # print('Maximum f-measure: %f' % eval_results['fmeasure'])\r\n # print(' |-- precision: %f' % eval_results['precision'])\r\n # print(' |-- recall: %f' % eval_results['recall'])\r\n # print(' |-- threshold: %f' % eval_results['threshold'])\r\n # print('=================================================================')\r\n\r\n # save evaluation results\r\n dt_name = os.path.split(dt_dir)[-1]+'_eval'\r\n save_dir = os.path.join(save_parent, dt_name)\r\n if not exists(save_dir):\r\n os.makedirs(save_dir)\r\n data_save_path = join(save_dir, 'eval_data.pkl')\r\n with open(data_save_path, 'wb') as f:\r\n pickle.dump(eval_results, f)\r\n 
print('Evaluation results data written to {}'.format(data_save_path))\r\n\r\n # plot precision-recall curve\r\n vis_save_path = join(save_dir, 'pr_curve.png')\r\n plt.clf()\r\n plt.plot(recall, precision)\r\n plt.xlim(0, 1)\r\n plt.ylim(0, 1)\r\n plt.title('Precision-Recall Curve')\r\n plt.grid()\r\n plt.xlabel('Recall')\r\n plt.ylabel('Precision')\r\n plt.savefig(vis_save_path, dpi=200)\r\n print('Precision-recall curve written to {}'.format(vis_save_path))\r\n # save evaluation results\r\n if not exists(save_dir):\r\n os.makedirs(save_dir)\r\n data_save_path = join(save_dir, 'eval_data.pkl')\r\n with open(data_save_path, 'wb') as f:\r\n pickle.dump(eval_results, f)\r\n print('Evaluation results data written to {}'.format(data_save_path))\r\n dst_dir = save_dir\r\n src_root = os.path.split(dst_dir)[0]\r\n src_dir = os.path.split(dst_dir)[1]\r\n zip_dir = shutil.make_archive(dst_dir, 'zip', src_root, src_dir)\r\n res_flag = True\r\n return [res_flag, '', eval_results['fmeasure'],eval_results['precision'],eval_results['recall'],\r\n eval_results['ap'],zip_dir]", "def search(d,key):\n\treturn dfs(d,key)", "def testSampleRichness(self):\n self.tree.calculate_richness()\n self.assertEqual(1167, self.tree.get_species_richness(1))\n self.assertEqual(1171, self.tree.get_species_richness(2))\n self.assertEqual(self.tree.get_species_richness(1), self.tree.get_species_richness(1))\n self.assertEqual(self.tree.get_species_richness(2), self.tree.get_species_richness(2))\n self.assertEqual(self.tree.get_species_richness(3), self.tree.get_species_richness(3))", "def find_genes_for_mutants(self, genome_version, genefile, detailed_features=True, include_RISCC_reads=False, \n nearest_genes_for_intergenic=False, N_run_groups=3, verbosity_level=1): \n if self.multi_dataset: raise MutantError(\"find_genes_for_mutants not implemented for multi-datasets!\")\n # MAYBE-TODO implement for multi-datasets? 
The actual gene-finding would be easy, since it'd just work on \n # multi-dataset mutants instead of single-dataset ones; adding stuff to summary would be harder.\n\n # Group all the mutants by chromosome, so that I can go over each chromosome in genefile separately\n # instead of reading in all the data at once (which uses a lot of memory)\n # Inclue both the main mutants, AND all the RISCC genome-side read sub-mutants if wanted.\n insertion_data_by_chromosome = defaultdict(list)\n for mutant in self:\n if mutant.position not in SPECIAL_POSITIONS.all_undefined:\n insertion_data_by_chromosome[mutant.position.chromosome].append(mutant)\n if include_RISCC_reads:\n for RISCC_read_data in mutant.RISCC_genome_side_aligned_reads.values():\n insertion_data_by_chromosome[RISCC_read_data[0].chromosome].append(RISCC_read_data)\n self._find_genes_for_list(insertion_data_by_chromosome, genome_version, genefile, \n detailed_features, nearest_genes_for_intergenic, N_run_groups, verbosity_level)", "def id_dfts(self):\n \n def dls(node, limit):\n \"\"\"Recursively performs a Depth Limited Search (DLS) on the puzzle's search space\n starting at the given node with given limit.\n \n Returns a DLSResult class instance that contains the problem solution, a flag\n indicating the cutoff was reached, or a flag indicating a search failure.\n \"\"\"\n if self.check_goal_state(node.state):\n # Search success\n # Return final state and list of actions along path to the goal\n # as part of the Solution portion of the DLSResult class\n return DLSResult(solution=Solution(final_state=node.state, actions=self.get_action_path(node)))\n \n elif limit == 0:\n # The cutoff has been reached\n return DLSResult(cutoff=True)\n \n else:\n cutoff_occurred = False\n \n # Generate all possible actions for the given state\n actions = self.get_actions(node.state)\n \n for action in actions:\n # Apply this action to the current state to get the new state\n new_state = self.get_result(node.state, action)\n \n # Create a new child search node with the new state and action\n child_node = SearchNode(new_state, node, action)\n \n # Recursively call DLS on the child node with a reduced limit\n result = dls(child_node, limit - 1)\n \n if result.cutoff:\n # A cutoff occurred\n cutoff_occurred = True\n \n elif not result.failure:\n # Search success\n return result\n \n if cutoff_occurred:\n # A cutoff occurred\n return DLSResult(cutoff=True)\n \n else:\n # This search has failed\n return DLSReturn(failure=True)\n\n\n print('Performing ID-DFTS\\n')\n\n # Iterate through depths from 0 to infinity\n for depth in count(0):\n print('Trying depth', depth)\n\n # Get the DLS result for this depth\n result = dls(SearchNode(self.initial_state), depth)\n \n if not result.cutoff:\n # A solution has been found or a search failure has ocurred\n # Return the result\n return result", "def improve_tree(tree, freq_dict):\n # todo", "def test_Tree():", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n \n util.raiseNotDefined()", "def _walk(self, level=0):\n l_dict = self.list_all()\n indent = level * \" \"\n for node in l_dict[\"nodes\"]:\n print(indent + \"node\", node)\n for group in l_dict[\"groups\"]:\n print(indent + \"group: \", group)\n with self.open(group) as hdf_group:\n hdf_group._walk(level=level + 1)", "def main(args):\n dm = DistanceMatrix()\n for filename in args.samFile:\n dm.addFile(filename, scoreTag=args.scoreTag)\n\n if args.minMatchingReads is None:\n wantedReferenceIds = sorted(set(dm.scores))\n else:\n wantedReferenceIds = 
sorted(\n set(\n referenceId\n for (referenceId, reads) in dm.scores.items()\n if len(reads) >= args.minMatchingReads\n )\n )\n if args.verbose:\n print(\n f\"Found {len(wantedReferenceIds)} references with at least \"\n f\"{args.minMatchingReads} matching reads.\",\n file=sys.stderr,\n )\n\n if args.verbose:\n for referenceId in wantedReferenceIds:\n nReads = len(dm.scores[referenceId])\n print(f\"Reference {referenceId!r} matched {nReads} reads.\")\n\n dump(\n dict((id_, dm.scores[id_]) for id_ in wantedReferenceIds),\n sys.stdout,\n sort_keys=True,\n indent=4,\n )\n print()", "def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "def fit(self, dataset, verbose=False):\n self.inputs = dataset.shape[1]-1\n self.bits = np.ceil(\n np.log2(\n np.abs(\n np.amax(dataset, axis=0) -\n np.amin(dataset, axis=0)))).astype(np.int32)\n self.is_neg = (np.amin(dataset, axis=0) < 0).astype(np.int8)\n\n self.trees = []\n\n for i in range(self.n_trees):\n if verbose:\n print(\"... creating tree {}\".format(i))\n\n # as subsample is an expensive operation, we will only perform it if it\n # reduces the dataset substantially\n\n if self.sample_size and self.sample_size < 0.3 * dataset.shape[0]:\n if verbose:\n print(\"... generated subsample of size {}\".format(self.sample_size))\n sample = self.subsample(dataset)\n else:\n sample = dataset\n\n self.trees.append(fit_parallel(\n self.max_depth, self.min_size, sample, True))", "def driver(rootdir, destination, dataset_name):\n global metric_result \n global result\n metric_result = {\"query image\": [], \n \"k\": [], \n \"precision for k = 3\": [], \n \"reciprocal rank for k = 3\": [],\n \"precision for k = 5\": [], \n \"reciprocal rank for k = 5\": [], \n \"precision for k = 7\": [],\n \"reciprocal rank for k = 7\": [], \n \"time in seconds\": []}\n \n siamese_model = get_siamese(input_shape=(1, 48, 48))\n siamese_model.summary()\n APlist_3 = []\n RRlist_3 = []\n APlist_5 = []\n RRlist_5 = []\n APlist_7 = []\n RRlist_7 = []\n # destination = \"..\\\\result\\\\seamese_net_avg_images_seed_np_2_tf_2\\\\\" # + subdir1.split(\"\\\\\")[-1]\n \n \n for subdir1, dirs1, files1 in os.walk(rootdir):\n start = time.time()\n query1_name = subdir1.split(\"\\\\\")[-1]\n \n os.makedirs(destination, exist_ok=True)\n \n query1_average_image_time_start = time.time()\n query1 = averageImage(subdir1)\n query1_average_image_time_end = time.time()\n \n result = {\"query1\": [], \"query2\":[], \"size\": [], \"siamese_distance\": [], \"average_image_time_query1\": [], \"average_image_time_query2\": [], \"patch_retrieval_time\": [], \"image_comparison_time\": [],\"total_time\": []}\n \n \n if not subdir1.endswith(\"\\\\\"+ dataset_name +\"\\\\\"):\n for subdir2, dirs2, files2 in os.walk(rootdir):\n if not subdir2.endswith(\"\\\\\"+ dataset_name +\"\\\\\"):\n if (subdir1 != subdir2):\n \n start_per_image = time.time()\n \n query2_name = subdir2.split(\"\\\\\")[-1]\n # print(subdir1, subdir2)\n \n query2_average_image_time_start = time.time()\n query2 = averageImage(subdir2)\n query2_average_image_time_end = time.time()\n\n siamese_distance = compare(siamese_model, query1, query2)\n # print(\"siamese_distance between {} and {} value : {}\".format(query1_name, query2_name, siamese_distance))\n end_per_image = time.time()\n \n result[\"query1\"].append(query1_name)\n result[\"query2\"].append(query2_name)\n result[\"size\"].append((496, 512))\n result[\"siamese_distance\"].append(siamese_distance)\n 
result[\"average_image_time_query1\"].append(query1_average_image_time_end - query1_average_image_time_start)\n result[\"average_image_time_query2\"].append(query2_average_image_time_end - query2_average_image_time_start)\n result[\"total_time\"].append(end_per_image - start_per_image)\n \n #save result tp csv file sorted w.r.t siamese_distance\n df = pd.DataFrame(data=result)\n df = df.sort_values(by=[\"siamese_distance\"])\n df.to_csv(destination + \"\\\\\" + query1_name +\".csv\")\n \n APlist_3.append(calculateAvgPrecision(df, 3))\n RRlist_3.append(calculateReciprocalRank(df, 3))\n \n APlist_5.append(calculateAvgPrecision(df, 5))\n RRlist_5.append(calculateReciprocalRank(df, 5))\n \n APlist_7.append(calculateAvgPrecision(df, 7))\n RRlist_7.append(calculateReciprocalRank(df, 7))\n \n # print(APlist, RRlist)\n end = time.time()\n metric_result[\"query image\"].append(query1_name)\n metric_result[\"k\"].append(\"3, 5, 7\")\n metric_result[\"precision for k = 3\"].append(calculateAvgPrecision(df, 3))\n metric_result[\"reciprocal rank for k = 3\"].append(calculateReciprocalRank(df, 3))\n \n metric_result[\"precision for k = 5\"].append(calculateAvgPrecision(df, 5))\n metric_result[\"reciprocal rank for k = 5\"].append(calculateReciprocalRank(df, 5))\n \n metric_result[\"precision for k = 7\"].append(calculateAvgPrecision(df, 7))\n metric_result[\"reciprocal rank for k = 7\"].append(calculateReciprocalRank(df, 7))\n metric_result[\"time in seconds\"].append((end - start))\n \n print(\"Average Precision (AP) considering K = 3 : {}\".format(sum(APlist_3)/len(APlist_3)))\n print(\"Reciprocal Rank (RR) considering K = 3 : {}\".format(sum(RRlist_3)/len(RRlist_3)))\n \n print(\"Average Precision (AP) considering K = 5 : {}\".format(sum(APlist_5)/len(APlist_5)))\n print(\"Reciprocal Rank (RR) considering K = 5 : {}\".format(sum(RRlist_5)/len(RRlist_5)))\n \n print(\"Average Precision (AP) considering K = 7 : {}\".format(sum(APlist_7)/len(APlist_7)))\n print(\"Reciprocal Rank (RR) considering K = 7 : {}\".format(sum(RRlist_7)/len(RRlist_7)))\n \n metric_result[\"query image\"].append(\"Average AP and Average RR\")\n metric_result[\"k\"].append(\"3, 5, 7\")\n metric_result[\"precision for k = 3\"].append(sum(APlist_3)/len(APlist_3))\n metric_result[\"reciprocal rank for k = 3\"].append(sum(RRlist_3)/len(RRlist_3))\n \n metric_result[\"precision for k = 5\"].append(sum(APlist_5)/len(APlist_5))\n metric_result[\"reciprocal rank for k = 5\"].append(sum(RRlist_5)/len(RRlist_5))\n \n metric_result[\"precision for k = 7\"].append(sum(APlist_7)/len(APlist_7))\n metric_result[\"reciprocal rank for k = 7\"].append(sum(RRlist_7)/len(RRlist_7))\n \n metric_result[\"time in seconds\"].append(sum(metric_result[\"time in seconds\"]))\n\n\n MAP = (sum(APlist_3)/len(APlist_3) + sum(APlist_5)/len(APlist_5) + sum(APlist_7)/len(APlist_7))/3\n MRR = (sum(RRlist_3)/len(RRlist_3) + sum(RRlist_5)/len(RRlist_5) + sum(RRlist_7)/len(RRlist_7))/3\n \n metric_result[\"query image\"].append(\"MAP and MRR\")\n metric_result[\"k\"].append(\"3, 5, 7\")\n metric_result[\"precision for k = 3\"].append(MAP)\n metric_result[\"reciprocal rank for k = 3\"].append(MRR)\n \n metric_result[\"precision for k = 5\"].append(0)\n metric_result[\"reciprocal rank for k = 5\"].append(0)\n \n metric_result[\"precision for k = 7\"].append(0)\n metric_result[\"reciprocal rank for k = 7\"].append(0)\n \n \n metric_result[\"time in seconds\"].append(0)\n \n \n metric_df = pd.DataFrame(data=metric_result)\n metric_df.to_csv(destination + \"\\\\\" + 
\"CBIR metric.csv\")\n \n del siamese_model\n return MAP, MRR", "def improve(self, tour: Tour):\n for v0, v1 in tour.iter_links():\n result = self.dfs_recursion(tour, [v0, v1], 0)\n if result is not None:\n return result", "def improve(self, tour: Tour):\n for v0, v1 in tour.iter_links():\n result = self.dfs_recursion(tour, [v0, v1], 0)\n if result is not None:\n return result", "def dfs(self, ms, idx, square, square_len):\n if any(s > square_len for s in square):\n return False\n if idx >= len(ms):\n return all(s == square[0] for s in square)\n\n v = ms[idx]\n return (\n self.dfs(ms, idx + 1, [square[0]+v, square[1] ,square[2] ,square[3]] , square_len) or\n self.dfs(ms, idx + 1, [square[0] ,square[1]+v,square[2] ,square[3]] , square_len) or\n self.dfs(ms, idx + 1, [square[0] ,square[1] ,square[2]+v,square[3]] , square_len) or\n self.dfs(ms, idx + 1, [square[0] ,square[1] ,square[2] ,square[3]+v], square_len)\n )", "def search_for_actions(self, search_depth: int = 1,\n random_seed: int = None) -> list:\n if random_seed is not None:\n random.seed(random_seed)\n for _ in range(self._max_samples):\n execute_round(self._root, max_tree_depth=self._max_tree_depth,\n tree_select_policy=self._tree_select_policy,\n tree_expand_policy=self._tree_expand_policy,\n rollout_policy=self._rollout_policy,\n backpropagate_method=self._back_propagate_policy)\n return self._search(self._root, search_depth)[1]", "def _find_all(trienode, mem, valid_words=[]):\r\n \r\n if trienode.data(): \r\n valid_words.append(mem)\r\n if trienode.children():\r\n for children in trienode.children():\r\n _find_all(trienode.children()[children], mem + children,\r\n valid_words)\r\n return valid_words", "def recursive_search(i, F, t, s, explored, leaders, order):\n x = len(explored)\n if x % 10 == 0:\n print(\"Length of explored: {}\".format(x))\n explored.append(i)\n if order == 2:\n leaders[i] = s\n arc_list = db.Database.find_one(collection=\"biggraph\", query={\"key\": i})\n if arc_list:\n for node in arc_list['value']:\n if node not in explored:\n F, t, leaders, explored = recursive_search(node, F, t, s, explored, leaders, order)\n if order == 1:\n t += 1\n F[i] = t\n return F, t, leaders, explored", "def test_parallel_alpha_diversity_wo_tree(self):\r\n params = {'metrics': 'observed_species,chao1',\r\n 'tree_path': None,\r\n 'jobs_to_start': 2\r\n }\r\n app = ParallelAlphaDiversity()\r\n r = app(self.rt_fps,\r\n self.test_out,\r\n params,\r\n job_prefix='ATEST',\r\n poll_directly=True,\r\n suppress_submit_jobs=False)\r\n # confirm that the total number of output sequences equals the total\r\n # number of input sequences\r\n output_fps = glob(join(self.test_out, '*txt'))\r\n self.assertEqual(len(output_fps), len(self.rt_fps))", "def test_check_tree_exact_match(self):\r\n\r\n fasta_labels = ['seq1_1', 'seq1_2', 'seq2_3', 'seq3_4']\r\n\r\n actual_subset_results = check_tree_exact_match(fasta_labels,\r\n self.sample_tree_3tips_fp)\r\n\r\n # Should find all and give True, True result\r\n\r\n self.assertEqual(actual_subset_results, [True, True])\r\n\r\n # Should get tips not found in fasta labels with 5 tip tree\r\n\r\n fasta_labels = ['seq1_1', 'seq1_2', 'seq2_3', 'seq3_4']\r\n\r\n actual_subset_results = check_tree_exact_match(fasta_labels,\r\n self.sample_tree_5tips_fp)\r\n\r\n # Should find all and give True result\r\n\r\n self.assertEqual(actual_subset_results, [True, ['seq5', 'seq4']])\r\n\r\n # Change two of the fasta labels to not match tree tips\r\n\r\n fasta_labels = ['seq1_1', 'seqX_2', 'seq2_3', 'seqY_4']\r\n\r\n 
actual_subset_results = check_tree_exact_match(fasta_labels,\r\n self.sample_tree_5tips_fp)\r\n\r\n # Should find seqX and seqY as not being a subset\r\n\r\n self.assertEqual(actual_subset_results, [['seqX', 'seqY'],\r\n ['seq3', 'seq5', 'seq4']])", "def test_tree_splay() -> None:\n t = generate_graph_resources(5)\n field(t, (\"dr_1\", \"ds_1\", \"f1\")).references.append(\n (FieldAddress(\"dr_2\", \"ds_2\", \"f1\"), \"to\")\n )\n field(t, (\"dr_1\", \"ds_1\", \"f1\")).references.append(\n (FieldAddress(\"dr_3\", \"ds_3\", \"f1\"), \"to\")\n )\n field(t, (\"dr_1\", \"ds_1\", \"f1\")).references.append(\n (FieldAddress(\"dr_4\", \"ds_4\", \"f1\"), \"to\")\n )\n field(t, (\"dr_1\", \"ds_1\", \"f1\")).references.append(\n (FieldAddress(\"dr_5\", \"ds_5\", \"f1\"), \"to\")\n )\n field(t, (\"dr_1\", \"ds_1\", \"f1\")).identity = \"email\"\n traversal = Traversal(DatasetGraph(*t), {\"email\": \"X\"})\n\n assert incoming_edges(traversal, CollectionAddress(\"dr_1\", \"ds_1\")) == {\n Edge(\n FieldAddress(\"__ROOT__\", \"__ROOT__\", \"email\"),\n FieldAddress(\"dr_1\", \"ds_1\", \"f1\"),\n )\n }\n assert outgoing_edges(traversal, CollectionAddress(\"dr_1\", \"ds_1\")) == {\n Edge(FieldAddress(\"dr_1\", \"ds_1\", \"f1\"), FieldAddress(\"dr_3\", \"ds_3\", \"f1\")),\n Edge(FieldAddress(\"dr_1\", \"ds_1\", \"f1\"), FieldAddress(\"dr_4\", \"ds_4\", \"f1\")),\n Edge(FieldAddress(\"dr_1\", \"ds_1\", \"f1\"), FieldAddress(\"dr_5\", \"ds_5\", \"f1\")),\n Edge(FieldAddress(\"dr_1\", \"ds_1\", \"f1\"), FieldAddress(\"dr_2\", \"ds_2\", \"f1\")),\n }\n\n assert outgoing_edges(traversal, CollectionAddress(\"dr_5\", \"ds_5\")) == set()\n assert incoming_edges(traversal, CollectionAddress(\"dr_2\", \"ds_2\")) == {\n Edge(FieldAddress(\"dr_1\", \"ds_1\", \"f1\"), FieldAddress(\"dr_2\", \"ds_2\", \"f1\"))\n }\n assert incoming_edges(traversal, CollectionAddress(\"dr_3\", \"ds_3\")) == {\n Edge(FieldAddress(\"dr_1\", \"ds_1\", \"f1\"), FieldAddress(\"dr_3\", \"ds_3\", \"f1\"))\n }\n assert incoming_edges(traversal, CollectionAddress(\"dr_4\", \"ds_4\")) == {\n Edge(FieldAddress(\"dr_1\", \"ds_1\", \"f1\"), FieldAddress(\"dr_4\", \"ds_4\", \"f1\"))\n }\n assert incoming_edges(traversal, CollectionAddress(\"dr_5\", \"ds_5\")) == {\n Edge(FieldAddress(\"dr_1\", \"ds_1\", \"f1\"), FieldAddress(\"dr_5\", \"ds_5\", \"f1\"))\n }\n traversal_map, terminators = traversal.traversal_map()\n assert traversal_map == {\n \"__ROOT__:__ROOT__\": {\"from\": {}, \"to\": {\"dr_1:ds_1\": {\"email -> f1\"}}},\n \"dr_1:ds_1\": {\n \"from\": {\"__ROOT__:__ROOT__\": {\"email -> f1\"}},\n \"to\": {\n \"dr_2:ds_2\": {\"f1 -> f1\"},\n \"dr_3:ds_3\": {\"f1 -> f1\"},\n \"dr_4:ds_4\": {\"f1 -> f1\"},\n \"dr_5:ds_5\": {\"f1 -> f1\"},\n },\n },\n \"dr_2:ds_2\": {\"from\": {\"dr_1:ds_1\": {\"f1 -> f1\"}}, \"to\": {}},\n \"dr_3:ds_3\": {\"from\": {\"dr_1:ds_1\": {\"f1 -> f1\"}}, \"to\": {}},\n \"dr_4:ds_4\": {\"from\": {\"dr_1:ds_1\": {\"f1 -> f1\"}}, \"to\": {}},\n \"dr_5:ds_5\": {\"from\": {\"dr_1:ds_1\": {\"f1 -> f1\"}}, \"to\": {}},\n }\n\n assert set(terminators) == {\n CollectionAddress(\"dr_2\", \"ds_2\"),\n CollectionAddress(\"dr_3\", \"ds_3\"),\n CollectionAddress(\"dr_4\", \"ds_4\"),\n CollectionAddress(\"dr_5\", \"ds_5\"),\n }", "def compare_trees(first_soup: HTMLStrip, second_soup: HTMLStrip) -> float:\n first_tree = Tree.from_soup_object(first_soup.file_name, first_soup.original_soup)\n second_tree = Tree.from_soup_object(second_soup.file_name, second_soup.original_soup)\n\n common_paths_size: int = 
first_tree.num_of_common_paths(second_tree)\n target_size: int = second_tree.total_num_of_paths()\n similarity = float(common_paths_size)*100/target_size\n print(f'{similarity:.2f}')\n return similarity", "def __manage_tree(self):\n for pre, fill, node in RenderTree(self.tree):\n if node.name is 'count':\n logger.info(\n \"Tree info %s%s: %s %s p/s attack: %s\",\n pre, node.name, node.value, node.pps, node.attack)\n else:\n logger.info(\"Pre - [%s], Fill - [%s], Node - [%s]\",\n pre, fill, node.name)", "def testFindAllSimilarityFromNodeOnPathSimExampleThree(self):\n\n graph, authorMap, conferenceMap = SampleGraphUtility.constructPathSimExampleThree()\n metaPath = [Author, Paper, Conference, Paper, Author]\n strategy = PathSimStrategy(graph, metaPath)\n\n mike = authorMap['Mike']\n mostSimilarNodes = strategy.findMostSimilarNodes(mike, 5)\n\n self.assertEquals([authorMap['Bob'], authorMap['Mary'], authorMap['Jim']], mostSimilarNodes)", "def run_dfs(self,s):\n if self.verbose: print('entering run_dfs with s = ',s)\n new_states = [self.succ(s,a) for a in self.actions(s)]\n results = []\n\n for ns in new_states:\n if self.verbose: print('considering new state = ',ns)\n end = self.is_end(ns)\n if end:\n result = self.result(ns)\n if result is not None:\n results.append(result)\n else:\n results += self.run_dfs(ns)\n return results", "def prune(tree, testSet, res, technique):\n assert technique in [\"reduced_error\"]\n if technique == \"reduced_error\":\n tbSet = testSet[testSet[tree.col] >= tree.value] #find which test observations belong to this tree's true branch\n fbSet = testSet[testSet[tree.col] < tree.value] #find which test observations belong to this tree's false branch\n \n if tree.tb.results is None: #Check if the true branch of this sub-tree is a leaf\n ptb = prune(tree.tb, tbSet, res, technique) #If not, recursively travel down the true branch and prune it.\n else:\n ptb = tree.tb #If the true branch is a leaf, then the true branch has--in essence--already been pruned.\n if tree.fb.results is None: #Check if the false branch of this sub-tree is a leaf\n pfb = prune(tree.fb, fbSet, res, technique) #If not, recursively travel down the false branch and prune it.\n else:\n pfb = tree.fb #If the false branch is a leaf, then the false branch has--in essence--already been pruned.\n \n #Sum the number of misclassifications of the test data at each of the leaves of this node\n wrong_in_leaves = __deep_count_errors(ptb, tbSet, res) + __deep_count_errors(pfb, fbSet, res)\n \n #Count the number of misclassificationsof the test data that would occur if this node were treated as a leaf\n wrong_at_node = __count_errors(tree, testSet, res)\n \n #Assess whether or not treating the node as a leaf improves the accuracy on the test set\n if wrong_at_node <= wrong_in_leaves: \n #NOTE:The following line of code seems slightly redundant since count_errors(tree, testSet, res) had to call \n #__get_results(tree). I should set up some way to save the output of that function call instead of calling it twice.\n return decisionNode(results = __get_results(tree)) #If so, return a decisionNode where the node is a leaf\n else:\n #If not, return a decisionNode where the node splits on the same column and value as before, but the \n #true and false branches are the pruned-versions of the original true and false branches. 
See above for\n #definition of ptb and pfb\n return decisionNode(col = tree.col, value = tree.value, tb = ptb, fb = pfb)", "def scan(self):\n try:\n for dataset_folder in os.scandir(\n self.path_dict['DATASETS_FOLDER']): # phase one -> scan local datasets dir\n if not dataset_folder.name.startswith('.') and dataset_folder.is_dir():\n self.local_datasets.append(dataset_folder.name)\n print(\"Local dataset found : \", dataset_folder.name, 'Folder size',\n self.get_tree_size(\n os.path.join(self.path_dict['DATASETS_FOLDER'], dataset_folder.name)) / 10 ** 6,\n 'MB')\n for dataset in self.to_be_used_datasets:\n if dataset not in self.local_datasets:\n print(dataset, ' verisetinin bilgisayarınızda yüklü olmadığı görüldü. İndirilecek.')\n self.download_queue.append(dataset)\n print(\"Eğer bir verisetinin yanlış indirildiğini düşünüyorsanız, \"\n \"verisetini silip programı tekrar çalıştırın.\")\n return self.local_datasets\n except:\n print(\"Dataset Okuma sırasında bir hata oluşmuş olabilir.\")", "def dfs(self):\n\n stack = [self.root]\n\n while stack:\n node = stack[-1]\n\n if node.goal:\n return True\n\n if not node.visited:\n node.visited = True\n\n for adj_node in self.return_adj_nodes(node):\n if adj_node and not adj_node.visited and not adj_node.wall:\n stack.append(adj_node)\n break\n else:\n stack.pop()\n\n return False", "def dfs_loop(graph_dict, nodes, track):\n\n for node in nodes:\n if node not in track.explored:\n track.current_source = node\n dfs(graph_dict, node, track)", "def _dfsearch_recursive(self, footprint):\n self.visited[footprint] = 1\n self.temp_component.append(footprint)\n for neighbour in self.neighbours[footprint]:\n if self.visited[neighbour] == 0:\n self._dfsearch(neighbour)", "def traverse_directory(args) :\n siteRGX = re.compile('DPH.'+args.site.upper())\n s = []\n\n # report non-unique residuals\n for root, dirs, files in os.walk(args.traverse):\n path = root.split('/')\n for gamitFile in files:\n if siteRGX.search(gamitFile):\n gamitFile = root+'/'+gamitFile\n #check for potential duplicates in the same path, only want to use one of the DOH files\n if len(path[-1]) > 4:\n regex = re.compile(root[:-2])\n else:\n regex = re.compile(root)\n\n\n # only check for duplicates when there is more than one network\n # being processed...\n if args.network == 'yyyy_dddnN':\n if len(s) == 0:\n s.append(gamitFile)\n else:\n # for each element in s, check to see if the root path does not match\n # any of the files already stored in the list\n m = 0\n for item in s:\n if regex.search(item) :\n m = 1\n if not m :\n s.append(gamitFile)\n else:\n s.append(gamitFile)\n\n s.sort()\n lines = ''\n # Now loop through each file and consolidate the residuals\n for dfile in s :\n dphs = res.parseDPH(dfile)\n\n # check if the dph files are being searched are from\n #a GAMIT network of type yyyy/dddn?/\n root, filename = os.path.split(dfile)\n if args.network == 'yyyy_dddnN':\n ddd = root[-5:-2]\n year = int(root[-10:-6])\n startDT = dt.datetime(year,01,01)\n startDT = startDT + dt.timedelta(days=(int(ddd) -1))\n elif args.network == 'ddd':\n ddd = root[-3:]\n year = root[-8:-4] \n startDT = dt.datetime(int(year),01,01)\n startDT = startDT + dt.timedelta(days=(int(ddd) -1))\n\n line = res.consolidate(dphs,startDT)\n lines = lines + line\n\n # if its larger than 1GB dump it to a file\n # this is designed to keep the load n the file system lighter\n if sys.getsizeof(lines) > 1073741824 :\n f = gzip.open(args.save_file,'a',9)\n f.write(lines)\n f.close()\n lines = ''\n #print(lines)\n\n # 
dump any remaining memory to file\n f = gzip.open(args.save_file,'a',9)\n f.write(lines)\n f.close()\n lines = ''\n\n return", "def get_dfs(dataset):\n test_slice = False\n if 'test_' in dataset:\n dataset = dataset.replace('test_', '')\n test_slice = True\n ratings_path = BUILTIN_DATASETS[dataset].path\n print('Path to ratings file is: {}'.format(ratings_path))\n if not os.path.isfile(ratings_path):\n download_builtin_dataset(dataset)\n if dataset == 'ml-100k':\n users_path = ratings_path.replace('.data', '.user')\n movies_path = ratings_path.replace('.data', '.item')\n dfs = movielens_to_df(ratings_path, users_path, movies_path)\n elif dataset == 'ml-1m':\n users_path = ratings_path.replace('ratings.', 'users.')\n movies_path = ratings_path.replace('ratings.', 'movies.')\n dfs = movielens_1m_to_df(ratings_path, users_path, movies_path)\n elif dataset == 'ml-20m':\n # there is no user path\n movies_path = ratings_path.replace('ratings.', 'movies.') # .../movies.csv\n dfs = movielens_20m_to_df(ratings_path, movies_path)\n else:\n raise Exception(\"Unknown dataset: \" + dataset)\n \n if test_slice:\n dfs['ratings'] = dfs['ratings'].sample(100, random_state=0)\n # print('Got dfs.\\nDataframe sizes are\\nratings:{}\\nusers:{}\\nmovies:{}'.format(\n # dfs['ratings'].memory_usage(), dfs['users'].memory_usage(), dfs['movies'].memory_usage()\n # ))\n return dfs", "def test_distances(self):\n sf = make_classifier_data(n=10, d=2, seed=37)\n sf.remove_column(\"class\", inplace=True)\n\n numeric_features = [\"int0\", \"int1\", \"float0\", \"float1\"]\n array_features = [\"array0\"]\n string_features = [\"str0\"]\n dict_features = [\"dict0\"]\n\n ## Numeric standard distances should work for numeric columns\n for d in [\n \"euclidean\",\n \"squared_euclidean\",\n \"manhattan\",\n \"cosine\",\n \"transformed_dot_product\",\n ]:\n try:\n m = tc.dbscan.create(\n sf,\n features=numeric_features,\n distance=d,\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n except:\n assert False, \"Standard distance {} failed.\".format(d)\n\n ## Numeric standard distances should work for array columns\n for d in [\n \"euclidean\",\n \"squared_euclidean\",\n \"manhattan\",\n \"cosine\",\n \"transformed_dot_product\",\n ]:\n try:\n m = tc.dbscan.create(\n sf,\n features=array_features,\n distance=d,\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n except:\n assert False, \"Standard distance {} failed.\".format(d)\n\n ## String standard distances should work.\n for d in [\"levenshtein\"]:\n try:\n m = tc.dbscan.create(\n sf,\n features=string_features,\n distance=d,\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n except:\n assert False, \"Standard distance {} failed.\".format(d)\n\n ## Dictionary standard distances should work.\n for d in [\"jaccard\", \"weighted_jaccard\", \"cosine\", \"transformed_dot_product\"]:\n try:\n m = tc.dbscan.create(\n sf,\n features=dict_features,\n distance=d,\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n except:\n assert False, \"Standard distance {} failed.\".format(d)\n\n # Nonsensical combinations of feature types and distances should fail.\n with self.assertRaises(ValueError):\n m = tc.dbscan.create(\n sf,\n features=numeric_features,\n distance=\"levenshtein\",\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n\n with self.assertRaises(ToolkitError):\n m = tc.dbscan.create(\n sf,\n features=dict_features,\n distance=\"levenshtein\",\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n\n with self.assertRaises(ToolkitError):\n m = 
tc.dbscan.create(\n sf,\n features=string_features,\n distance=\"euclidean\",\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n\n # If no distance is specified, the automatic distance construction\n # should kick in and be correct.\n correct_dist = [\n [[\"str0\"], \"levenshtein\", 1],\n [[\"str1\"], \"levenshtein\", 1],\n [[\"dict0\"], \"jaccard\", 1],\n [[\"int0\", \"int1\", \"float0\", \"float1\"], \"euclidean\", 1],\n [[\"array0\"], \"euclidean\", 1],\n ]\n\n m = tc.dbscan.create(\n sf, radius=1, distance=None, min_core_neighbors=3, verbose=False\n )\n\n self.assertItemsEqual(m.distance, correct_dist)\n\n m = tc.dbscan.create(\n sf, radius=1, distance=\"auto\", min_core_neighbors=3, verbose=False\n )\n self.assertItemsEqual(m.distance, correct_dist)", "def dfs2(G):\r\n\r\n for v in V(G):\r\n v.visited = False\r\n\r\n result = []\r\n\r\n for v in V(G):\r\n if not v.visited:\r\n X = dfs2_visit(v)\r\n result.append(X)\r\n\r\n return result", "def test_cout(G, ds, frm, to):\n print \"Common friends among \", \"(\", frm, \",\", to, \")\", common_friends(ds, (frm,to))\n try:\n print \"shortest_path, len: \", nx.shortest_path(G, frm, to), nx.shortest_path_length(G, frm, to)\n except:\n print \"No node warning!\"", "def store_search_statistics(self, root):\n\n # get the sum of the simulations starting from the node 'root'\n sum_visits = sum(child.visit_count for child in root.children.values())\n # self.child_visits.append([\n # root.children[a].visit_count / sum_visits if a in root.children else 0\n # for a in range(self.num_actions)\n # ])\n\n # update the statistics of children states visited from root state\n search_stats = np.array(\n [root.children[a].visit_count / sum_visits if a in root.children else 0 for a in range(self.num_actions)])\n self.child_visits = np.vstack((self.child_visits, search_stats))", "def _compute_scores(df, node_parent_pairs):\n scores = np.empty(len(node_parent_pairs))\n for idx, pair in enumerate(node_parent_pairs):\n scores[idx] = _compute_mutual_information(df, pair)\n return scores" ]
[ "0.5999934", "0.54931384", "0.5351289", "0.5340773", "0.5317956", "0.5283371", "0.5265533", "0.52155614", "0.51734805", "0.51664525", "0.5139534", "0.5117433", "0.50639457", "0.50228846", "0.5016315", "0.50101423", "0.50033206", "0.49670547", "0.49583322", "0.49560136", "0.49523288", "0.4926698", "0.49265143", "0.49236318", "0.4915668", "0.49124444", "0.490844", "0.49062592", "0.4906111", "0.48892254", "0.48609796", "0.48558664", "0.48551974", "0.48460042", "0.48439634", "0.48406374", "0.48397526", "0.48322606", "0.4818096", "0.4810298", "0.47967976", "0.47886705", "0.4785806", "0.4782702", "0.47790778", "0.47784948", "0.47664544", "0.47408822", "0.4737956", "0.47375193", "0.47350448", "0.47308728", "0.47235835", "0.47235644", "0.47202775", "0.47130492", "0.46906897", "0.46853384", "0.46792868", "0.46711218", "0.46650723", "0.46642822", "0.46620718", "0.4652026", "0.46518293", "0.46412545", "0.46394685", "0.4635919", "0.46353576", "0.46305242", "0.46199647", "0.46192333", "0.46131548", "0.4608479", "0.46073171", "0.4606168", "0.46055597", "0.46055597", "0.45911747", "0.45885378", "0.45863402", "0.4585148", "0.45849258", "0.4582167", "0.4580503", "0.45792043", "0.45768392", "0.4576627", "0.45729226", "0.45690757", "0.4559976", "0.455844", "0.45493633", "0.45425647", "0.45403934", "0.453824", "0.4537208", "0.45352536", "0.4534418", "0.45296618", "0.45276168" ]
0.0
-1
A wrapper for backward compatibility with other data structure implementations
def contains(self, query): return self.query(query)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n dict.__init__(self)\n self.datatype = None", "def _get_to_actual_data(raw):\n raise NotImplemented", "def __array__(self):\n return dict2rec(self)", "def __getitem__(self):\n pass", "def makeFMData(from_dict, locked=False):\n\n class FMData(object):\n \"\"\"Datastructure where:\n\n - attr and dict access is equal (eg. FMData.value == FMData['value'])\n - only attributtes given during initialization are readable and writable\n - modified attributes are tracked\"\"\"\n __modified__ = set()\n __slots__, __init_dict__, __old2new__, __new2old__ = key_dict(from_dict)\n\n def __init__(self, locked=False):\n init_dict = self.__init_dict__\n for key in init_dict:\n value = init_dict[key]\n date, mo, da, ye, time, ho, mi, se = [None] * 8\n if type(value) in [str, str]:\n date, da, mo, ye, time, ho, mi, se = reDateTime.match(value).groups()\n if mo and int(mo) > 12:\n mo, da = da, mo\n\n if type(init_dict[key]) == dict:\n setattr(self, key, makeFMData(init_dict[key], locked=False)) # lock all substructures??\n elif type(init_dict[key]) == list:\n l = []\n for d in init_dict[key]:\n if type(d) == dict:\n l.append(makeFMData(d)) # lock ??\n else:\n l.append(d)\n setattr(self, key, l)\n elif date and time:\n setattr(self, key, DateTime(int(ye), int(mo), int(da), int(ho), int(mi), int(se)))\n elif date:\n setattr(self, key, Date(int(ye), int(mo), int(da)))\n elif time:\n setattr(self, key, Time(int(ho), int(mi), int(se)))\n else:\n setattr(self, key, init_dict[key])\n if locked:\n self.__modified__.add('__locked__')\n\n def __setattr__(self, key, value):\n if '__locked__' in self.__modified__:\n raise AttributeError(\"This substructure is read-only, so you cannot modify '%s' attribute.\" % key)\n oldvalue = None\n if hasattr(self, key):\n oldvalue = getattr(self, key)\n # if oldvalue != None and type(oldvalue) != type(value):\n #\t raise TypeError, \"Type of field '%s' is %s, you cannot insert %s\" % (key, type(oldvalue), type(value))\n object.__setattr__(self, key, value)\n if oldvalue != None and value != oldvalue:\n self.__modified__.add(key)\n\n def __getitem__(self, key):\n if type(key) == str or type(key) == str:\n spl = key.split('.')\n else:\n print(\"-\" * 20, key, type(key))\n if len(spl) == 2:\n if spl[0] in self.__old2new__:\n spl[0] = self.__old2new__[spl[0]]\n if spl[1] in self.__old2new__:\n spl[1] = self.__old2new__[spl[1]]\n return getattr(getattr(self, spl[0]), spl[1])\n if key in self.__old2new__:\n key = self.__old2new__[key]\n return getattr(self, key)\n\n def __setitem__(self, key, value):\n spl = key.split('.')\n if len(spl) == 2:\n if spl[0] in self.__old2new__:\n spl[0] = self.__old2new__[spl[0]]\n if spl[1] in self.__old2new__:\n spl[1] = self.__old2new__[spl[1]]\n return setattr(getattr(self, spl[0]), spl[1], value)\n if key in self.__old2new__:\n key = self.__old2new__[key]\n return setattr(self, key, value)\n\n def __str__(self):\n return object.__repr__(self)\n\n def __iter__(self):\n l = []\n for key in self.__slots__:\n if hasattr(getattr(self, key), '__slots__'):\n for subkey in getattr(self, key).__slots__:\n l.append(\"%s.%s\" % (key, subkey))\n else:\n l.append(key)\n l.sort()\n for x in l:\n yield x\n\n def _modified(self):\n \"\"\"Returns tuple (key, value) for modified keys inside of FMData tree (recursive without lists)\"\"\"\n l = []\n for key in self.__slots__:\n if hasattr(getattr(self, key), '__modified__'):\n for subkey, value in getattr(self, key)._modified():\n yield (\"%s.%s\" % (key, subkey), value)\n else:\n if key in 
self.__modified__:\n yield (key, getattr(self, key))\n\n def __repr__(self):\n # from pformat import pformat\n # return \"<%s instance with %s records>\\n%s\" % (str(self.__class__), len(self.__slots__), pformat(dict([(value, getattr(self, value)) for value in self.__slots__])))\n # return pformat(dict([(value, getattr(self, value)) for value in self.__slots__]))\n l = []\n for key in self.__slots__:\n ukey = \"\"\n if key in self.__new2old__:\n ukey = \" (%s)\" % self.__new2old__[key]\n if hasattr(getattr(self, key), '__slots__'):\n for subkey in getattr(self, key).__slots__:\n value = getattr(getattr(self, key), subkey)\n if type(value) == str:\n value = value.decode('utf-8')\n l.append(\"%s.%s = '%s'\" % (key, subkey, value))\n elif type(getattr(self, key)) == list:\n l.append(\"%s%s = <list with %s records>\" % (key, ukey, len(getattr(self, key))))\n elif type(getattr(self, key)) == str:\n l.append(\"%s%s = '%s'\" % (key, ukey, getattr(self, key).decode('utf-8')))\n else:\n l.append(\"%s%s = '%s'\" % (key, ukey, getattr(self, key)))\n l.sort()\n return str(('\\n'.join(l)).encode('utf-8'))\n\n def get(self, key, default=None):\n try:\n return self.__getitem__(key)\n except AttributeError:\n return default\n\n return FMData(locked)", "def _unbox_data_structure(struct):\n\n if isinstance(struct, list):\n return [ _unbox_data_structure(item) for item in struct ]\n elif isinstance(struct, graphlab.data_structures.sframe.SFrame):\n return list(struct.pack_columns(column_prefix='', dtype=dict)['X1'])\n elif isinstance(struct, graphlab.data_structures.sframe.SArray):\n return list(struct)\n elif isinstance(struct, graphlab.data_structures.sgraph.SGraph):\n #TODO implement for SGraph\n raise NotImplementedError('SGraph not supported.')\n #encode_value will truncate and json-ify lists/dicts. 
Need to treat them differently\n elif isinstance(struct, dict):\n return { k: _unbox_data_structure(v) for (k, v) in six.iteritems(struct) }\n else:\n return _encode_value(struct)", "def __init__(self):\n self._data = PositionalList()", "def __init__(self):\n self._data = PositionalList()", "def reconstruct(self, X):", "def reconstruct(self, X):", "def __iter__(self):\n return self._data_dict.__iter__()", "def _build_impl(self):", "def to_datastruct(self,to_caller=False):\n ds = self.to_datastruct_internal()\n if to_caller:\n # don't send NULLs\n ds = self.remove_nulls(ds)\n return ds", "def __init__(self):\n # Assign -1 to conform to the specification of the typing hint.\n # Since the 'key' will not be called and used before it is assigned,\n # the assignment here will not have a bad effect.\n self.key = -1 # type: keyType\n # The dictionary support the different values with the same key.\n self.values = [] # type: List[valueType]", "def __call__(self):\n raise NotImplementedError", "def _decode_struct(\n data_type, obj, alias_validators, strict, old_style, for_msgpack):\n if obj is None and data_type.has_default():\n return data_type.get_default()\n elif not isinstance(obj, dict):\n raise bv.ValidationError('expected object, got %s' %\n bv.generic_type_name(obj))\n if strict:\n for key in obj:\n if (key not in data_type.definition._all_field_names_ and\n not key.startswith('.tag')):\n raise bv.ValidationError(\"unknown field '%s'\" % key)\n ins = data_type.definition()\n _decode_struct_fields(\n ins, data_type.definition._all_fields_, obj, alias_validators, strict,\n old_style, for_msgpack)\n # Check that all required fields have been set.\n data_type.validate_fields_only(ins)\n return ins", "def __getitem__(self, *args):\n return self.data.__getitem__(*args)", "def __init__(self):\r\n self.data = PositionalList()", "def __init__(self):\n self.currSyms = {}\n Traversable.__init__(self)", "def __getitem__(self, key):\n raise NotImplementedError()", "def __init__(self, data: Iterable=None, key=lambda x: x):\n\n if not hasattr(key, \"__call__\"):\n raise ValueError(\"{key} must be a callable object\")\n\n self.__root = None\n self.__size = 0\n self.__key = key\n\n if hasattr(data, \"__iter__\"):\n for element in data:\n self.add(element)\n elif data is not None:\n raise TypeError(\"Data must be an iterable object.\")", "def Value(self) -> object:", "def Value(self) -> object:", "def __init__(self, skipkeys=False, ensure_ascii=True,\r\n check_circular=True, allow_nan=True, sort_keys=False,\r\n indent=None, separators=None, encoding='utf-8', default=None,\r\n use_decimal=True, namedtuple_as_object=True,\r\n tuple_as_array=True, bigint_as_string=False,\r\n item_sort_key=None):\r\n\r\n self.skipkeys = skipkeys\r\n self.ensure_ascii = ensure_ascii\r\n self.check_circular = check_circular\r\n self.allow_nan = allow_nan\r\n self.sort_keys = sort_keys\r\n self.use_decimal = use_decimal\r\n self.namedtuple_as_object = namedtuple_as_object\r\n self.tuple_as_array = tuple_as_array\r\n self.bigint_as_string = bigint_as_string\r\n self.item_sort_key = item_sort_key\r\n if indent is not None and not isinstance(indent, string_types):\r\n indent = indent * ' '\r\n self.indent = indent\r\n if separators is not None:\r\n self.item_separator, self.key_separator = separators\r\n elif indent is not None:\r\n self.item_separator = ','\r\n if default is not None:\r\n self.default = default\r\n self.encoding = encoding", "def __getitem__(self, _):\n raise NotImplementedError()", "def _decode_union_old(data_type, 
obj, alias_validators, strict, for_msgpack):\n val = None\n if isinstance(obj, six.string_types):\n # Union member has no associated value\n tag = obj\n if tag in data_type.definition._tagmap:\n val_data_type = data_type.definition._tagmap[tag]\n if not isinstance(val_data_type, (bv.Void, bv.Nullable)):\n raise bv.ValidationError(\n \"expected object for '%s', got symbol\" % tag)\n else:\n if not strict and data_type.definition._catch_all:\n tag = data_type.definition._catch_all\n else:\n raise bv.ValidationError(\"unknown tag '%s'\" % tag)\n elif isinstance(obj, dict):\n # Union member has value\n if len(obj) != 1:\n raise bv.ValidationError('expected 1 key, got %s' % len(obj))\n tag = list(obj)[0]\n raw_val = obj[tag]\n if tag in data_type.definition._tagmap:\n val_data_type = data_type.definition._tagmap[tag]\n if isinstance(val_data_type, bv.Nullable) and raw_val is None:\n val = None\n elif isinstance(val_data_type, bv.Void):\n if raw_val is None or not strict:\n # If raw_val is None, then this is the more verbose\n # representation of a void union member. If raw_val isn't\n # None, then maybe the spec has changed, so check if we're\n # in strict mode.\n val = None\n else:\n raise bv.ValidationError('expected null, got %s' %\n bv.generic_type_name(raw_val))\n else:\n try:\n val = _json_compat_obj_decode_helper(\n val_data_type, raw_val, alias_validators, strict, True,\n for_msgpack)\n except bv.ValidationError as e:\n e.add_parent(tag)\n raise\n else:\n if not strict and data_type.definition._catch_all:\n tag = data_type.definition._catch_all\n else:\n raise bv.ValidationError(\"unknown tag '%s'\" % tag)\n else:\n raise bv.ValidationError(\"expected string or object, got %s\" %\n bv.generic_type_name(obj))\n return data_type.definition(tag, val)", "def __init__(self):\n self._data = [] # non-public underlying Python list as storage", "def _create_impl(self):", "def GetStructuredData(self): # real signature unknown; restored from __doc__\n pass", "def __init__(self):\n self.data = []\n self.idx = {}", "def __init__(self):\n self.d = defaultdict(list)", "def __init__(self):\n self.d = collections.defaultdict(list)", "def __init__(self):\n raise NotImplementedError", "def __init__(self):\n raise NotImplementedError", "def __init__(self):\n raise NotImplementedError", "def __init__(self):\n raise NotImplementedError", "def _build_impl_impl(self, input):", "def bound_data_with_bug_19611_patch(original_function, self, data, initial):\n return initial", "def __array_interface__(self):\n ...", "def __array_interface__(self):\n ...", "def __getitem__(self, t: Tuple[int, ...]) -> 'Tree':\n ...", "def __dict__(self):\r\n return", "def Item(self) -> object:", "def Item(self) -> object:", "def _to_be_wrapped(self) -> None:", "def __init__(self):\n self.key_dict = {}\n self.value_dict = {}\n self.head, self.last = None, None", "def GetDataAsObject(self):", "def __getitem__(self, key):", "def _fill_cdata(cls):\n\n funcs = {}\n for key, name in [(\"b\", \"char\"), (\"h\", \"short\"),\n (\"i\", \"int\"), (\"q\", \"longlong\")]:\n for echar, esuffix in [(\"<\", \"le\"), (\">\", \"be\")]:\n esuffix = \"_\" + esuffix\n for unsigned in [True, False]:\n s = struct.Struct(echar + (key.upper() if unsigned else key))\n get_wrapper = lambda f: lambda *a, **k: f(*a, **k)[0]\n unpack = get_wrapper(s.unpack)\n unpack_from = get_wrapper(s.unpack_from)\n\n def get_unpack_from(s):\n def unpack_from(data, offset=0):\n return s.unpack_from(data, offset)[0], offset + s.size\n return unpack_from\n\n unpack_from = 
get_unpack_from(s)\n pack = s.pack\n\n prefix = \"u\" if unsigned else \"\"\n if s.size == 1:\n esuffix = \"\"\n bits = str(s.size * 8)\n\n if unsigned:\n max_ = 2 ** (s.size * 8) - 1\n min_ = 0\n else:\n max_ = 2 ** (s.size * 8 - 1) - 1\n min_ = - 2 ** (s.size * 8 - 1)\n\n funcs[\"%s%s_min\" % (prefix, name)] = min_\n funcs[\"%s%s_max\" % (prefix, name)] = max_\n funcs[\"%sint%s_min\" % (prefix, bits)] = min_\n funcs[\"%sint%s_max\" % (prefix, bits)] = max_\n\n funcs[\"%s%s%s\" % (prefix, name, esuffix)] = unpack\n funcs[\"%sint%s%s\" % (prefix, bits, esuffix)] = unpack\n funcs[\"%s%s%s_from\" % (prefix, name, esuffix)] = unpack_from\n funcs[\"%sint%s%s_from\" % (prefix, bits, esuffix)] = unpack_from\n funcs[\"to_%s%s%s\" % (prefix, name, esuffix)] = pack\n funcs[\"to_%sint%s%s\" % (prefix, bits, esuffix)] = pack\n\n for key, func in iteritems(funcs):\n setattr(cls, key, staticmethod(func))", "def _get_data(self):\n raise NotImplementedError()", "def __init__(self):\n self.structure = {}", "def __init__(self):\n self.data = SortedList()", "def data_for_sorting() -> NoReturn:\n raise NotImplementedError", "def data_for_sorting() -> NoReturn:\n raise NotImplementedError", "def __call__(self):\n raise NotImplementedError()", "def dataproduct() -> None:\n pass", "def __getitem__(self, item):\r\n\r\n return self.data.__getitem__(item)", "def test_method_in_list_or_dict(self):\r\n m1=Module()\r\n x=T.dscalar()\r\n m1.x=T.dscalar()\r\n m1.y=Method(x,x*2)\r\n m1.z=Method([],m1.x*2)\r\n m1.ly=[Method(x,x*2)]\r\n m1.lz=[Method([],m1.x*2)]\r\n m1.ty=(Method(x,x*2),)\r\n m1.tz=(Method([],m1.x*2),)\r\n m1.dy={'y':Method(x,x*2)}\r\n m1.dz={'z':Method([],m1.x*2)}\r\n m1.lly=[[Method(x,x*2)]]\r\n m1.llz=[[Method([],m1.x*2)]]\r\n m1.lty=[(Method(x,x*2),)]\r\n m1.ltz=[(Method([],m1.x*2),)]\r\n m1.ldy=[{'y':Method(x,x*2)}]\r\n m1.ldz=[{'z':Method([],m1.x*2)}]\r\n m1.tly=([Method(x,x*2)],)\r\n m1.tlz=([Method([],m1.x*2)],)\r\n m1.tty=((Method(x,x*2),),)\r\n m1.ttz=((Method([],m1.x*2),),)\r\n m1.tdy=({'y':Method(x,x*2)},)\r\n m1.tdz=({'z':Method([],m1.x*2)},)\r\n m1.dly={'y':[Method(x,x*2)]}\r\n m1.dlz={'z':[Method([],m1.x*2)]}\r\n m1.dty={'y':(Method(x,x*2),)}\r\n m1.dtz={'z':(Method([],m1.x*2),)}\r\n m1.ddy={'y':{'y':Method(x,x*2)}}\r\n m1.ddz={'z':{'z':Method([],m1.x*2)}}\r\n\r\n inst=m1.make()\r\n inst.x=1\r\n assert inst.y(2)==4\r\n assert inst.z()==2\r\n assert inst.ly[0](2)==4\r\n assert inst.lz[0]()==2\r\n assert inst.ty[0](2)==4\r\n assert inst.tz[0]()==2\r\n assert inst.dy['y'](2)==4\r\n assert inst.dz['z']()==2\r\n for f in inst.lly[0][0], inst.lty[0][0], inst.ldy[0]['y'], inst.tly[0][0], inst.tty[0][0], inst.tdy[0]['y'], inst.dly['y'][0], inst.dty['y'][0], inst.ddy['y']['y']:\r\n assert f(2)==4\r\n for f in inst.llz[0][0], inst.ltz[0][0], inst.ldz[0]['z'], inst.tlz[0][0], inst.ttz[0][0], inst.tdz[0]['z'], inst.dlz['z'][0], inst.dtz['z'][0], inst.ddz['z']['z']:\r\n assert f()==2\r\n\r\n assert isinstance(inst.z,theano.compile.function_module.Function)\r\n assert isinstance(inst.y,theano.compile.function_module.Function)\r\n for f in inst.ly,inst.lz,inst.ty,inst.tz:\r\n assert isinstance(f[0],theano.compile.function_module.Function)\r\n for f in inst.lly,inst.llz,inst.lty,inst.ltz,inst.tly,inst.tlz,inst.tty,inst.ttz:\r\n assert isinstance(f[0][0],theano.compile.function_module.Function)\r\n for f in inst.dly['y'][0],inst.dty['y'][0], inst.dlz['z'][0],inst.dtz['z'][0], inst.ddy['y']['y'], inst.ddz['z']['z']:\r\n assert isinstance(f,theano.compile.function_module.Function)", "def 
_decode_struct_tree(data_type, obj, alias_validators, strict, for_msgpack):\n subtype = _determine_struct_tree_subtype(data_type, obj, strict)\n return _decode_struct(\n subtype, obj, alias_validators, strict, False, for_msgpack)", "def _get(self) -> T:\n ...", "def test_data_object_vaporise(self):\n pass", "def __init__(self):\n self.idx = None\n self.val = None\n self.left = None\n self.right = None", "def __init__(self):\n self.d = {}\n self.l = []", "def deserialize(self, data):\n return NotImplementedError", "def _json_compat_obj_decode_helper(\n data_type, obj, alias_validators, strict, old_style, for_msgpack):\n if isinstance(data_type, bv.StructTree):\n return _decode_struct_tree(\n data_type, obj, alias_validators, strict, for_msgpack)\n elif isinstance(data_type, bv.Struct):\n return _decode_struct(\n data_type, obj, alias_validators, strict, old_style, for_msgpack)\n elif isinstance(data_type, bv.Union):\n if old_style:\n return _decode_union_old(\n data_type, obj, alias_validators, strict, for_msgpack)\n else:\n return _decode_union(\n data_type, obj, alias_validators, strict, for_msgpack)\n elif isinstance(data_type, bv.List):\n return _decode_list(\n data_type, obj, alias_validators, strict, old_style, for_msgpack)\n elif isinstance(data_type, bv.Nullable):\n return _decode_nullable(\n data_type, obj, alias_validators, strict, old_style, for_msgpack)\n elif isinstance(data_type, bv.Primitive):\n # Set validate to false because validation will be done by the\n # containing struct or union when the field is assigned.\n return _make_babel_friendly(\n data_type, obj, alias_validators, strict, False, for_msgpack)\n else:\n raise AssertionError('Cannot handle type %r.' % data_type)", "def _build_iterable(self):", "def __init__(self):\n self.keys = []\n self.values = []", "def __call__(self) -> None:", "def _dd():\n return defaultdict(_dd)", "def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None", "def f_get(self):\n raise NotImplementedError(\"Should have implemented this.\")", "def test_translate_struct_dict_unique_key(self):\n root = netapp_api.NaElement('root')\n child = {'e1': 'v1', 'e2': 'v2', 'e3': 'v3'}\n root.translate_struct(child)\n self.assertEqual(3, len(root.get_children()))\n self.assertEqual('v1', root.get_child_content('e1'))\n self.assertEqual('v2', root.get_child_content('e2'))\n self.assertEqual('v3', root.get_child_content('e3'))", "def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\n # added stuff below", "def __getitem__(self, i: int) -> 'Tree':\n ...", "def __init__(self):\n self._data = PositionalList() # list of _Item instances", "def __init__(self):\n # set of lists of nodal differences\n self.di = {}\n # self of upper boudns\n self.i = {}", "def __getitem__(self, item):", "def __init__(self):\n self.l = {}\n self.s = {}", "def __init__(self):\n\n defaultdict.__init__(self, MultiDict)", "def __index__(self, ???):", "def __setitem__(name, other):", "def __setitem__(self, *args, **kwargs): # real signature unknown\n pass", "def __setitem__(self, *args, **kwargs): # real signature unknown\n pass", "def __setitem__(self, *args, **kwargs): # real signature unknown\n pass", "def __setitem__(self, *args, **kwargs): # real signature unknown\n pass", "def to_legacy(self) -> object:\n pass", "def test_jsonify_decode(self):\n\n Point = namedtuple('Point', ['x', 'y'], False)\n class Foo(Base):\n _table = ClassReadonlyProperty('foos')\n _primary_key = ClassReadonlyProperty('foo_id')\n\n foo_id = 
IDField('_id')\n str_field = StringField(default='this is default')\n date_field = DateField()\n int_field = IntField()\n bool_field = BoolField()\n list_field = ListField()\n tuple_field = TupleField(np=Point)\n\n json_str = '''{\n \"__class__\": \"Foo\",\n \"foo_id\": \"1234\",\n \"str_field\": \"anything\",\n \"int_field\": 123,\n \"date_field\": \"2014-12-13\",\n \"bool_field\": false,\n \"tuple_field\":{\n \"x\": 1,\n \"y\": 2\n }\n }'''\n foo = Foo.from_jsonify(json.loads(json_str))\n\n self.assertEqual(foo.foo_id, '1234')\n self.assertEqual(foo.int_field, 123)\n self.assertEqual(foo.bool_field, False)\n self.assertEqual(foo.date_field, datetime.date(2014, 12, 13))\n Point = namedtuple('Point', ['x', 'y'], False)\n self.assertEqual(foo.tuple_field, Point(x=1, y=2))", "def __iter__(self):\n return self.data_container.__iter__()", "def __init__(self):\n self.key2value = {}\n self.key2time = {}", "def test_translate_struct_dict_unique_key(self):\n root = netapp_api.NaElement('root')\n child = {'e1': 'v1', 'e2': 'v2', 'e3': 'v3'}\n root.translate_struct(child)\n self.assertEqual(len(root.get_children()), 3)\n self.assertEqual(root.get_child_content('e1'), 'v1')\n self.assertEqual(root.get_child_content('e2'), 'v2')\n self.assertEqual(root.get_child_content('e3'), 'v3')", "def data(self):\r\n raise NotImplementedError", "def normalize(self: T) -> T:", "def __init__(self):\n self._dict = {}\n self._array = []", "def __getitem__(self, key):\n raise NotImplementedError()", "def _transform_data(self, *args, **kwargs) -> None:\n raise NotImplementedError", "def __init__ (self, structref, byteOrder = \"default\"):\n\t\tself.__dict__[\"structref\"] = structref\n\t\tself.__dict__[\"struct\"] = self.byteOrder [byteOrder]\n\t\t\n\t\t# Built structure used by the struct module\n\t\tfor i in self.structref:\n\t\t\ttry:\n\t\t\t\tsize = i[self.SIZE]\n\t\t\texcept IndexError:\n\t\t\t\tsize = 1\n\t\t\t\t\n\t\t\tif size in (1, None):\n\t\t\t\tself.struct = self.struct + self.format [i[self.TYPE]]\n\t\t\telse:\n\t\t\t\tself.struct = self.struct + \"%d\"%size + self.format [i[self.TYPE]]\n\t\t\t\t\n\t\t# Built structure values\n\t\tself.__dict__[\"value\"] = {}\n\t\tfor i in self.structref:\n\t\t\ttry:\n\t\t\t\tself.value[i[self.NAME]] = i[self.VALUE]\n\t\t\texcept IndexError:\n\t\t\t\tif i[self.TYPE] in (\"char[]\", \"string\"):\n\t\t\t\t\tself.value[i[self.NAME]] = \"\\0\"\n\t\t\t\telse:\n\t\t\t\t\tself.value[i[self.NAME]] = 0", "def __init__(self):\n self.root = [None, dict(), False] # val, sons, end-able", "def modify_struct(self, struct, is_full_struct):\n return struct", "def __call__(value):", "def _spark_struct_field(self) -> StructField:", "def elems(self):" ]
[ "0.5697675", "0.54845077", "0.5461735", "0.54345965", "0.5431139", "0.54021233", "0.5388815", "0.5388815", "0.53862596", "0.53862596", "0.5333713", "0.5331575", "0.53289396", "0.5323391", "0.5321574", "0.53174174", "0.5310203", "0.53006154", "0.52964413", "0.5296287", "0.52754027", "0.527424", "0.527424", "0.5267994", "0.52587223", "0.52531326", "0.52463317", "0.5246224", "0.5245738", "0.521439", "0.5212546", "0.5195389", "0.51692706", "0.51692706", "0.51692706", "0.51692706", "0.5168331", "0.51567364", "0.51321566", "0.51321566", "0.5130865", "0.5125323", "0.5114122", "0.5114122", "0.51139665", "0.5111725", "0.51088685", "0.51074076", "0.5099694", "0.5098688", "0.5092436", "0.50879276", "0.50863105", "0.50863105", "0.5079222", "0.5075784", "0.507255", "0.5067306", "0.5050247", "0.5028844", "0.5023854", "0.5023102", "0.501877", "0.50178754", "0.50173604", "0.50132924", "0.5006418", "0.5002114", "0.50006735", "0.49936122", "0.49889445", "0.49803278", "0.49799255", "0.49723", "0.49686232", "0.4964768", "0.49643508", "0.49634105", "0.49625355", "0.49610636", "0.49594334", "0.49581712", "0.49581712", "0.49581712", "0.49581712", "0.49545348", "0.4954424", "0.4948832", "0.49466878", "0.49436688", "0.49426925", "0.49422878", "0.4938825", "0.49378493", "0.49374744", "0.49336097", "0.49310446", "0.49252224", "0.49235088", "0.49222502", "0.492149" ]
0.0
-1
Returns the total number of bytes occupied by the filter object
def get_insternal_size(self):
    return (
        sys.getsizeof(self.theta)
        + sys.getsizeof(self.num_buckets)
        + sys.getsizeof(self.k)
        + sys.getsizeof(self.fp_size)
        + sys.getsizeof(self.max_iter)
        + sys.getsizeof(self.bucket_size)
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __len__(self):\n return sum(f.count for f in self.filters)", "def container_size(self):\n import cPickle\n import sys\n t = cPickle.dumps(self.filter_bitarray)\n return sys.getsizeof(t)", "def capacity(self):\n return sum(f.capacity for f in self.filters)", "def get_size(self):\n return (\n sys.getsizeof(self.children) +\n sys.getsizeof(self.parent) +\n sys.getsizeof(self.dataset_id) +\n sys.getsizeof(self.k) +\n self.filter.get_size()\n )", "def filtered_count(self) -> int:\n return self.__filtered_count", "def size(self):\n ret = 0\n for ii in self.__data:\n ret += int(ii.get_size())\n return ret", "def size(self):\n\t\treturn self._count", "def count(self):\n\t\treturn sum(read.copy for read in self.__iter__())", "def __len__(self):\n return self.flat_image.size", "def get_size(self):\n cum_size = 0\n for stream in self.__streams.values():\n cum_size += sys.getsizeof(stream)\n for trace in stream:\n cum_size += sys.getsizeof(trace)\n cum_size += sys.getsizeof(trace.stats)\n cum_size += sys.getsizeof(trace.stats.__dict__)\n cum_size += sys.getsizeof(trace.data)\n cum_size += trace.data.nbytes\n # Add one percent buffer just in case.\n return cum_size * 1.01", "def nbytes(self):\n\n return self.data.type.datasize", "def size(self) -> int:\n return sum(ob.size for ob in self.objects.ravel())", "def __len__(self):\n return sum(self.size_freqs.values())", "def size(self):\n return len(self.buffer)", "def __len__(self):\n return self._used - self._deleted", "def size(self):\n size = 0\n size += self.data.size * sys.getsizeof(self.data)\n return size / 1024.0 / 1024.0 / 1024.0", "def __len__(self) -> int:\n if self.preload:\n return len(self.data_ram)\n else:\n return len(self.data)", "def nbytes(self) -> int:\n return self._nbytes(False)", "def __len__(self):\n return len(self.bytes)", "def get_size(self) -> int:\n total_size = 0\n for entry in self.__entries:\n total_size += entry.get_size()\n return total_size", "def getLength(self):\n return self.count", "def get_length(self):\n return self.resource.get_size()", "def __len__(self):\n\n try:\n return len(self.counts)\n except SpectrumError:\n return len(self.cps)", "def __len__(self):\n return len(self.buffer)", "def __len__(self):\n return len(self.buffer)", "def __len__(self) -> int:\n return len(self.buffer)", "def get_size(self):\n return len(self.cache)", "def get_length(self):\n if self.opt.num_buckets > 1:\n return sum([len(bucket) for bucket in self.data])\n else:\n return len(self.data)", "def _size(self):\n return self._logicalSize", "def getLength(self):\n return len(self.entries)", "def count(self):\n return self.size()", "def nbytes(self):\n # type: () -> int\n size = 0\n for chunk in self.data.chunks:\n for buf in chunk.buffers():\n if buf is not None:\n size += buf.size\n return size", "def nbytes(self):\n # type: () -> int\n size = 0\n for chunk in self.data.chunks:\n for buf in chunk.buffers():\n size += buf.size\n return size", "def length(self):\n return self.count", "def size(self):\n return dict.__len__(self)", "def size(self) -> int:\n return len(self.event_buffer)", "def size(self) -> int:", "def size(self):\r\n return self.__length", "def total_length():\n return", "def size(self) -> int:\n return self._fock.size", "def getSize(self):\n return self.bf.memory()", "def size(self):\n\t\treturn len(self.cache)", "def size(self):\n return len(self.data)", "def size(self):\n return len(self.data)", "def size(self):\n return len(self.data)", "def size(self):\n return len(self.data)", "def size(self):\n return 
len(self.data)", "def __len__(self):\n if self._buffer is not None:\n if self._header.value_type in b'ZBH':\n return len(self._buffer)\n else:\n return 1\n else:\n return 0", "def totalsize(self):\n return sum([sz for sz in self.iterate()])", "def size(self):\n return self.data.size", "def count(self):\r\n return self.data_array.size", "def size(self):\r\n return self.info().size", "def size(self) -> int:\n return self.stat().size", "def num_total_logical_bytes(self) -> str:\n return pulumi.get(self, \"num_total_logical_bytes\")", "def size(self):\n return self.__length", "def length(self):\n # TODO: Count number of key-value entries in each of the buckets\n return self.size\n # for bucket in self.buckets():", "def __len__(self):\n total_objs = 0\n\n if self._shelve is not None:\n total_objs += len(self._shelve)\n\n if self._dict is not None:\n total_objs += len(self._dict)\n\n return total_objs", "def total_length(self):\n raise NotImplementedError()", "def __len__(self):\n if not hasattr(self.limitedstream, \"limit\"):\n return 0\n return self.limitedstream.limit", "def total_length(self):\n return self.length", "def __len__(self):\r\n return numBits(self.n)", "def size(self):\n return self.__size", "def get_size(self):", "def digest_size(self):\n\n return self.__digest_size", "def __len__(self):\n return len(self.raw)", "def size(self):\n return self._N", "def nbytes(self):\n # Equivalent to self.itemsize * self.size\n return self.initial_value.nbytes", "def size(self):\n return len(self.cache)", "def size(self):", "def __len__(self):\n return sum(l for l, op,in self.items() \\\n if op in Cigar.read_consuming_ops)", "def size(self) -> int:\r\n return self.da.length()", "def __len__(self):\n return self._count", "def __len__(self):\n return self._count", "def _nbytes(self, deep: bool = False) -> int:\n # for implementations with no useful getsizeof (PyPy)\n objsize = 24\n\n level_nbytes = sum(i.memory_usage(deep=deep) for i in self.levels)\n label_nbytes = sum(i.nbytes for i in self.codes)\n names_nbytes = sum(getsizeof(i, objsize) for i in self.names)\n result = level_nbytes + label_nbytes + names_nbytes\n\n # include our engine hashtable\n result += self._engine.sizeof(deep=deep)\n return result", "def size(self):\n return self._size", "def size(self) -> int:\n size = self.da.length()\n return size", "def __len__(self):\n\n return len(self.data) * 8", "def __len__(self):\n return self._count()", "def __len__(self) -> int:\n return self.disp_size ** 2", "def __len__(self):\n # TODO: Is this method used?\n return self._info['length']", "def size(self):\r\n return self._size", "def __len__(self) -> int:\n if self.serialize_data:\n return len(self.data_address)\n else:\n return len(self.data_infos)", "def size(self) -> int:\n return self.da.length()", "def size(self) -> int:\n return self.da.length()", "def size(self) -> int:\n return self.da.length()", "def size(self) -> int:\n return self.da.length()", "def get_size(self):\n return len(self.get_payload()) + 4", "def size(self, index):\n return self.d1.size(index)\n # FILTER BASED ON D1", "def size(self):\n pass", "def size(self):\n pass", "def size(self):\n pass", "def __len__(self):\n return self._num_samples_per_file * len(self._files) // self._world_size", "def size(self):\n return self._length", "def size(self):\n return len(self._data)", "def getSize(self) -> int:\n ...", "def getSize(self) -> int:\n ...", "def getSize(self):\r\n list = self.getList()\r\n return len(list)", "def __len__(self):\n return self.total", "def 
nbytes(self):\n dtype = self.config[\"dtype\"]\n if dtype is None:\n return None\n\n size = reduce(mul, self.shape, 1)\n nbytes = size * dtype.itemsize\n\n if getattr(self, \"masked\", True):\n nbytes += size\n\n return nbytes", "def __len__(self):\n return self.size_", "def count(self):\n # TODO not implemented yet\n return 0" ]
[ "0.78136486", "0.73444617", "0.73030394", "0.7215649", "0.6866789", "0.6819548", "0.67372525", "0.6728737", "0.67182195", "0.66980165", "0.6695262", "0.6689676", "0.6685823", "0.6674033", "0.6666413", "0.665279", "0.6636472", "0.6635328", "0.6619961", "0.6601492", "0.6596799", "0.6581194", "0.65761584", "0.6561601", "0.6561601", "0.6557488", "0.6547242", "0.65399474", "0.6535526", "0.65326524", "0.65265626", "0.6524807", "0.6510062", "0.64954704", "0.64937633", "0.64929605", "0.6487915", "0.6486541", "0.64846236", "0.6480925", "0.6478867", "0.6478617", "0.64784503", "0.64784503", "0.64784503", "0.64784503", "0.64784503", "0.64730495", "0.6463918", "0.64622474", "0.6461862", "0.64616567", "0.6461088", "0.6460886", "0.64592046", "0.6452885", "0.6452547", "0.6447709", "0.6437205", "0.64313656", "0.64219713", "0.6421755", "0.6419912", "0.64182186", "0.64110357", "0.6410568", "0.6401633", "0.6397821", "0.6397117", "0.6392775", "0.6389255", "0.63881624", "0.63881624", "0.6386856", "0.6384977", "0.6382869", "0.63813907", "0.6381184", "0.6356752", "0.635604", "0.6355702", "0.6355098", "0.6353598", "0.6353598", "0.6353598", "0.6353598", "0.63509697", "0.63478905", "0.63449985", "0.63449985", "0.63449985", "0.6344236", "0.63417864", "0.63358635", "0.6334654", "0.6334654", "0.63318634", "0.6331693", "0.6321745", "0.632146", "0.6321391" ]
0.0
-1
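As a side note on the size-accounting pattern in the `get_insternal_size` record above: `sys.getsizeof` reports only each object's own shallow footprint, so summing it over scalar attributes gives a lower bound rather than a deep memory measurement. Below is a minimal, self-contained sketch of that pattern; the `SizedConfig` class, its field values, and the `shallow_size` helper are hypothetical stand-ins, not taken from this dataset.

import sys


class SizedConfig:
    """Hypothetical object with scalar fields, mirroring the shallow-size pattern above."""

    def __init__(self, theta=0.5, num_buckets=1024, k=31, fp_size=8, max_iter=500, bucket_size=4):
        self.theta = theta
        self.num_buckets = num_buckets
        self.k = k
        self.fp_size = fp_size
        self.max_iter = max_iter
        self.bucket_size = bucket_size

    def shallow_size(self) -> int:
        # sys.getsizeof counts only each attribute's own footprint, not objects it references,
        # so this sum is a lower bound on the real memory usage.
        return sum(sys.getsizeof(v) for v in vars(self).values())


print(SizedConfig().shallow_size())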
Represents a single node of Cuckoo Tree.
def __init__(self, k, num_buckets, fp_size, bucket_size, max_iter):
    self.children: List[Node] = []
    self.parent: Optional[Node] = None
    self.filter = CuckooFilterBit(num_buckets, fp_size, bucket_size, max_iter)
    self.dataset_id: Optional[str] = None
    self.k = k
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tree(self) -> Node:\n return Node(self.to_string())", "def node(self):\n return Node(self)", "def __repr__(self):\n return 'TreeNode({0})'.format(self.data)", "def __repr__(self):\n return 'Node({!r})'.format(self.data)", "def node(self):\n return self._node", "def node(self):\n return self._node", "def __init__(self):\n self.root = TreeNode(\"\")", "def __init__(self):\n self.root = TreeNode(\"\")", "def __init__(self, c):\n TreeNode.__init__(self)\n self.c = c", "def get_node(self):\n return self.__node", "def __init__(self):\n self.root = TreeNode(None)", "def __init__(self):\n self.root = TreeNode('#')", "def binary_tree():\n\n class Node(object):\n def __init__(self, data):\n self.left = None\n self.right = None\n self.data = data\n\n # Create a root\n root = Node(data=1)\n root.left = Node(data=2)\n root.right = Node(data=3)\n root.left.left = Node(data=4)\n \"\"\" Structure\n 1 <-- root\n / \\\n 2 3 \n / \n 4\n \"\"\"", "def get_node(self):\r\n return self._node", "def __init__(self):\n self.root = Node('')", "def get_node():\n return TrieNode()", "def __init__(self):\n self.root = Node(\"\")", "def __init__(self):\n self.root = Node(\"\")", "def __repr__(self):\n return self.root_node", "def __init__(self):\n self.root = Node(None)", "def __init__(self):\n self.root = Node()", "def __init__(self):\n self.root = Node()", "def __init__(self):\n self.root = Node()", "def __repr__(self):\n\n return f\"<Node {self.data}>\"", "def get_node(self):\n assert self._node_id is not None\n return self._get_info(self.EXPECTED)[self._node_id]", "def __str__(self):\n return self.root_node", "def __init__(self, value: object) -> None:\n self.value = value # to store node's data\n self.left = None # pointer to root of left subtree\n self.right = None # pointer to root of right subtree", "def __init__(self, value: object) -> None:\n self.value = value # to store node's data\n self.left = None # pointer to root of left subtree\n self.right = None # pointer to root of right subtree", "def __init__(self):\n self.root = self.Node(None)", "def as_node(cls, obj):\n if isinstance(obj, cls):\n return obj\n elif is_string(obj):\n # Assume filepath.\n return FileNode(obj)\n elif obj is None:\n return obj\n else:\n raise TypeError(\"Don't know how to convert %s to Node instance.\" % obj)", "def __init__(self, tree_node=None):\n self.root = tree_node", "def __repr__(self):\n return '\\n~Node (' + str(self._val) + ') has ' + str(len(self._children)) + ' children: ' + str(sorted([val for val in self._children])) + '~'", "def __repr__(self):\n return 'BinaryNode({})'.format(repr(self.data))", "def __init__(self):\n self.root = self.Node()", "def __init__(self):\n self.root = SimpleNode()", "def create_node(self, data):\n node = RealNode(data, layer=self)\n self.append_node(node)\n return node", "def node_type(self):\n return self._node_type", "def __repr__(self):\n return f'PrefixTreeNode({self.character!r})'", "def __repr__(self):\n return 'BinarySearchTree({} nodes)'.format(self.size)", "def __init__(self):\n self.__root = Node()", "def node(cls):\n return relationship.many_to_one(cls, 'node')", "def node(cls):\n return relationship.many_to_one(cls, 'node')", "def __init__(self, value, parent = None):\n # initialize new node\n self.value = value\n self.parent = parent\n self.left = None\n self.right = None\n self.height = 1", "def getNode(self,ch=None):\n return TrieNode.TrieNode(ch)", "def get_node_type(self):\n return self.node_type", "def construct_tree():\n root = TreeNode(5)\n root.left = 
TreeNode(3)\n root.right = TreeNode(8)\n root.left.left = TreeNode(2)\n root.left.right = TreeNode(4)\n root.right.left = TreeNode(7)\n return root", "def __str__(self):\r\n T = Btree(2)\r\n T.root = Node(self.keys, [Node(child.keys, []) for child in self.children])\r\n return str(T)", "def __init__(self, root_value):\n self.root = self.TreeNode(value=root_value)", "def __init__(self, node_text=\"\", node_type=0, node_parent=None):\n self.node_text = node_text\n self.node_type = node_type\n self.node_parent = node_parent\n self.node_left = None\n self.node_right = None", "def tree(self):\r\n return self._tree", "def __init__(self):\n # use a Trie as a data structure\n self.root = Node()", "def __init__(self, node):\n self.node = node\n self.parent = None\n self.depth = None", "def __init__(self):\n self.root = TridNode()", "def _dummy_node(self) -> CFNode:\n node = CFNode()\n self._graph.add_node(node)\n return node", "def root_node(self):\n return self.process_tree", "def __repr__(self):\r\n node_rep = \"{} RBTreeNode(value = {}\".format(self.color, self.value)\r\n node_rep += \", left=RBTreeNode({})\".format(self.left.value) if self.left else \", left=NONE\"\r\n node_rep += \", right=RBTreeNode({})\".format(self.right.value) if self.right else \", right=NONE\"\r\n node_rep += \", parent=RBTreeNode({}))\".format(self.parent.value) if self.parent else \", parent=None)\"\r\n return node_rep", "def __repr__(self):\n return 'AVLNode({!r})'.format(self.data)", "def __str__(self):\n return 'BinarySearchTreeNode(' + str(self.element) + ')'", "def _bddnode(root, lo, hi):\n\t# print(\"_bddnode\")\n\tif lo is hi:\n\t\tnode = lo\n\telse:\n\t\tkey = (root, lo, hi)\n\t\ttry:\n\t\t\tnode = _NODES[key]\n\t\texcept KeyError:\n\t\t\tnode = _NODES[key] = BDDNode(*key)\n\treturn node", "def deserialize(self, data):\n if len(data) == 0:\n return None\n root = TreeNode(data[0])\n root.left = self.deserialize(data[1]) \n root.right = self.deserialize(data[2])\n return root", "def __init__(self):\n self.root = RadixTreeNode()\n self.root.key = \"\"\n self.size = 0", "def __init__(self, node: Node[T]) -> None:\n self.current = node", "def __repr__(self: 'StarTree') -> str:\n return 'StarTree({})'.format(repr(self.children[0]))", "def _gen_test_tree_3():\n tree = BinaryNode(5)\n tree.left = BinaryNode(1)\n tree.left.left = BinaryNode(2)\n tree.left.right = BinaryNode(3)\n tree.right = BinaryNode(7)\n tree.right.left = BinaryNode(8)\n tree.right.right = BinaryNode(9)\n return tree", "def __repr__(self: 'UnaryTree') -> str:\n return 'UnaryTree({}, {})'.format(\n repr(self.symbol), repr(self.children[0]))", "def __str__(self) -> str:\n return 'Node({})'.format(self.yaml_node)", "def tree(self):\n return self._tree", "def tree(self):\n return self._tree", "def tree(self):\n return self._tree", "def tree(self):\n return self._tree", "def build():\n r = TreeNode(1)\n r.left = TreeNode(2)\n r.left.left = TreeNode(4)\n r.left.right = TreeNode(5)\n\n r.right = TreeNode(3)\n\n return r\n return TreeNode(3)", "def __init__(self):\n self.root = TrieNode(None)", "def __init__(self):\n self.root = TrieNode(None)", "def __init__(self):\n self.root = TrieNode()", "def __init__(self):\n self.root = TrieNode()", "def __init__(self):\n self.root = TrieNode()", "def __init__(self):\n self.root = TrieNode()", "def __init__(self):\n self.root = TrieNode()", "def __init__(self):\n self.root = TrieNode()", "def __init__(self):\n self.root = TrieNode()", "def __init__(self):\n self.root = TrieNode()", "def __init__(self):\n self.root = 
TrieNode()", "def __init__(self):\n self.root = TrieNode()", "def __init__(self):\n self.root = TrieNode()", "def __init__(self):\n self.root = TrieNode()", "def __init__(self):\n self.root = TrieNode()", "def __init__(self):\n self.root = TrieNode()", "def get_tree(self):\n return self.tree or None", "def to_model(self):\r\n node = Node.objects.get_or_create(\r\n name=self.name,\r\n description=self.description\r\n )[0]\r\n \r\n return node", "def node_data(self):\n return self.node_data_", "def TreeNode(parent, name, icon_name):\n node = QtGui.QTreeWidgetItem(parent)\n node.name = name\n node.setText(0, name)\n\n icon = Icon(icon_name)\n node.setIcon(0, icon)\n\n return node", "def get_node(self):\n split_data = self._data.split(',',1)\n if len(split_data) == 1:\n return self.node_class(split_data[0])\n else:\n return self.node_class(split_data[0], Parser(split_data[1],\n self.node_class))", "def __init__(self):\n\n\t\tself.root = None\n\t\tself.numNodes = 0", "def build():\n root = TreeNode(3)\n root.left = TreeNode(2)\n root.right = TreeNode(4)\n root.right.right = TreeNode(7)\n root.right.left = TreeNode(5)\n return root", "def __init__(self):\n self.left = None\n self.right = None\n self.depth = 0\n self.val = None\n self.id = None", "def createNode(self, name):\n return Node(name)", "def __init__(self, val=None):\n self.val = val\n self.parent = None\n if val is not None:\n self.left = BSTree()\n self.right = BSTree()\n else:\n self.left = None\n self.right = None", "def get_node(self, key: str) -> Node:", "def node(self, uid):\n\n raise NotImplementedError", "def __init__(self):\n self.root = TrieNode(\".\")", "def node(self, name):\r\n return self.nodes[name]" ]
[ "0.7352759", "0.7162796", "0.698228", "0.6661938", "0.65606713", "0.65606713", "0.65407693", "0.65407693", "0.64917505", "0.6490089", "0.6484594", "0.64840937", "0.64418876", "0.6436587", "0.6350738", "0.6347878", "0.6335503", "0.6335503", "0.63311744", "0.6306258", "0.62918115", "0.62918115", "0.62918115", "0.62830186", "0.6275369", "0.62670934", "0.6234995", "0.6234995", "0.62209135", "0.6214404", "0.62129927", "0.61709386", "0.61627513", "0.6150675", "0.6147801", "0.6135152", "0.60851216", "0.6082017", "0.6065797", "0.604724", "0.60408354", "0.60408354", "0.60264796", "0.6004263", "0.5976113", "0.5957672", "0.59526527", "0.5946217", "0.5933947", "0.59332967", "0.58947587", "0.5892963", "0.58898866", "0.58574826", "0.58364093", "0.5820109", "0.58144474", "0.58102936", "0.580593", "0.580505", "0.5775912", "0.5743547", "0.5738784", "0.57334644", "0.5706651", "0.5701904", "0.57015526", "0.57015526", "0.57015526", "0.57015526", "0.56865746", "0.5675075", "0.5675075", "0.56668425", "0.56668425", "0.56668425", "0.56668425", "0.56668425", "0.56668425", "0.56668425", "0.56668425", "0.56668425", "0.56668425", "0.56668425", "0.56668425", "0.56668425", "0.56668425", "0.5661989", "0.56609297", "0.565981", "0.56565565", "0.5655355", "0.5652433", "0.565062", "0.5646409", "0.5645924", "0.56415904", "0.5637249", "0.5630794", "0.56253", "0.56227213" ]
0.0
-1
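For orientation, here is a minimal sketch of how parent/children links like those in the node record above are typically wired into a tree; the `TreeNode` class and the `add_child` helper are illustrative assumptions, not part of the dataset.

from typing import List, Optional


class TreeNode:
    """Illustrative node with the same parent/children wiring as the record above."""

    def __init__(self, dataset_id: Optional[str] = None) -> None:
        self.children: List["TreeNode"] = []
        self.parent: Optional["TreeNode"] = None
        self.dataset_id = dataset_id

    def add_child(self, child: "TreeNode") -> None:
        # Keep both links consistent: the child points back at its new parent.
        child.parent = self
        self.children.append(child)


root = TreeNode()
leaf = TreeNode(dataset_id="sample_1")
root.add_child(leaf)
assert leaf.parent is root and root.children == [leaf]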
"Hamming distance" score where lower is better
def score(self, dataset: List[Read]) -> int:
    kmers_in_common = 0
    for read in dataset:
        for kmer in read.kmers(self.k):
            if self.filter.contains(kmer):
                kmers_in_common += 1
    return self.filter.num_items_in_filter - kmers_in_common
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hamming_dist(s1, s2):\n\n if s1 is None or s2 is None:\n return np.NaN\n if pd.isnull(s1) or pd.isnull(s2):\n return np.NaN\n\n # Create the similarity measure object\n measure = sm.HammingDistance()\n\n s1 = gh.convert_to_str_unicode(s1)\n s2 = gh.convert_to_str_unicode(s2)\n\n\n # Call the function to compute the distance\n return measure.get_raw_score(s1, s2)", "def hamming_distance(s1, s2):\n assert(len(s1) == len(s2))\n return np.sum([1 if c1 != c2 else 0 for c1, c2 in zip(s1, s2)])", "def hamming_distance(a, b):\n return np.count_nonzero(a != b)", "def hamming_dist(self):\r\n distance = 0\r\n distance = abs(len(self.s1) - len(self.s2))\r\n distance += sum(i1 != i2 for i1,i2 in zip(self.s2,self.s1))\r\n return distance", "def hamming_distance(h1, h2):\n b1 = bitarray.bitarray()\n b1.frombytes(h1)\n b2 = bitarray.bitarray()\n b2.frombytes(h2)\n return bitarray.bitdiff(b1, b2)", "def hamming_dist(a_b, b_b):\n return sum(bin(a_b[n] ^ b_b[n]).count('1') for n in range(len(a_b)))", "def hamming_dist(seq1, seq2):\n diffs = 0\n length = 0\n for x, y in zip(str(seq1), str(seq2)):\n if x == '-' or y == '-':\n continue\n elif x != y:\n diffs += 1\n length += 1\n try:\n return float(diffs) / length\n except:\n return 0.5", "def hamming_distance(x1: np.ndarray, x2: np.ndarray) -> int:\n assert isinstance(x1, np.ndarray) and isinstance(x2, np.ndarray)\n return (x1 != x2).sum()", "def compute_hamming_distance(str1, str2):\n\n mismatches = 0\n len_strs = len(str1)\n for i in range(len_strs):\n if str1[i] != str2[i]:\n mismatches = mismatches + 1\n return mismatches", "def hamming_distance(s1, s2):\n return sum(c1 != c2 for c1, c2 in zip(s1, s2))", "def hamming_distance(s1, s2):\n assert len(s1)==len(s2), \",\".join((s1, s2))\n s1 = np.array(s1.upper(), dtype=\"c\")\n s2 = np.array(s2.upper(), dtype=\"c\")\n return np.sum(s1 != s2)", "def hamming_dist(v1, v2):\r\n edits = (v1 != v2)\r\n return edits.sum()", "def hamming_distance(p, q):\n result = 0\n for x, y in zip(p, q):\n if x != y:\n result += 1\n return result + abs(len(p) - len(q))", "def hamming_distance(cs):\n d = 0.0\n end = len(cs) - 1\n for idx in range(end):\n s1 = cs[idx]\n s2 = cs[idx + 1]\n assert len(s1) == len(s2)\n s1_bits = ''.join('{:b}'.format(c).zfill(8) for c in s1)\n s2_bits = ''.join('{:b}'.format(c).zfill(8) for c in s2)\n d += sum(c1 != c2 for c1, c2 in zip(s1_bits, s2_bits))\n return d / end", "def hamming_distance(lhs,rhs):\n return len([(x,y) for x,y in zip(lhs,rhs) if x !=y])", "def hamming_sim(s1, s2):\n\n if s1 is None or s2 is None:\n return np.NaN\n if pd.isnull(s1) or pd.isnull(s2):\n return np.NaN\n\n # Create the similarity measure object\n measure = sm.HammingDistance()\n\n s1 = gh.convert_to_str_unicode(s1)\n s2 = gh.convert_to_str_unicode(s2)\n\n # Call the function to compute the similarity score.\n return measure.get_sim_score(s1, s2)", "def hamming_distance(input1, input2):\n if len(input1) != len(input2):\n raise ValueError('Length of input1 and input2 are not equal.')\n input1 = hex_decode(hex_encode(input1))\n input2 = hex_decode(hex_encode(input2))\n # the general strategy here is to xor the two strings together\n # and then just count the number of 1s in the output (i.e., where the\n # two strings differed).\n output = fixed_xor(input1, input2)\n distance = 0\n for byte in output:\n for i in range(8):\n bit_mask = 1 << i\n if (bit_mask & byte) == bit_mask:\n distance += 1\n return distance", "def minHamm(text,pattern):\r\n D=kmersfrequency(text,len(pattern))\r\n return 
(min([(HammingDistance(pattern,x)) for x in D.keys()]))", "def hamming_distance(v_est, v_true):\n assert(v_est.shape == v_true.shape)\n\n return 1 / len(v_est) * np.sum(v_est != v_true)", "def hamdist(inp):\n\treturn sum(c1 != c2 for c1, c2 in itertools.izip(inp[0],inp[1]))", "def __hamming_distance(s1, s2):\n if len(s1) != len(s2):\n raise ValueError(\"Undefined for sequences of unequal length\")\n return sum(el1 != el2 for el1, el2 in zip(s1, s2))", "def hamming_distance(bytes_0: bytes, bytes_1: bytes) -> int:\n assert len(bytes_0) == len(bytes_1)\n return sum(sum(bits(byte_0 ^ byte_1)) for (byte_0, byte_1) in zip(bytes_0, bytes_1))", "def hamming_distance(s1, s2):\n if len(s1) != len(s2):\n raise ValueError(\"Undefined for sequences of unequal lenght.\")\n return sum(ch1 != ch2 for ch1, ch2 in zip(s1, s2))", "def hammingDist(x, y):\n hd = 0\n for ch1, ch2 in zip(x, y):\n if ch1 != ch2:\n hd += 1\n return hd", "def hamming_dist(bytes1, bytes2):\n if type(bytes1) == str:\n bytes1 = [ord(c) for c in str1]\n if type(bytes2) == str:\n bytes2 = [ord(c) for c in str2]\n bins = [bin(o1 ^ o2) for o1, o2 in zip(bytes1, bytes2)]\n return len([i for i in ''.join(bins) if i == '1'])", "def hamming_distance(a, b):\n assert len(a) == len(b)\n dist = sum(item_a != item_b for item_a, item_b in zip(a, b))\n return dist", "def hammingDistance(s1 = \"\", s2 = \"\"):\n # if len(s1) != len(s2):\n # raise ValueError(\"Undefined for sequences of unequal length\")\n return sum(bool(ord(ch1) - ord(ch2)) for ch1, ch2 in zip(s1, s2))", "def get_hamming_distance(self, calc_bits):\n\n # Iterate through calculated bits and compare to received bits.\n # Store number of different bits in total_distance.\n total_distance = 0\n for idx, bit in enumerate(calc_bits):\n diff = abs(bit - self.received_bits[idx])\n total_distance += diff\n\n return total_distance", "def generalised_hamming_distance(a, b):\n if len(a) == len(b):\n return hamming_distance(a, b)\n if len(a) > len(b):\n dna = a\n kmer = b\n else:\n dna = b\n kmer = a\n k = len(kmer)\n\n dist = min([hamming_distance(kmer, kmer2) for kmer2 in kmers_from_dna(dna, k)])\n return dist", "def hamming_dist(gene_1, gene_2):\n ham_dist = 0\n for c1, c2 in zip(gene_1, gene_2):\n if c1 != c2:\n ham_dist += 1\n return ham_dist", "def hamming_distance(s1, s2):\n if len(s1) > len(s2):\n s2 = s2.ljust(len(s1))\n else:\n s1 = s1.ljust(len(s2))\n\n return sum(el1 != el2 for el1, el2 in zip(s1, s2))", "def chk_hamming(data):\n pass", "def hamming_distance(words: Iterator[str], vocabulary: Dict[str, int]):\n\n for word in words:\n distances = []\n suggestions = []\n vocab_list = list(vocabulary)\n for (i,vocab) in enumerate(vocab_list):\n if len(vocab) == len(word):\n distances.append(hamming(word, vocab))\n else:\n distances.append(120)\n \n idx = np.array(distances).argsort()[:5]\n \n for i in range(5):\n for j in range(i+1,5):\n if distances[idx[i]] == distances[idx[j]]:\n if vocabulary.get(vocab_list[idx[i]]) < vocabulary.get(vocab_list[idx[j]]):\n temp = idx[i] \n idx[i] = idx[j]\n idx[j] = temp \n\n for i in idx:\n suggestions.append(vocab_list[i])\n\n output(\"{misspelled}\\t{corrections}\".format(\n misspelled=word,\n corrections=\"\\t\".join(suggestions)\n )) # may cause IO bottleneck", "def __h2(self): # _manhattan_distance\n h2 = 0\n\n for i in range(self.board_size):\n for j in range(self.board_size):\n if self.arr[i][j] == 0:\n continue\n h2 += (abs(i-(self.arr[i][j]//self.board_size)) +\n abs(j-(self.arr[i][j] % self.board_size)))\n\n return h2", "def _heuristic(a, 
b):\n return (b[0] - a[0]) ** 2 + (b[1] - a[1]) ** 2", "def test_binary_hamming_distance_differentiability(self, inputs):\n preds, target = inputs\n self.run_differentiability_test(\n preds=preds,\n target=target,\n metric_module=BinaryHammingDistance,\n metric_functional=binary_hamming_distance,\n metric_args={\"threshold\": THRESHOLD},\n )", "def hamming_distance(str1, str2):\n\n # TODO: Write your solution here\n # Edge case check\n if len(str1) != len(str2):\n return None\n\n count = 0\n for index in range(len(str1)):\n if str1[index] != str2[index]:\n count += 1\n\n if count is 0:\n return None\n\n return count", "def HammingDistance(p, q):\r\n if len(p) != len(q):\r\n return -1\r\n dist = 0\r\n #zip(AB,CD) gives (('A','C'),('B','D'))\r\n for first, second in zip(p, q):\r\n if first != second:\r\n dist = dist + 1\r\n return dist", "def get_h_score(start, end):\n #uses a heuristic function\n #return 0 #used if you want Djikstras algorithm\n return (abs(end[0]-start[0])+abs(end[1]-start[1])) * 10", "def hamming_distance(s1, s2, hamming_distance = 3):\n\ts1 = str(s1)\n\ts2 = str(s1)\n\n\tif len(s1) != len(s2):\n\t\ts1 = replenish_int(s1, 6)\n\t\ts2 = replenish_int(s2, 6)\n\tdis = sum(ch1 != ch2 for ch1, ch2 in zip(s1, s2))\n\n\tif dis <= hamming_distance:\n\t\t'表示海明距离在 3 以内'\n\t\treturn True\n\telse:\n\t\treturn False", "def hamming(s1, s2):\n weight = abs(len(s1)-len(s2))\n if len(s1) < len(s2):\n s1, s2 = s2, s1\n for i in range(len(s2)):\n weight += not s1[i] == s2[i]\n return weight", "def manhatam_distance(self) -> int:\n raise NotImplementedError", "def HammingDistance(pattern1, pattern2):\n distance = 0\n if len(pattern1) == len(pattern2):\n for i in range(len(pattern1)):\n if pattern1[i]!=pattern2[i]:\n distance += 1\n return distance\n else:\n assert 0, \"Two patterns have different lengths.\"", "def lcs_hamming_only_matches(s1: str, s2: str, k: int, length: int, matches_lst: List):\n count = 0\n for i in range(0, len(s1) - length + 1):\n for j in range(0, len(s2) - length + 1):\n sub1 = s1[i: i + length]\n sub2 = s2[j: j + length]\n result = hamming_distance(sub1, sub2)\n if result <= k:\n matches_lst.append([i, j, result, sub1, sub2])\n count += 1\n # print(\"total matches: \" + str(count))\n return count", "def min_ham_dist(pattern, dna):\n dist = 0\n candidates = []\n for seq in dna:\n a, b = min_ham_dist_helper(pattern, seq)\n dist += a\n candidates.append(b)\n return dist, candidates", "def HammingDist(str1, str2):\n\tHdist = 0\n\tfor i, base in enumerate(str1):\n\t\tif base != str2[i]:\n\t\t\tHdist += 1\n\n\treturn Hdist", "def structural_hamming_distance(self,\n other,\n penalty_edge_mismatch_func=None):\n\n edges_1 = self.edges\n edges_2 = other.edges\n if penalty_edge_mismatch_func is None:\n penalty_edge_mismatch_func = GraphViaEdges.compute_penalty\n\n if set(edges_1.keys()) != set(edges_2.keys()):\n msg = 'The Structural Hamming Distances cannot be computed : the '\n msg += 'graphs cannot be compared.'\n raise GraphsCannotBeCompared(msg)\n\n shd = 0\n\n for key in edges_1.keys():\n\n shd += penalty_edge_mismatch_func(\n edge_1=edges_1[key],\n edge_2=edges_2[key]\n )\n\n return shd", "def calculate_weighted_hash(cls, word):\n\n hash_value = 0\n for char in word:\n hash_value += cls.alpha_lookup[char.lower()]\n return hash_value", "def _calculate_score(lsh, minhash, total_num_events):\n neighbours = lsh.query(minhash)\n return float(len(neighbours)) / float(total_num_events)", "def hamming_distance(string_a: str, string_b: str) -> int:\n if len(string_a) != 
len(string_b):\n raise ValueError(\n \"Strings are of unequal length can not compute hamming distance. Hamming distance is undefined.\"\n )\n return sum(char_1 != char_2 for char_1, char_2 in zip(string_a, string_b))", "def distance(self, x, y):\n\n return distance.hamming(x, y)", "def smith_waterman_score(src,dest,gap_cost=float(0.5),func=simCost_plus1_minus2):\n\tsrc_len = len(src)\n\tdest_len = len(dest)\n\tif src_len==0:\n\t\treturn dest_len\n\tif dest_len==0:\n\t\treturn src_len\n\n\td = numpy.zeros((src_len, dest_len), dtype=numpy.float)\n # pylint: enable=no-member\n \n\td[0][0]=max(0,-gap_cost,func.getCost(src,0,dest,0))\n \tmaxsofar = d[0][0]\n \tfor i in range(1,src_len):\n \t\td[i][0] = max(0,d[i-1][0]-gap_cost,func.getCost(src,i,dest,0))\n \t\tif maxsofar < d[i][0]:\n \t\t\tmaxsofar = d[i][0]\n \tfor j in range(1,dest_len):\n \t\td[0][j] = max(0,d[0][j-1]-gap_cost,func.getCost(src,0,dest,j))\n \t\tif maxsofar < d[0][j]:\n \t\t\tmaxsofar = d[0][j]\n \tfor i in range(1,src_len):\n \t\tfor j in range(1,dest_len):\n \t\t\tcost = func.getCost(src,i,dest,j)\n \t\t\td[i][j] = max(0,d[i-1][j]-gap_cost,d[i][j-1]-gap_cost,d[i-1][j-1]+cost)\n \t\t\tif maxsofar < d[i][j]:\n \t\t\t\tmaxsofar = d[i][j]\n\treturn maxsofar", "def distance(base_strand, comparison_strand):\n hamming_distance = 0\n\n for nucleotide in range(len(base_strand)):\n if base_strand[nucleotide] != comparison_strand[nucleotide]:\n hamming_distance += 1\n \n return hamming_distance", "def _compute_score(img_binary: np.ndarray, s: float) -> float:\n img_sheared = _shear_img(img_binary, s, 0)\n h = img_sheared.shape[0]\n\n img_sheared_mask = img_sheared > 0\n first_fg_px = np.argmax(img_sheared_mask, axis=0)\n last_fg_px = h - np.argmax(img_sheared_mask[::-1], axis=0)\n num_fg_px = np.sum(img_sheared_mask, axis=0)\n\n dist_fg_px = last_fg_px - first_fg_px\n col_mask = np.bitwise_and(num_fg_px > 0, dist_fg_px == num_fg_px)\n masked_dist_fg_px = dist_fg_px[col_mask]\n\n score = sum(masked_dist_fg_px ** 2)\n return score", "def hamming(a, b):\n len1 = len(a)\n len2 = len(b)\n overlap = min(len1, len2)\n difference = abs(len1 - len2)\n for x in range(overlap):\n if a[x] != b[x]:\n difference += 1\n\n return difference", "def calculate_manhattan_dist(state):", "def similarity_function(feature1, feature2):\n # 256 HOG, 18 HSV, 512 Encoder\n # weight color more if using the full vector\n if len(feature1) > 785:\n salient1 = feature1[256:256 + 18].copy() # be careful not to modify feature vector in place\n salient2 = feature2[256:256 + 18].copy()\n feature1 = feature1.copy()\n feature2 = feature2.copy()\n feature1[256:256 + 18] = salient1 * 10\n feature2[256:256 + 18] = salient2 * 10\n\n abs_distance = np.abs(feature1 - feature2)\n return np.sum(abs_distance)", "def hamdist(str1, str2):\n\n diffs = 0\n for ch1, ch2 in zip(str1, str2):\n if ch1 != ch2:\n diffs += 1\n return diffs", "def astar_heuristic(n1, n2):\n average_speed = 70\n return edge_weight(n1, n2, 70)", "def get_distance_hamming(self, vec):\r\n\r\n sum = 0\r\n if len(self.weights) == len(vec):\r\n return self.hamming(self.weights, vec)\r\n else:\r\n sys.exit(\"Error: dimension of nodes != input data dimension!\")", "def hamming_byte(bin1, bin2):\n\n diffs = 0\n xored = xor(bin1, bin2)\n for byte in xored:\n diffs += bin(byte).count(\"1\")\n return diffs", "def hausdorff_distance(self, other):\n ...", "def countingPointMutations(seq1, seq2):\n seqLength = len(list(seq1))\n \n hammingDistance=0;\n for i in range(0,seqLength):\n if list(seq1)[i]!=list(seq2)[i]:\n 
hammingDistance = hammingDistance+1;\n return hammingDistance", "def bhatt_distance(a, b):\n return -np.log(np.dot(b**.5, a**.5))", "def heuristic(self, a, b):\n return math.fabs(a[0] - b[0]) + math.fabs(a[1] - b[1])", "def hamming(s1, s2):\n assert len(s1) == len(s2)\n return sum(c1 != c2 for c1, c2 in zip(s1, s2))", "def heuristic(state, puzzle):\n h = 0\n for i in range(puzzle.dimension):\n for j in range(puzzle.dimension):\n # (0, 0) -> 1 as value, (0, 2) -> 3 as value, etc\n value = i * puzzle.dimension + j + 1\n if value == puzzle.dimension ** 2: # value is ' '\n value = ' '\n current_position = puzzle.get_coordinates(state, value)\n goal_position = (i, j)\n h += util.manhattanDistance(current_position, goal_position)\n h /= 2\n return h", "def ham_dist(p, q):\n count = 0\n for i in range(len(p)):\n if p[i] != q[i]:\n count += 1\n return count", "def ham_dist(p, q):\n count = 0\n for i in range(len(p)):\n if p[i] != q[i]:\n count += 1\n return count", "def hamming_weight(num):\n\n return bin(num).count(\"1\");", "def similarity_two_images_hog(img1: np.ndarray, img2: np.ndarray) -> np.ndarray:\n hog_image1 = hog_of_image(img1)\n hog_image2 = hog_of_image(img2)\n\n max_difference = max(2 * sum_all_magnitudes(img1), 2 * sum_all_magnitudes(img2))\n return 100 - 100 * np.sum(np.absolute(hog_image1 - hog_image2)) / max_difference", "def fitness(ch,distance,shift):\n countryNo=len(ch)\n total = 0.\n for c in range(countryNo):\n total += distance[ch[c]][ch[(c+1)%countryNo]]\n if shift - total < 0:\n return 0\n else:\n return shift - total", "def hamming_distance(StringA,StringB):\n if len(StringA) != String(B):\n raise ValueError(\"The length of sequences are not equal!\")\n return sum(x !=y for (x,y) in zip(StringA,StringB))", "def _weight_hamming(r, l):\n w = 0.54 + 0.46*np.cos(2*pi*r/l)\n w[np.absolute(r)>l/2.]=0\n return w", "def min_ham_dist_helper(pattern, seq):\n k = len(pattern)\n dist = math.inf\n candidates = set()\n for i in range(len(seq)-k+1):\n compare = seq[i:i+k]\n ham = ham_dist(pattern, compare)\n if ham < dist:\n dist = ham\n candidates = set([compare])\n elif ham == dist:\n candidates.add(compare)\n return dist, list(candidates)", "def hamming_distance(string1: str, string2: str) -> int:\n if len(string1) != len(string2):\n raise ValueError(\"String lengths must match!\")\n\n count = 0\n\n for char1, char2 in zip(string1, string2):\n if char1 != char2:\n count += 1\n\n return count", "def hamming2(s1, s2):\n assert len(s1) == len(s2)\n return sum(c1 != c2 for c1, c2 in zip(s1, s2))", "def get_error_hamming_distributions_from_results(results: Sequence[Sequence[Sequence[int]]]) \\\n -> Sequence[Sequence[float]]:\n num_shots = len(results[0])\n n_bits = len(results[0][0]) - 1\n\n hamming_wt_distrs = []\n # loop over all binary strings of length n_bits\n for result, bits in zip(results, all_bitstrings(2 * n_bits)):\n # Input nums are written from (MSB .... LSB) = (a_n, ..., a_1, a_0)\n num_a = bit_array_to_int(bits[:n_bits])\n num_b = bit_array_to_int(bits[n_bits:])\n\n # add the numbers\n ans = num_a + num_b\n ans_bits = int_to_bit_array(ans, n_bits + 1)\n\n # record the fraction of shots that resulted in an error of the given weight\n hamming_wt_distr = [0. for _ in range(len(ans_bits) + 1)]\n for shot in result:\n # multiply relative hamming distance by the length of the output for the weight\n wt = len(ans_bits) * hamming(ans_bits, shot)\n hamming_wt_distr[int(wt)] += 1. 
/ num_shots\n\n hamming_wt_distrs.append(hamming_wt_distr)\n\n return hamming_wt_distrs", "def count_accuracy(G_true, G):\n B_true = G_true != 0# nx.to_numpy_array(G_true) != 0\n B = G != 0# nx.to_numpy_array(G) != 0\n d = B.shape[0]\n # linear index of nonzeros\n pred = np.flatnonzero(B)\n cond = np.flatnonzero(B_true)\n cond_reversed = np.flatnonzero(B_true.T)\n cond_skeleton = np.concatenate([cond, cond_reversed])\n # true pos\n true_pos = np.intersect1d(pred, cond, assume_unique=True)\n\n # false pos\n false_pos = np.setdiff1d(pred, cond_skeleton, assume_unique=True)\n # reverse\n extra = np.setdiff1d(pred, cond, assume_unique=True)\n reverse = np.intersect1d(extra, cond_reversed, assume_unique=True)\n # compute ratio\n pred_size = len(pred)\n cond_neg_size = 0.5 * d * (d - 1) - len(cond)\n fdr = float(len(reverse) + len(false_pos)) / max(pred_size, 1)\n tpr = float(len(true_pos)) / max(len(cond), 1)\n fpr = float(len(reverse) + len(false_pos)) / max(cond_neg_size, 1)\n # structural hamming distance\n B_lower = np.tril(B + B.T)\n pred_lower = np.flatnonzero(B_lower)\n cond_lower = np.flatnonzero(np.tril(B_true + B_true.T))\n extra_lower = np.setdiff1d(pred_lower, cond_lower, assume_unique=True)\n missing_lower = np.setdiff1d(cond_lower, pred_lower, assume_unique=True)\n shd = len(extra_lower) + len(missing_lower) + len(reverse)\n return shd, tpr, fpr, fdr, pred_size", "def calc_flow(hand, bigrams):\n\n hand_flow_score = 0\n for b in bigrams:\n i = hand.index(b[0])\n j = hand.index(b[1])\n hand_flow_score += b[2] * flow[i][j]\n\n return hand_flow_score", "def heuristic(self):\n if self._dist < 0:\n self._dist = 0\n for pos, idx in enumerate(self.config):\n if idx != 0: # Skip blank\n self._dist += manhattan_dist(idx, pos, self.n)\n return self._dist", "def evaluate_hamming_loss(predict, truth):\n predict_max = predict.gt(0.5).long()\n\n batch_eq_num = torch.ne(predict_max, truth).long().sum().item()\n batch_num, label_num = predict_max.shape\n\n return batch_eq_num * 1.0 / (batch_num * label_num)", "def Hamming(data):\r\n N=float(data.shape[0])\r\n temp=np.zeros(data.shape[0])\r\n for u, i in enumerate(data):\r\n temp[u]=(0.54-0.46*np.cos(2*np.pi*(u/N)))*i\r\n return temp", "def manhattan_heuristic(state):\n man_h = 0\n size = len(state)\n for i in range (size):\n for j in range (size):\n if state[i][j] == 0:\n continue\n else:\n man_h = man_h + abs(i - int(state[i][j]/3)) + abs(j - (state[i][j])%3)\n return man_h", "def get_mismatch_matrix(k,m):\n words = get_words(k)\n N = len(words)\n\n mismatch_matrix = np.zeros((N, N))\n for i in range(N):\n for j in range(i, N):\n if Levenshtein.hamming(words[i], words[j]) <= m:\n mismatch_matrix[i,j] = 1/2\n mismatch_matrix[j,i] = 1/2\n\n return mismatch_matrix", "def hammingLoss(y_test, predictions):\n hammingloss = 0.0\n for i in range(y_test.shape[0]):\n aux = 0.0\n for j in range(y_test.shape[1]):\n if int(y_test[i,j]) != int(predictions[i,j]):\n aux = aux+1.0\n aux = aux/y_test.shape[1]\n hammingloss = hammingloss + aux\n \n return hammingloss/y_test.shape[0]", "def hausdorff_distance(image1, image2):\n image1_int = image1.clone(\"unsigned int\")\n image2_int = image2.clone(\"unsigned int\")\n\n libfn = utils.get_lib_fn(\"hausdorffDistance%iD\" % image1_int.dimension)\n d = libfn(image1_int.pointer, image2_int.pointer)\n\n return d", "def _dist(a, b):\n return torch.pow(a - b, 2).sum(-1)", "def hellinger_distance(x, y):\n assert (x.dtype == np.float64 and y.dtype == np.float64) or (\n x.dtype == np.float32 and y.dtype == np.float32)\n assert 
(np.all(x.sum(1) != 0.) and np.all(y.sum(1) != 0.))\n x /= x.sum(1).reshape(x.shape[0], 1)\n y /= y.sum(1).reshape(y.shape[0], 1)\n x = np.sqrt(x)\n y = np.sqrt(y)\n # x (120, 40), y (100, 40), H(x,y) (120, 100)\n xx = np.tile(x, (y.shape[0], 1, 1)).transpose((1, 0, 2))\n yy = np.tile(y, (x.shape[0], 1, 1))\n xx_yy = xx - yy\n res = np.sqrt(np.sum(xx_yy ** 2, axis=-1))\n return np.float64((1. / np.sqrt(2)) * res)", "def calHash(n, m):\n return int(m*BloomFilter.ln2/n)", "def test_multilabel_hamming_distance_differentiability(self, inputs):\n preds, target = inputs\n self.run_differentiability_test(\n preds=preds,\n target=target,\n metric_module=MultilabelHammingDistance,\n metric_functional=multilabel_hamming_distance,\n metric_args={\"num_labels\": NUM_CLASSES, \"threshold\": THRESHOLD},\n )", "def compute_similarity(self, lhs_minhash, rhs_minhash):\n def compute_A(r):\n numerator = r * (1 - r)**((1 << self.b) - 1)\n denominator = 1 - (1 - r)**(1 << self.b)\n return numerator / denominator\n\n lhs_r = len(lhs_minhash) / self.d\n rhs_r = len(rhs_minhash) / self.d\n\n both_r = lhs_r + rhs_r\n lhs_r_ratio = lhs_r / both_r\n rhs_r_ratio = rhs_r / both_r\n\n lhs_A = compute_A(lhs_r)\n rhs_A = compute_A(rhs_r)\n\n C1 = lhs_A * rhs_r_ratio + rhs_A * lhs_r_ratio\n C2 = lhs_A * lhs_r_ratio + rhs_A * rhs_r_ratio\n\n xor = np.bitwise_xor(lhs_minhash, rhs_minhash)\n num_nonzero = np.count_nonzero(xor)\n E = (self.num_hashes - num_nonzero) / self.num_hashes\n similarity = (E - C1) / (1 - C2)\n return similarity if similarity > 0 else 0", "def minkowski(r, rating1, rating2):\r\n distance = 0\r\n commonRatings = False \r\n for key in rating1:\r\n if key in rating2:\r\n distance += math.pow((abs(rating1[key] - rating2[key])), r)\r\n commonRatings = True\r\n if commonRatings:\r\n distance = math.pow(distance, 1/r)\r\n return distance\r\n else:\r\n return -1 #Indicates no ratings in common\r", "def height_similarity(h1, h2, condition):\n if h1 and h2 and len(condition) > 0:\n if h2 < condition[0] or h2 > condition[1]:\n return 0.5\n gap = abs(h1 - h2)\n if 0 <= gap and gap < 10:\n return 0.8\n elif 10 <= gap and gap < 20:\n return 0.9\n else:\n return 1.0\n else:\n return 0.0", "def hamming(string1, string2):\n\n strlen1 = len(string1)\n strlen2 = len(string2)\n\n if strlen2 < strlen1:\n strlength = strlen2\n else:\n strlength = strlen1\n\n hamcount = 0\n for i in range(strlength):\n if string1[i] != string2[i]:\n hamcount += 1\n\n return(hamcount)", "def calc_heuristic(self, state):\n h = 0\n board = state.board.array\n\n for i in range(self._n):\n for j in range(self._n):\n\n if board[i][j] != space_rep:\n tile_as_number = board[i][j]\n correct_x = (tile_as_number - 1) // self._n\n correct_y = (tile_as_number - 1) % self._n\n else:\n continue\n h += calc_diffs(i, j, correct_x, correct_y)\n return h", "def manhattan(rating1, rating2):\r\n distance = 0\r\n commonRatings = False \r\n for key in rating1:\r\n if key in rating2:\r\n distance += abs(rating1[key] - rating2[key])\r\n commonRatings = True\r\n if commonRatings:\r\n return distance\r\n else:\r\n return -1 #Indicates no ratings in common\r", "def _local_improvement(self, folded_design):\n differing_sites = _string_difference_indices(\n self.target.dot_bracket, folded_design\n )\n hamming_distances = []\n for mutation in product(\"AGCU\", repeat=len(differing_sites)):\n mutated = self.design.get_mutated(mutation, differing_sites)\n folded_mutated, _ = fold(mutated.primary)\n hamming_distance = hamming(folded_mutated, self.target.dot_bracket)\n 
hamming_distances.append(hamming_distance)\n if hamming_distance == 0: # For better timing results\n return 0\n return min(hamming_distances)", "def all_match():\n S1=Spectrum.Spectrum()\n S1.add_peak(50.7,234)\n S1.add_peak(54.6,585)\n S1.add_peak(60.7,773)\n S1.add_peak(65.6,387)\n S1.add_peak(87.7,546)\n S1.add_peak(104.6,598)\n S1.pep_mass=100\n S1.euclidean_scale()\n\n S2=Spectrum.Spectrum()\n S2.add_peak(50.5,234/2)\n S2.add_peak(54.8,585/2)\n S2.add_peak(61.0,773/2)\n S2.add_peak(65.4,387/2)\n S2.add_peak(88.0,546/2)\n S2.add_peak(104.3,598/2)\n S2.pep_mass=100\n S2.euclidean_scale()\n\n score,peaks=similarity.cosine_score_max(S1,S2)\n assert peaks==6, \"Incorrect number of peaks matched with greedy method\"\n assert math.isclose(score,1.0), \"Incorrect score with greedy method\"\n\n score,peaks=similarity.cosine_score_greedy(S1,S2)\n assert peaks==6, \"Incorrect number of peaks matched with maximum weighted method\"\n assert math.isclose(score,1.0), \"Incorrect score with maximum weighted method\"", "def hss(self):\n return 2 * (self.table[0, 0] * self.table[1, 1] - self.table[0, 1] * self.table[1, 0]) / (\n (self.table[0, 0] + self.table[0, 1]) * (self.table[0, 1] + self.table[1, 1]) +\n (self.table[0, 0] + self.table[1, 0]) * (self.table[1, 0] + self.table[1, 1]))", "def distance_score(rows):\n # If there's only one word, everyone wins!\n if len(rows[0]) <= 2:\n return dict([(row[0], 1.0) for row in rows])\n\n # Initialize the dictionary with large values\n mindistance = dict([(row[0], 1000000) for row in rows])\n\n for row in rows:\n dist = sum([abs(row[i]-row[i-1]) for i in range(2, len(row))])\n # get the min distance for this url\n if dist < mindistance[row[0]]:\n mindistance[row[0]] = dist\n return normalize_scores(mindistance, small_is_better=1)" ]
[ "0.7202649", "0.71405065", "0.7085281", "0.7053139", "0.6996998", "0.6948701", "0.69338876", "0.69194096", "0.6818797", "0.6787892", "0.6770964", "0.6747005", "0.6712575", "0.6708986", "0.6693751", "0.6689196", "0.6669471", "0.665242", "0.66389847", "0.6632668", "0.6616107", "0.6600993", "0.6599551", "0.65896875", "0.65320724", "0.6522679", "0.6512693", "0.6500109", "0.6481368", "0.647651", "0.6471736", "0.6469627", "0.6464517", "0.6438192", "0.6355073", "0.63534516", "0.63233906", "0.63148415", "0.6314463", "0.62198895", "0.61963904", "0.6193078", "0.61290956", "0.6111136", "0.60989535", "0.60985434", "0.60441065", "0.603041", "0.6021563", "0.6008713", "0.6005037", "0.5976709", "0.5970646", "0.59638804", "0.59573174", "0.59389365", "0.59224606", "0.58791953", "0.58707565", "0.5852966", "0.5852394", "0.58370095", "0.58107996", "0.57984877", "0.5798346", "0.5779114", "0.5776339", "0.5773064", "0.5773064", "0.57712454", "0.57649076", "0.57584864", "0.5756729", "0.5720201", "0.57158315", "0.57090604", "0.5707919", "0.56980646", "0.5698025", "0.5669609", "0.56638277", "0.56599504", "0.56536454", "0.5631992", "0.5625227", "0.5617375", "0.5612019", "0.56110483", "0.5609639", "0.56084275", "0.5605002", "0.560424", "0.5591135", "0.55841506", "0.5577227", "0.55697215", "0.55696166", "0.555335", "0.55471474", "0.55436224", "0.55392337" ]
0.0
-1
Returns the total number of bytes occupied by the filter object
def get_size(self):
    return (
        sys.getsizeof(self.children)
        + sys.getsizeof(self.parent)
        + sys.getsizeof(self.dataset_id)
        + sys.getsizeof(self.k)
        + self.filter.get_size()
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __len__(self):\n return sum(f.count for f in self.filters)", "def container_size(self):\n import cPickle\n import sys\n t = cPickle.dumps(self.filter_bitarray)\n return sys.getsizeof(t)", "def capacity(self):\n return sum(f.capacity for f in self.filters)", "def filtered_count(self) -> int:\n return self.__filtered_count", "def size(self):\n ret = 0\n for ii in self.__data:\n ret += int(ii.get_size())\n return ret", "def size(self):\n\t\treturn self._count", "def count(self):\n\t\treturn sum(read.copy for read in self.__iter__())", "def __len__(self):\n return self.flat_image.size", "def get_size(self):\n cum_size = 0\n for stream in self.__streams.values():\n cum_size += sys.getsizeof(stream)\n for trace in stream:\n cum_size += sys.getsizeof(trace)\n cum_size += sys.getsizeof(trace.stats)\n cum_size += sys.getsizeof(trace.stats.__dict__)\n cum_size += sys.getsizeof(trace.data)\n cum_size += trace.data.nbytes\n # Add one percent buffer just in case.\n return cum_size * 1.01", "def nbytes(self):\n\n return self.data.type.datasize", "def size(self) -> int:\n return sum(ob.size for ob in self.objects.ravel())", "def __len__(self):\n return sum(self.size_freqs.values())", "def size(self):\n return len(self.buffer)", "def __len__(self):\n return self._used - self._deleted", "def size(self):\n size = 0\n size += self.data.size * sys.getsizeof(self.data)\n return size / 1024.0 / 1024.0 / 1024.0", "def __len__(self) -> int:\n if self.preload:\n return len(self.data_ram)\n else:\n return len(self.data)", "def nbytes(self) -> int:\n return self._nbytes(False)", "def __len__(self):\n return len(self.bytes)", "def get_size(self) -> int:\n total_size = 0\n for entry in self.__entries:\n total_size += entry.get_size()\n return total_size", "def getLength(self):\n return self.count", "def get_length(self):\n return self.resource.get_size()", "def __len__(self):\n\n try:\n return len(self.counts)\n except SpectrumError:\n return len(self.cps)", "def __len__(self):\n return len(self.buffer)", "def __len__(self):\n return len(self.buffer)", "def __len__(self) -> int:\n return len(self.buffer)", "def get_size(self):\n return len(self.cache)", "def get_length(self):\n if self.opt.num_buckets > 1:\n return sum([len(bucket) for bucket in self.data])\n else:\n return len(self.data)", "def _size(self):\n return self._logicalSize", "def getLength(self):\n return len(self.entries)", "def count(self):\n return self.size()", "def nbytes(self):\n # type: () -> int\n size = 0\n for chunk in self.data.chunks:\n for buf in chunk.buffers():\n if buf is not None:\n size += buf.size\n return size", "def nbytes(self):\n # type: () -> int\n size = 0\n for chunk in self.data.chunks:\n for buf in chunk.buffers():\n size += buf.size\n return size", "def length(self):\n return self.count", "def size(self):\n return dict.__len__(self)", "def size(self) -> int:\n return len(self.event_buffer)", "def size(self) -> int:", "def size(self):\r\n return self.__length", "def total_length():\n return", "def size(self) -> int:\n return self._fock.size", "def getSize(self):\n return self.bf.memory()", "def size(self):\n\t\treturn len(self.cache)", "def size(self):\n return len(self.data)", "def size(self):\n return len(self.data)", "def size(self):\n return len(self.data)", "def size(self):\n return len(self.data)", "def size(self):\n return len(self.data)", "def __len__(self):\n if self._buffer is not None:\n if self._header.value_type in b'ZBH':\n return len(self._buffer)\n else:\n return 1\n else:\n return 0", "def 
totalsize(self):\n return sum([sz for sz in self.iterate()])", "def size(self):\n return self.data.size", "def count(self):\r\n return self.data_array.size", "def size(self):\r\n return self.info().size", "def size(self) -> int:\n return self.stat().size", "def num_total_logical_bytes(self) -> str:\n return pulumi.get(self, \"num_total_logical_bytes\")", "def size(self):\n return self.__length", "def length(self):\n # TODO: Count number of key-value entries in each of the buckets\n return self.size\n # for bucket in self.buckets():", "def __len__(self):\n total_objs = 0\n\n if self._shelve is not None:\n total_objs += len(self._shelve)\n\n if self._dict is not None:\n total_objs += len(self._dict)\n\n return total_objs", "def total_length(self):\n raise NotImplementedError()", "def __len__(self):\n if not hasattr(self.limitedstream, \"limit\"):\n return 0\n return self.limitedstream.limit", "def total_length(self):\n return self.length", "def __len__(self):\r\n return numBits(self.n)", "def size(self):\n return self.__size", "def get_size(self):", "def digest_size(self):\n\n return self.__digest_size", "def __len__(self):\n return len(self.raw)", "def size(self):\n return self._N", "def nbytes(self):\n # Equivalent to self.itemsize * self.size\n return self.initial_value.nbytes", "def size(self):\n return len(self.cache)", "def size(self):", "def __len__(self):\n return sum(l for l, op,in self.items() \\\n if op in Cigar.read_consuming_ops)", "def size(self) -> int:\r\n return self.da.length()", "def __len__(self):\n return self._count", "def __len__(self):\n return self._count", "def _nbytes(self, deep: bool = False) -> int:\n # for implementations with no useful getsizeof (PyPy)\n objsize = 24\n\n level_nbytes = sum(i.memory_usage(deep=deep) for i in self.levels)\n label_nbytes = sum(i.nbytes for i in self.codes)\n names_nbytes = sum(getsizeof(i, objsize) for i in self.names)\n result = level_nbytes + label_nbytes + names_nbytes\n\n # include our engine hashtable\n result += self._engine.sizeof(deep=deep)\n return result", "def size(self):\n return self._size", "def size(self) -> int:\n size = self.da.length()\n return size", "def __len__(self):\n\n return len(self.data) * 8", "def __len__(self):\n return self._count()", "def __len__(self) -> int:\n return self.disp_size ** 2", "def __len__(self):\n # TODO: Is this method used?\n return self._info['length']", "def size(self):\r\n return self._size", "def __len__(self) -> int:\n if self.serialize_data:\n return len(self.data_address)\n else:\n return len(self.data_infos)", "def size(self) -> int:\n return self.da.length()", "def size(self) -> int:\n return self.da.length()", "def size(self) -> int:\n return self.da.length()", "def size(self) -> int:\n return self.da.length()", "def get_size(self):\n return len(self.get_payload()) + 4", "def size(self, index):\n return self.d1.size(index)\n # FILTER BASED ON D1", "def size(self):\n pass", "def size(self):\n pass", "def size(self):\n pass", "def __len__(self):\n return self._num_samples_per_file * len(self._files) // self._world_size", "def size(self):\n return self._length", "def size(self):\n return len(self._data)", "def getSize(self) -> int:\n ...", "def getSize(self) -> int:\n ...", "def getSize(self):\r\n list = self.getList()\r\n return len(list)", "def __len__(self):\n return self.total", "def nbytes(self):\n dtype = self.config[\"dtype\"]\n if dtype is None:\n return None\n\n size = reduce(mul, self.shape, 1)\n nbytes = size * dtype.itemsize\n\n if getattr(self, \"masked\", 
True):\n nbytes += size\n\n return nbytes", "def __len__(self):\n return self.size_", "def count(self):\n # TODO not implemented yet\n return 0" ]
[ "0.78136486", "0.73444617", "0.73030394", "0.6866789", "0.6819548", "0.67372525", "0.6728737", "0.67182195", "0.66980165", "0.6695262", "0.6689676", "0.6685823", "0.6674033", "0.6666413", "0.665279", "0.6636472", "0.6635328", "0.6619961", "0.6601492", "0.6596799", "0.6581194", "0.65761584", "0.6561601", "0.6561601", "0.6557488", "0.6547242", "0.65399474", "0.6535526", "0.65326524", "0.65265626", "0.6524807", "0.6510062", "0.64954704", "0.64937633", "0.64929605", "0.6487915", "0.6486541", "0.64846236", "0.6480925", "0.6478867", "0.6478617", "0.64784503", "0.64784503", "0.64784503", "0.64784503", "0.64784503", "0.64730495", "0.6463918", "0.64622474", "0.6461862", "0.64616567", "0.6461088", "0.6460886", "0.64592046", "0.6452885", "0.6452547", "0.6447709", "0.6437205", "0.64313656", "0.64219713", "0.6421755", "0.6419912", "0.64182186", "0.64110357", "0.6410568", "0.6401633", "0.6397821", "0.6397117", "0.6392775", "0.6389255", "0.63881624", "0.63881624", "0.6386856", "0.6384977", "0.6382869", "0.63813907", "0.6381184", "0.6356752", "0.635604", "0.6355702", "0.6355098", "0.6353598", "0.6353598", "0.6353598", "0.6353598", "0.63509697", "0.63478905", "0.63449985", "0.63449985", "0.63449985", "0.6344236", "0.63417864", "0.63358635", "0.6334654", "0.6334654", "0.63318634", "0.6331693", "0.6321745", "0.632146", "0.6321391" ]
0.7215649
3
Change the position of the turtle.
def setposition(self, x, y, bearing=None):
    self.posX = x
    self.posY = y
    # self check of position inside canvas
    if self.posX < self._min_x:
        self.posX = self._min_x
    if self.posY < self._min_y:
        self.posY = self._max_y
    if self.posX > self._max_x:
        self.posX = self._max_x
    if self.posY > self._max_y:
        self.posY = self._max_y
    if bearing is None:
        self._add_point()
    elif isinstance(bearing, int):
        self.setbearing(bearing)
    else:
        raise ValueError("Bearing must be an integer")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def repositionTurtle(t, x, y):\n t.up()\n t.goto(x, y)\n t.down()", "def goto(x, y):\n turtleTmp.setposition(x, y)", "def setTurtle(t):\r\n t.pu()\r\n t.goto(initialCoordinates())", "def move_turtle(self):\n self.forward(self.move_speed)", "def set_position(self, x, y):\n self.tx = -x\n self.ty = -y", "def move_turtle(self, x, y):\n tortuga = self.turtle\n if self.capture_mode:\n tortuga.setheading(tortuga.towards(x, y))\n tortuga.setpos(x, y)\n self.add_punto(Punto(x, y))", "def setPosition(position):", "def drawTo(self, x, y):\n assert (type(x) in [int, float]), \"parameter x:%s is not a valid number\" % `x`\n assert (type(y) in [int, float]), \"parameter y:%s is not a valid number\" % `y`\n self._turtle.setposition(x, y)", "def set_new_location(self, xPos, yPos):", "def move(self,x,y):\n assert (type(x) in [int, float]), \"parameter x:%s is not a valid number\" % `x`\n assert (type(y) in [int, float]), \"parameter y:%s is not a valid number\" % `y`\n d = self._turtle.isdown()\n if d:\n self._turtle.penup()\n self._turtle.setposition(x,y)\n if d:\n self._turtle.pendown()", "def setPos(self,pos):\n self.Xpos,self.Ypos=pos", "def set_position(self, new_pos):\n self._position = new_pos", "def reset(self):\n self._turtle.clear()\n self._turtle.setposition((0,0)) \n self._turtle.shape('turtle')\n self.color = 'red'\n self.heading = 180\n self.speed = 0", "def move(self, p):\r\n self.position.setvalue(p)", "def moveturtle(x,y,t):\n t.penup()\n t.goto(x,y)\n t.pendown()", "def setPosition(self):\n self.data['pos-x'] = \"%s\" % self.x()\n self.data['pos-y'] = \"%s\" % self.y()", "def move(self,x,y):\n assert (type(x) in [int, float]), \"parameter x:%s is not a valid number\" % `x`\n assert (type(y) in [int, float]), \"parameter y:%s is not a valid number\" % `y`\n fstate = self._turtle.fill()\n if fstate: # only need to do this if in mid-fill\n self._turtle.fill(False)\n self._turtle.penup()\n self._turtle.setposition(x,y)\n self._turtle.pendown()\n if fstate: # only need to do this if in mid-fill\n self._turtle.fill(True)", "def set_position(self, x, y):\n self.position.x = x\n self.position.y = y\n self.rect.topleft = x, y", "def set_position(self, x: float, y: float):\n self._shape.body.position.x = x\n self._shape.body.position.y = y", "def set_position(self, x, y):\n self.geometry('%s%s' % (x, y))", "def init_turtle():\n turtle.up()\n turtle.home()", "def set_pos(self, x):\n self._pos = x", "def setPosition(self,newPos):\n self._position = newPos", "def set_position( self, posx, posy ):\n\n self.__foodx = posx\n self.__foody = posy", "def set_position(self, position):\n self.gripper_io.set_signal_value(\"position_m\", position)", "def setDesiredPosition(self, x, y):\n (self.setX, self.setY) = (x , y)", "def set_position(self, position):\n self.set_current_position(position)", "def change_pos(self, direction):\n if direction == Direction.UP:\n self._y_pos -= 1\n elif direction == Direction.DOWN:\n self._y_pos += 1\n elif direction == Direction.LEFT:\n self._x_pos -= 1\n elif direction == Direction.RIGHT:\n self._x_pos += 1\n self._coordinates = self.coordinates()", "def up():\n turtleTmp.penup()", "def position(self, position):\n self.move_to(position)", "def teleport(self, x, y):\n self.rect.x = x\n self.rect.y = y", "def reset_position(self):\n self.goto(STARTING_POSITION)", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", 
"def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def _move_tetrino(self, tetrino, x, y):\n tetrino.location_offset[constant.X] += x\n tetrino.location_offset[constant.Y] += y\n tetrino.update_location()", "def set_position(self, position):\n self.position = position", "def turtle_movement(turtle_shape, bg_color, turtle_color, turtle_speed):\n turtle_name = initialize(turtle_shape, bg_color,\n turtle_color, turtle_speed)\n\n for i in range(36):\n for i in range(4):\n turtle_name.forward(200)\n turtle_name.right(90)\n turtle_name.right(10)", "def setRobotPosition(self, position):\n posx = position.getX()\n posy = position.getY()\n self.position = Position(posx, posy)\n #raise NotImplementedError", "def draw_petal():\n turtle.forward(30)\n turtle.left(45)\n turtle.forward(30)\n turtle.left(135)\n turtle.forward(30)\n turtle.left(45)\n turtle.forward(30)\n turtle.left(135)", "def draw_petal():\n turtle.forward(30)\n turtle.left(45)\n turtle.forward(30)\n turtle.left(135)\n turtle.forward(30)\n turtle.left(45)\n turtle.forward(30)\n turtle.left(135)", "def set_position(self, position):\n raise NotImplementedError()", "def move_to(self, x, y):\n self.x = x\n self.y = y", "def move_to(self, x, y):\n self.x = x\n self.y = y", "def set_position(self):\n raise RuntimeError(\"the 'set_position' method must be overriden\")", "def set_position(self, pos):\n self.ref_pos = pos", "def SetPosition(self, pos):\n self._pos = pos", "def light_positions(turtle, color, pos, hide=0):\n if hide == 1:\n turtle.hideturtle()\n turtle.penup()\n turtle.forward(40)\n turtle.left(90)\n turtle.forward(pos)\n turtle.shape(\"circle\")\n turtle.shapesize(3)\n turtle.fillcolor(color)", "def set_location(self, x, y):\n self.scene.set_location(x, y)\n self.redraw()", "def reset_position(self, x, y):\n\t\tself.grid[x][y] = self.terminal", "def set_position(self, x, y):\n self.pos = pygame.Rect(x, y, 0, 0)", "def force_set(self, pos):\n self.rect.center = pos", "def __init__(self):\r\n pen.up()\r\n pen.setheading(0)\r\n pen.hideturtle()\r\n turtle.title(\"My name\")\r\n pen.speed(0)\r\n pen.right(90)\r\n pen.forward(40)\r\n pen.left(90)", "def draw_t(self):\r\n pen.forward(20)\r\n pen.left(90)\r\n pen.down()\r\n pen.forward(40)\r\n pen.right(90)\r\n pen.up()\r\n pen.forward(20)\r\n pen.down()\r\n pen.back(40)\r\n pen.up()\r\n pen.right(90)\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.forward(50)", "def setRobotPosition(self, position):\n self.position = position", "def setRobotPosition(self, position):\n self.position = position", "def position(self, x, y):\n self.x = x \n self.y = y\n self.pos[0] = x \n self.pos[1] = y", "def set_position(self, position: Union[pygame.math.Vector2,\n Tuple[int, int],\n Tuple[float, float]]):\n super().set_position(position)\n self.text_block.set_position(position)", "def setPosition(self):\n # determine posX, posY for battle\n (x1,y1) = globals.battlemapQuadrants[self.systemGrid]\n self.posX = x1+self.setX\n self.posY = y1+self.setY", "def setRoboPos(self,x,y):\r\n self.RoboPosX=x\r\n self.RoboPosY=y", "def _moveTo(self, pt):\n self._handleAnchor()\n t = \"M%s\" % (pointToString(pt))\n self._commands.append(t)\n self._lastCommand = \"M\"\n self._lastX, self._lastY = pt", "def move(self, friction = 0.0):\n try:\n newX = self.xcor() + self.dx\n newY = self.ycor() + self.dy\n self.goto(newX, newY)\n # apply friction\n self.dx = self.dx * (1 - friction)\n self.dy = self.dy * (1 - friction)\n except:\n print(\"Error, probably because dx and dy are not properties of 
the turtle\")", "def set_position(self, position):\r\n\r\n self.position = position\r\n if (self.rect):\r\n self.rect.x = position[0]\r\n self.rect.y = position[1]", "def move(self,x,y):\n self.pos.x = x\n self.pos.y = y", "def setPos(self, pos):\n self.cameraNode.setPos(pos)", "def set_pos(self, p: tuple) -> None:\n self.pos = p", "def draw_triangle():\r\n turtle.forward(100)\r\n turtle.left(120)\r\n turtle.forward(100)\r\n turtle.left(120)\r\n turtle.forward(100)\r\n turtle.left(120)", "def set_pos(self, x, y, orien):\n self.pos_x = x\n self.pos_y = y\n self.orientation = orien", "def draw_square():\r\n turtle.forward(100)\r\n turtle.left(90)\r\n turtle.forward(100)\r\n turtle.left(90)\r\n turtle.forward(100)\r\n turtle.left(90)\r\n turtle.forward(100)\r\n turtle.left(90)", "def change_loc_coords(self, field_size):\r\n self.top_left_corner = _get_center_writing(self.button) # sets new center\r\n font_size = int(field_size * 2) # resizes font\r\n self.font = pygame.font.SysFont(None, font_size) # updates font\r", "def position(x, y):\n command([x + 0x80, y + 0x40])", "def reset(self):\n self.x_pos = 10\n self.y_pos = 10\n self.line_height = 15", "def move(self):\n \n self.position = self.wander()", "def reset_position(self):\n self.rect.left, self.rect.top = self.start_pos", "def line():\n tt.left(90)\n tt.down()\n tt.forward(50)\n tt.up()\n tt.right(90)\n tt.forward(10)\n tt.right(90)\n tt.forward(50)\n tt.left(90)", "def move_to_position1(self):", "def setPos(self, x, y, anchor='ll'):\n self.transform.setPos(glm.vec3(x, y, 0))\n if anchor == 'ul':\n offx = 0\n offy = - self.font.table['ascent']\n elif anchor == 'uc':\n offx = - self._labelWidth / 2\n offy = - self.font.table['ascent']\n elif anchor == 'ur':\n offx = - self._labelWidth\n offy = - self.font.table['ascent']\n elif anchor == 'cl':\n offx = 0\n offy = self._labelHeight / 2 - self.font.table['ascent']\n elif anchor == 'cc':\n offx = - self._labelWidth / 2\n offy = self._labelHeight / 2 - self.font.table['ascent']\n elif anchor == 'cr':\n offx = - self._labelWidth\n offy = self._labelHeight / 2 - self.font.table['ascent']\n elif anchor == 'll':\n offx = 0\n offy = self._labelHeight - self.font.table['ascent']\n elif anchor == 'lc':\n offx = - self._labelWidth / 2\n offy = self._labelHeight - self.font.table['ascent']\n elif anchor == 'lr':\n offx = - self._labelWidth\n offy = self._labelHeight - self.font.table['ascent']\n else:\n raise SystemExit(f\"Unimplemented anchor '{anchor}'\")\n self.model.setPos(glm.vec3(offx, offy, 0))", "def move(self):\n \n self.position = self.explore()", "def set_position(self, position):\n self.position = tuple(position)", "def shapes2():\r\n turtle.up()\r\n turtle.backward(100)\r\n turtle.left(270)\r\n turtle.forward(100)\r\n turtle.left(90)\r\n turtle.backward(700)\r\n shapes()", "def SetCurrentPosition(self,pos):\n\n if self.Reverse: pos*=-1\n self.Bus.Transaction(chr(self.Address)+chr(0x40)+struct.pack('@l',pos))", "def setRobotPosition(self, position):\n self.position = position\n #raise NotImplementedError", "def move_to_position2(self):", "def setzePosition(self, x, y):\n self.zielX = x\n self.zielY = y", "def set_position(self, new_pos, units=\"bohr\"):\n from numpy import array\n # Convert the input to the right units.\n pos = array(new_pos)\n if _IsAngstroem(units):\n pos /= AU_to_A\n if _IsAngstroem(self):\n pos *= AU_to_A\n pos = [x for x in pos]\n\n # Insert\n if 'r' in self.store:\n self.store['r'] = pos\n else:\n self.store[self.sym] = pos\n pass", "def _set_origin(self):\n self 
+= helper.circle(cx=self.__dict__['x'], cy=self.__dict__['y'], r=2, fill=\"black\", stroke=\"black\", style=\"fill-opacity: 50%\")\n self += helper.text(\"(0,0)\", x=self.__dict__['x']+5, y=self.__dict__['y']-5, style=\"fill-opacity: 50%\")", "def set_position(self, updated):\n self.buff_x = updated[0]\n self.buff_y = updated[1]", "def move(self):\r\n segments = len(self.all_turtles) - 1\r\n for i in range(len(self.all_turtles)):\r\n if segments == 0:\r\n self.all_turtles[segments].forward(MOVE_DISTANCE)\r\n else:\r\n new_x = self.all_turtles[segments - 1].xcor()\r\n new_y = self.all_turtles[segments - 1].ycor()\r\n self.all_turtles[segments].goto(new_x, new_y)\r\n segments -= 1", "def reset_position(self): \n self.rect.x = 400\n self.rect.y = 400\n \n # Specifies the Player's spawnpoint as maze_arrangement[8][8], representing\n # the tile in the center of the maze \n self.__minotaur_x = 8\n self.__minotaur_y = 8", "def __init__(self, pos=(0, 0)):\n super().__init__() # Call 'turtle' initiation\n self.penup() # Stop displaying trail\n self.shapesize(stretch_wid=1, stretch_len=1) # Set dimensions of ball object to same height and width\n self.color(\"white\") # Set colour to white\n self.shape(\"circle\") # Set ball shape to round\n self.setpos(pos) # Move ball to desired position on screen\n self.x_dir = 1 # Set ball horizontal movement to right\n self.y_dir = 1 # Set ball vertical movement to up", "def set_node_position(self, node, x, y, z=0):\n pass", "def draw_flower_advanced():\n draw_flower()\n turtle.left(90)\n turtle.up() #Raise pen for movement\n turtle.forward(150)\n turtle.left(90)\n turtle.forward(150)\n turtle.right(90)\n turtle.down() #lower pen for drawing", "def setPosition(self, position, view) -> None:\n ..." ]
[ "0.7971479", "0.7698009", "0.73912793", "0.71698403", "0.70017356", "0.69530845", "0.6844458", "0.6767161", "0.6726359", "0.6678807", "0.651918", "0.651298", "0.64901376", "0.64820397", "0.6479972", "0.6466013", "0.6383914", "0.63821214", "0.6373982", "0.63284934", "0.63246614", "0.63178825", "0.6300733", "0.62726474", "0.6255098", "0.62540346", "0.6247374", "0.6237543", "0.62335384", "0.6216802", "0.62153983", "0.6214716", "0.62134564", "0.62134564", "0.62134564", "0.62134564", "0.62134564", "0.62134564", "0.62134564", "0.62134564", "0.62134564", "0.62134564", "0.62134564", "0.617609", "0.616893", "0.6164123", "0.615811", "0.6154595", "0.6154595", "0.614859", "0.61397946", "0.61397946", "0.61201257", "0.609854", "0.60932785", "0.60924405", "0.60844016", "0.6079439", "0.6054858", "0.6054001", "0.6051021", "0.6030474", "0.60281456", "0.60281456", "0.6025337", "0.60244733", "0.60184467", "0.6014132", "0.6011252", "0.5997343", "0.5994473", "0.59944355", "0.5987633", "0.59734964", "0.5969162", "0.59686756", "0.59531134", "0.59521216", "0.5918809", "0.591635", "0.59110636", "0.5895624", "0.5881537", "0.5866618", "0.58356607", "0.5834106", "0.583113", "0.5805896", "0.58030987", "0.5797766", "0.5793261", "0.57868713", "0.5785003", "0.57780063", "0.5776166", "0.577312", "0.57710856", "0.57674706", "0.57663393", "0.5760431", "0.5754273" ]
0.0
-1
Change the bearing (angle) of the turtle.
def setbearing(self, bearing):
    diff = self.bearing - bearing
    self.b_change = diff
    self.bearing = bearing
    self._add_point()
    self.b_change = 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bearing(self, value: int):\n self._bearing = value", "def set_angle(self, ang):\n if ang < 0:\n ang = 0\n elif ang > 180:\n ang = 180\n dutyCycle = 5 + (ang*5/180)\n self.servoPort.ChangeDutyCycle(dutyCycle)", "def setAngle(self,angle = 2.5):\n pass", "def change_angle(self, new_angle):\r\n self.angle = new_angle", "def set_bearing(self, bearing):\n self._set_sub_text('bearing', text=str(bearing))\n return self", "def change_angle(self, up_or_down):\n self.angle += up_or_down * math.pi / 180", "def settle(self):\n if (self.angle >= self.max_angle) or (\n self.angle <= -self.max_angle\n ): # time to reverse\n print(\"reverse\", self.angle, self.max_angle)\n self.speed *= -0.9 # damped\n self.max_angle *= 0.9\n if self.speed > 0:\n self.angle = self.max_angle\n else:\n self.angle = -self.max_angle\n\n self.angle += radians(self.speed)\n print(self.angle, self.max_angle, self.speed)\n self.x = self.cx + self.length * sin(self.angle)\n self.y = self.cy + self.length * cos(self.angle)", "def set_angle(self, angle=0.0):\n self.angle = angle", "def setAngle(self,a):\n self.angle = a\n if self.drawn == True:\n self.draw()", "def set_angle(self, value):\n if not -90 <= value <= 90:\n raise ValueError('Servo angle must be between -90 and 90 degrees')\n self.duty_cycle = ...", "def setAngle(self, angle):\n self._angle = (angle + math.pi / 2) % math.pi - math.pi / 2\n # self._angle = angle % (2*math.pi)", "def setAngle(self, angle):\n self.vector.angle = angle", "def settiltangle(self, angle):\n tilt = -angle * self._degreesPerAU * self._angleOrient\n tilt = (tilt * math.pi / 180.0) % (2*math.pi)\n self.pen(resizemode=\"user\", tilt=tilt)", "def set_angle(self, angle):\n return self.bot_client.send_command(_Command.SetAngle, angle)", "def change_angle(self, new_angle):\n if type(new_angle) not in [int, float]:\n raise ValueError('angle must be int or float.')\n self.__angle = new_angle", "def rotate_rad(self, angle):\n self.beam_angle += angle\n self.xy = rotate(self.xy, angle)\n self.angle += angle", "def wheel_angle(self, angle):\n self.angle = angle", "def rotate_turtle(angle, mv_direction):\n \n if mv_direction == 1:\n turtle.right(angle)\n else:\n turtle.left(angle)", "def set_angel(self):\n self.angle = math.degrees(math.atan2(self.next.y - self.y, self.next.x - self.x)\n - math.atan2(self.prev.y - self.y, self.prev.x - self.x))\n\n if self.angle < 0:\n self.angle += 360", "def move_turtle(self):\n self.forward(self.move_speed)", "def angle(self, angle):\n self._angle = angle\n self.x_rate = self._rate * cos(self._angle)\n self.y_rate = self._rate * sin(self._angle)", "def set_angle(self, angle):\n new_angle = angle\n\n # Declaring conversion constants\n angle_min = 0\n angle_max = 180\n angle_range = angle_max - angle_min\n dc_range = self._dc_max - self._dc_min\n\n # Enforcing angle range\n if new_angle > angle_max:\n new_angle = angle_max\n elif new_angle < angle_min:\n new_angle = angle_min\n\n # Scaling input angle to an appropriate duty cycle\n duty_cycle = ((dc_range / angle_range) * (new_angle - angle_min)) + self._dc_min\n\n self._servo_pwm.changeDutyCycle(duty_cycle)", "def change_angle_by(self, delta_angle, direction):\n target_angle = round(self.__calc_target_angle(degree_to_radian(delta_angle), direction), 5)\n\n self.move_to_angle(target_angle)\n self.current_angle = target_angle", "def adjAngle(self, amt): \r\n\r\n self.angle = self.angle + radians(amt)\r\n self.redraw()", "def set_angle(self, angle_key: Union[EKT, str], v: float): # -> None:\n ...", "def angle(self, 
angle):\n\n self._angle = angle", "def adjAngle(self, amt):\n \n self.angle = self.angle+radians(amt)\n self.redraw()", "def angle(self, value):\n if value is None:\n value = 0.0\n\n self.__angle = value", "def turn_by(self, dangle, dt):\n # Don't turn too fast\n self.angle += np.clip(dangle, -dt * self.turning_rate, dt * self.turning_rate)\n\n # Keep angle in range [-pi, pi)\n self.angle = normalize_angle(self.angle)", "def set_wrist(self, angle):\n return self.set_servo_angle(protocol.SERVO_HAND, angle)", "def update_bearing(self, bearing):\n q0, q1, q2, q3 = self.q\n v_head = np.array([-2*q2**2 - 2*q3**2 + 1, 2*q0*q3 + 2*q1*q2])\n v_bearing = np.array([np.cos(bearing), np.sin(bearing)])\n y = np.vstack(v_bearing) - np.vstack(v_head)\n\n H = np.zeros((2, 11), dtype=float)\n H[0, 2] = -4*q2\n H[0, 3] = -4*q3\n H[1, 0] = 2*q3\n H[1, 1] = 2*q2\n H[1, 2] = 2*q1\n H[1, 3] = 2*q0\n\n S = H.dot(self.P).dot(H.T) + (1*np.pi/180)**2\n K = self.P.dot(H.T).dot(np.linalg.inv(S))\n x = self.state_vec() + K.dot(y)\n\n self.P = (np.eye(11) - K.dot(H)).dot(self.P)\n self.set_state_vec(x)", "def bounce(self, diff):\n \n self.direction = (180 - self.direction) % 360\n self.direction -= diff", "def srotate(self, angle):\n\n self.angle = self.angle + angle", "def setAngle(self, value):\n n, a = Vector.polar(self.components)\n self.components = Vector.cartesian([n, value])", "def bounce(self, diff):\r\n\r\n self.direction = (180 - self.direction) % 360\r\n self.direction -= diff", "def setTurtle(t):\r\n t.pu()\r\n t.goto(initialCoordinates())", "def adjustAngle(self, angle):\n\t\tif self.timeout <= 0:\n\t\t\tself.angle = (self.angle + angle) % 360", "def set_angle(self, req_angle):\n self._current_angle = req_angle\n req_angle_pulse = (self._pulse_max - self._pulse_min) / (self._angle_max - self._angle_min) * (\n req_angle - self._angle_max) + self._pulse_max\n self.pwm.set_pwm(SERVO_CHANEL, 0, int(round(req_angle_pulse)))", "def turn_to(self, angle, dt):\n a = normalize_angle(angle - self.angle)\n self.turn_by(a, dt)", "def set_angle(self, value):\n scene = self.scenes[self.current_scene]\n scene.set_perspective(angle=value)\n angle, ratio, near, far = scene.perspective()\n self.redraw()", "def rotate(self, angle):\n n, a = Vector.polar([self.x, self.y])\n a += angle\n self.x = n * cos(a)\n self.y = n * sin(a)", "def change_angle_by(self, delta_angle, divide_count, delay, direction):\n\n target_angle = round(self.__calc_target_angle(degree_to_radian(delta_angle), direction), 5)\n\n self.move_to_angle(target_angle, divide_count, delay)\n self.current_angle = target_angle", "def move_to_angle(self, shoulder_angle, femur_angle, tibia_angle):\n vrep.simxSetJointTargetPosition(self.clientID,\n self.shoulderHandle,\n shoulder_angle,\n vrep.simx_opmode_oneshot)\n\n vrep.simxSetJointTargetPosition(self.clientID,\n self.femurHandle,\n femur_angle * self.yDirection,\n vrep.simx_opmode_oneshot)\n\n vrep.simxSetJointTargetPosition(self.clientID,\n self.tibiaHandle,\n tibia_angle * self.yDirection,\n vrep.simx_opmode_oneshot)", "def tilt(self, angle):\n self.settiltangle(angle + self.tiltangle())", "def set_ang_vel(self, otherframe, value):\n\n self._check_vector(value)\n self._check_frame(otherframe)\n self._ang_vel_dict.update({otherframe: value})\n otherframe._ang_vel_dict.update({self: -value})", "def angle(self) -> float:\n ...", "def rotate(self, angle):\n old_angle, tilt = self.rotation\n new_angle = old_angle + angle\n while new_angle > 90:\n new_angle = new_angle - 90\n while angle < -90:\n new_angle = new_angle 
+ 90\n self.rotation = (new_angle, tilt)", "def angle(self, to_angle):\n\n # Restrict to -90..+90 degrees\n to_angle = int(min(max(to_angle, -90), 90))\n\n ratio = (to_angle + 90) / 180.0\n pulse_range = self.pulse_left_ns - self.pulse_right_ns\n pulse = self.pulse_left_ns - round(ratio * pulse_range)\n\n self.pi.set_servo_pulsewidth(self.gpio, pulse)", "def go_to_angle(user_theta):\n global rate\n theta_new = user_theta - theta\n if theta_new > 0:\n # Left\n while abs(user_theta - theta) > 0.05:\n speed.linear.x = 0\n speed.angular.z = 0.4\n pub.publish(speed)\n rate.sleep()\n else:\n # Take a Right\n while abs(user_theta - theta) > 0.05:\n speed.linear.x = 0\n speed.angular.z = - 0.4\n pub.publish(speed)\n rate.sleep()\n speed.linear.x = 0\n speed.angular.z = 0\n pub.publish(speed)", "def setAzimuthAngle(self, angle):\n angle = int(round(angle))\n if angle != self._azimuth:\n self._azimuth = angle\n self._updateLight()\n self.sigAzimuthAngleChanged.emit()", "def rotate(self,amount):\n self.angle += amount\n if self.drawn == True:\n self.draw()", "def set_back_arm_angle(world_state, ros_util, target_angle):\n \"\"\"rospy.loginfo('Setting back arm angle to %s radian%s...',\n str(target_angle),\n \"\" if target_angle == 1 else \"s\")\"\"\"\n\n if target_angle > world_state.back_arm_angle:\n while target_angle > world_state.back_arm_angle:\n ros_util.publish_actions(\"stop\", 0, 1, 0, 0)\n ros_util.rate.sleep()\n else:\n while target_angle < world_state.back_arm_angle:\n ros_util.publish_actions(\"stop\", 0, -1, 0, 0)\n ros_util.rate.sleep()\n\n ros_util.publish_actions(\"stop\", 0, 0, 0, 0)", "def angle(self, angle: int, time: int = 0, /) -> None:", "def bearing(self) -> int:\n return self._bearing", "def tiltangle(self, angle=None):\n if angle is None:\n tilt = -self._tilt * (180.0/math.pi) * self._angleOrient\n return (tilt / self._degreesPerAU) % self._fullcircle\n else:\n self.settiltangle(angle)", "def tilt(self, angle):\n rot_angle, old_tilt = self.rotation\n new_tilt = old_tilt + angle\n while new_tilt > 90:\n new_tilt = new_tilt - 90\n while angle < -90:\n new_tilt = new_tilt + 90\n self.rotation = (rot_angle, new_tilt)", "def turn_angle(self, angle, speed=1.0):\n mt_buf = bytearray()\n error = random.normalvariate(0.5, self.standard_deviation)\n\n res, ret_ints, ret_floats, ret_strings, ret_buffer = vrep.simxCallScriptFunction(\n self.client_id,\n 'youBot_ref' + self.postfix,\n vrep.sim_scripttype_childscript,\n 'GetRobotAngle', [], [], [], mt_buf,\n BLOCKING_MODE)\n\n start_angle = ret_floats[0] + error\n delta = 0\n\n # вызов скрипта поворота\n vrep.simxCallScriptFunction(\n self.client_id,\n 'youBot_ref' + self.postfix,\n vrep.sim_scripttype_childscript,\n 'Turn', [],\n [speed], [], mt_buf,\n BLOCKING_MODE)\n\n while delta <= angle:\n res, ret_ints, ret_floats, ret_strings, ret_buffer = vrep.simxCallScriptFunction(\n self.client_id,\n 'youBot_ref' + self.postfix,\n vrep.sim_scripttype_childscript,\n 'GetRobotAngle', [], [], [], mt_buf,\n BLOCKING_MODE)\n\n current_angle = ret_floats[0] + error\n delta += math.fabs(current_angle - start_angle)\n start_angle = current_angle\n\n vrep.simxCallScriptFunction(\n self.client_id,\n 'youBot_ref' + self.postfix,\n vrep.sim_scripttype_childscript,\n 'Turn', [], [0.0], [], mt_buf,\n BLOCKING_MODE)", "def rotate(self, angle):\n self._surf = pygame.transform.rotate(self._surf, angle).convert_alpha()", "def init_turtle():\n turtle.up()\n turtle.home()", "def startAngMovementY(self):\n self.boolrot[1] = True", "def advanceTan():\n global 
tanBallX, speed\n tanBallX += speed\n if tanBallX <= -4:\n # Reached the bottom - switch directions\n tanBallX = -4\n speed = -speed\n elif tanBallX >= 2.8:\n # Reached the top - switch directions\n tanBallX = 2.8\n speed = -speed", "def set_rotation(self, angle):\n self._rotation = angle\n self._reset_slot_bounds()", "def setBrake(self, brake):\r\n if brake < 0.0:\r\n brake = 0.0\r\n elif brake > 1.0:\r\n brake = 1.0\r\n brake *= self.maxBrake\r\n for tire in self.tires:\r\n if tire.brake:\r\n tire.shape.setBrakeTorque( brake )", "def bearing(self):\n return self['bearing']", "def on_cam_base_set_angle_btn_clicked(self):\n pitch = self.cam_base_pitch_hSlider.value()\n yaw = self.cam_base_yaw_hSlider.value()\n pitch, yaw = self.control1.device.set_init_basecam_angle(pitch, yaw)\n status = \"set INIT angles as, pitch: \" + str(pitch) + \", yaw: \" + str(yaw)\n self.cam_set_status_txt(status)", "def turn(self, is_right):\n if is_right:\n self.heading.rotate(1)\n else:\n self.heading.rotate(-1)", "def turn(self, angle):\n self.logger.debug(\"turn \" + str(angle))", "def _rotate(self, angle):\n angle *= self._degreesPerAU\n self._orient = self._orient.rotate(angle)", "def steer(self, angle_diff):\n\n try:\n angle = self.angle + angle_diff\n\n # ensure that the angle stays within the limits\n if angle > const.Driving.MAX_STEERING_ANGLE:\n angle = const.Driving.MAX_STEERING_ANGLE\n elif angle < const.Driving.MIN_STEERING_ANGLE:\n angle = const.Driving.MIN_STEERING_ANGLE\n\n self.angle = angle\n except TypeError:\n raise TypeError(\"Tried to change the steering angle by a non-numerical value.\")", "def _rotate(self, angle):\n if self.undobuffer:\n self.undobuffer.push((\"rot\", angle, self._degreesPerAU))\n angle *= self._degreesPerAU\n neworient = self._orient.rotate(angle)\n tracing = self.screen._tracing\n if tracing == 1 and self._speed > 0:\n anglevel = 3.0 * self._speed\n steps = 1 + int(abs(angle)/anglevel)\n delta = 1.0*angle/steps\n for _ in range(steps):\n self._orient = self._orient.rotate(delta)\n self._update()\n self._orient = neworient\n self._update()", "def right(self, angle):\r\n self.dir += math.radians(angle)", "def bearing(self) -> typing.Union[None, float]:\n bearing = self.data[1]\n return float(bearing.replace('°', '')) if bearing else None", "def reset(self):\n self._turtle.clear()\n self._turtle.setposition((0,0)) \n self._turtle.shape('turtle')\n self.color = 'red'\n self.heading = 180\n self.speed = 0", "def bearing(start,finish):\n\n s = math.pi * np.squeeze(np.array(start)) / 180\n f = math.pi * np.squeeze(np.array(finish)) / 180\n\n y = math.sin(f[1] - s[1]) * math.cos(f[0])\n x = math.cos(s[0])*math.sin(f[0]) - math.sin(s[0])*math.cos(f[0])*math.cos(f[1] - s[1])\n\n return math.atan2(y,x)/math.pi * 180 % 360", "def set_servo_angle(self, servo_number, angle):\n cmd = protocol.SET_ANGLE.format(str(servo_number), str(angle))\n response = self.__send_and_receive(cmd)\n if response.startswith(protocol.OK.lower()):\n return True\n else:\n return False", "def up(self, angle):\n self.pitch(angle)", "def move(self, dt):\n lims = self.settings['agent']['jointLimits']\n # print '[move] curr joint Angle:'\n # print self.jointAngle\n # print '[move] curr speed:'\n # print self.speed\n\n J = self.jointAngle + dt * np.array(self.speed)\n self.jointAngle[0] = min(max(J[0], lims[0][0]), lims[0][1])\n self.jointAngle[1] = min(max(J[1], lims[1][0]), lims[1][1])\n self.forward_kinematics()", "def ry(self, angle: float) -> \"Mate\":\n a = angle / 180 * pi\n self.x_dir = 
Mate._rotate(self.x_dir, self.y_dir, a)\n self.z_dir = Mate._rotate(self.z_dir, self.y_dir, a)\n return self", "def angle(self):\n self.convert_window(\"Angle\", \"degree\", [\"arcminute\", \"arcsecond\", \"circle\", \"degree\", \"gon\", \"gradian\", \"mil(Nato)\", \"mil(Soviet Union)\", \"mil(Sweden)\", \"octant\", \"quadrant\", \"radian\", \"revolution\", \"sextant\", \"sign\", \"turn\"])", "def setRotation(self, angle=0.0):\n axis = (0, 0, 1)\n oldp = self.transform.pos\n newpos = oldp + glm.vec3(0, -40, 0)\n self.transform.setPos(newpos)\n self.transform.setRot(glm.angleAxis(glm.radians(angle),\n glm.vec3(axis)))\n self.transform.setPos(oldp)", "def bounce(self, orientation):\r\n if orientation == \"horizontal\":\r\n # rebond sur une surface horizontale\r\n self.angle *= -1\r\n elif orientation == \"vertical\":\r\n # rebond sur une surface verticale\r\n self.angle = math.pi - self.angle\r\n else:\r\n print(\"Not a valid bounce. Either horizontal or vertical\")", "def ChangeWindAngle(self,theta):\n self.problem.ChangeWindAngle(theta)", "def rotate(self, value):\n self.pi.set_servo_pulsewidth(self.steering_pin, self.convert_radians_to_PW(value))", "def yaw(self, dangle): # aka azimuth\n vu = self.getViewUp()\n GL.glTranslate(*self.focus)\n GL.glRotate(dangle, *vu)\n GL.glTranslate(*-self.focus)", "def calibrer(self):\n self._angle_courant = self._angle_initial\n self.angle(self._angle_initial)", "def turn_by(self, angle: float) -> None:\n\n self.action = TurnBy(angle=angle)", "def set_joint(self, joint, value, radians=False):\n if value == None:\n return\n\n if not radians:\n value = math.radians(value)\n\n target = value\n current = self.chain.joints[joint]['current_value']\n step = self._servo_speed / 2 # divide by two here to allow for half second sleeps\n\n if (current > target):\n # current angle is LARGER than the target angle so we decrement it to get closer\n while (current - target) > step:\n current = current - step\n self.anim_variables[joint] = current\n sleep(0.5)\n else:\n self.anim_variables[joint] = target\n\n elif (target > current):\n # current angle is SMALLER than the target angle so we increment it to get closer\n while (target - current) > step:\n current = current + step\n self.anim_variables[joint] = current\n sleep(0.5)\n else:\n self.anim_variables[joint] = target\n\n else:\n # current angle is EQUAL to the target angle\n self.anim_variables[joint] = target\n\n # failsafe catches and set current values in chain\n self.anim_variables[joint] = value\n self.chain.joints[joint]['current_value'] = value", "def my_turn_in_place(robot, angle, speed):\n\n t = (1/speed) * numpy.abs(angle)\n\n circum = 2 * math.pi * get_distance_between_wheels()\n arc_length = (numpy.abs(angle)/360) * circum\n mm_speed = arc_length / t\n mm_speed = mm_speed if angle>0 else -mm_speed\n\n robot.drive_wheels(-mm_speed, mm_speed, duration=t)", "def detector_angle(self, angle):\n self.rotate_rad(-radians(angle))", "def yaw(self, dangle): # aka azimuth\n vu = self.getViewUp()\n GL.glTranslatef(*self.focus)\n GL.glRotate(dangle, *vu)\n GL.glTranslatef(*-self.focus)", "def setAltitudeAngle(self, angle):\n angle = int(round(angle))\n if angle != self._altitude:\n self._altitude = angle\n self._updateLight()\n self.sigAltitudeAngleChanged.emit()", "def rel_angle(self, angle):\n steps = int(angle / 360 * self.steps_per_rev)\n self.steps(steps)", "def move_to_angle(self, target_angle):\n\n self.motor.directly_goto_position(target_angle)\n self.current_angle = target_angle", "def do_altangle(self):\n 
nave = 10000\n x, y, z, angle = cbp.phidget.main(nave)\n current_angle = angle\n #print(current_angle)\n self.altangle = current_angle\n return current_angle", "def rotate(self, angle):\n self.call('rotate', angle)", "def set_ypos(self, deg):\n if deg < -10:\n deg = -10\n elif deg > 10:\n deg = 10\n deg += 10\n self.kit.servo[8].angle = deg", "def turn(robot, alpha=0.524): # 0.524 rad = 30 degrees\n\n journey = Journey(robot, angle=alpha)\n journey.start()\n robot.position.turn(alpha)\n sleep(0.5)", "def rotate(self, angle, aspeed):\n current_pose = [self.px, self.py, self.pth]\n initial_pose = current_pose\n # final pose is the final angle that the robot moves to about z\n final_angle = self.pth+angle\n if final_angle < self.pth:\n aspeed=aspeed*(-1)\n\n final_pose = [self.px, self.py, final_angle]\n \ttolerance = 0.01\n\n self.send_speed(0.0, aspeed)\n while abs(final_pose[2]-current_pose[2]) > tolerance:\n current_pose = [self.px, self.py, self.pth]\n self.send_speed(0.0, 0.0)", "def right(self, angle):\r\n self.rotation += angle", "def fire(self, angle):\r\n #convert the angle to the slope multiply by bullet speed for velocity\r\n self.velocity.dy = math.sin(math.radians(angle)) * BULLET_SPEED\r\n #convert the angle to the slope multiply by bullet speed for velocity\r\n self.velocity.dx = math.cos(math.radians(angle)) * BULLET_SPEED" ]
[ "0.6930348", "0.6852178", "0.6837454", "0.67657924", "0.6641104", "0.66320354", "0.66257477", "0.6583451", "0.65234107", "0.64924616", "0.64834297", "0.64331305", "0.63996845", "0.6354722", "0.6261629", "0.6239155", "0.62279516", "0.62081057", "0.62069297", "0.61839217", "0.614857", "0.6126802", "0.61265624", "0.60912144", "0.6088944", "0.6047413", "0.60423183", "0.596344", "0.59620225", "0.5949161", "0.5931189", "0.59268636", "0.591696", "0.59097564", "0.5902373", "0.5897223", "0.5897143", "0.58861387", "0.58658415", "0.5862381", "0.58257973", "0.5799599", "0.57467103", "0.57423127", "0.5732071", "0.57203436", "0.5718489", "0.56986207", "0.5681678", "0.56809324", "0.5671971", "0.56700575", "0.5641745", "0.56408924", "0.5615181", "0.56114733", "0.5606281", "0.5593044", "0.5590168", "0.5582891", "0.5582563", "0.5550837", "0.553866", "0.553808", "0.5520132", "0.55113906", "0.55096096", "0.5507532", "0.5498232", "0.5485767", "0.5480044", "0.5468032", "0.54667586", "0.54500484", "0.544587", "0.54408365", "0.5436935", "0.5435338", "0.5431729", "0.5430302", "0.5422574", "0.54199886", "0.5413196", "0.5402889", "0.53959465", "0.53955346", "0.5391168", "0.5375284", "0.5375047", "0.5373109", "0.5357877", "0.53574646", "0.5355774", "0.53545666", "0.5347538", "0.53380287", "0.5337679", "0.5332133", "0.5331261", "0.5315885" ]
0.6944382
0
Draw a circle, or part of a circle. From its current position, the turtle will draw a series of short lines, turning slightly between each. If radius is positive, it will turn to its left; a negative radius will make it turn to its right.
def circle(self, radius, extent=360):
    temp = self.bearing
    self.b_change = 0;
    tempSpeed = self.speedVar
    self.speedVar = 1
    for i in range(0, (extent//2)):
        n = math.fabs(math.radians(self.b_change) * radius)
        if(radius >= 0):
            self.forward(n)
            self.left(2)
        else:
            self.forward(n)
            self.right(2)
    if(radius >= 0):
        self.bearing = (temp + extent)
    else:
        self.bearing = (temp - extent)
    self.speedVar = tempSpeed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_circle(c):\n turtle.circle(c.radius)", "def draw_circle(c):\n turtle.circle(c.radius)", "def drawCircle(t, x, y, radius):\r\n t.up()\r\n t.goto(x + radius, y)\r\n t.setheading(90)\r\n t.down()\r\n for count in range(120):\r\n t.left(3)\r\n t.forward(2.0 * math.pi * radius / 120.0)", "def drawCircle(r):\r\n # create a turtle-painter instance using turtle library\r\n painter = turtle.Turtle()\r\n\r\n # turtle properties (we want the turtle to look nicer)\r\n painter.shape(\"turtle\") # setting painter shape to turtle\r\n painter.shapesize(3,3,1) # making turtle-painter 3 times bigger\r\n painter.color(\"limegreen\") # setting painting color to limegreen\r\n\r\n # move the turtle-painter to ready position\r\n painter.pu() # we just move without drawing anything\r\n x0 = coordX(r, 0) # compute initial coordinate x0\r\n y0 = coordY(r, 0) # compute initial coordinate y0\r\n\r\n painter.goto(x0,y0) # move the turtle to the ready position\r\n \r\n # tell the turtle to put pencil down on the paper\r\n painter.pd()\r\n\r\n # draw a circle\r\n for theta in range(0, 361, 1):\r\n x = coordX(r, theta, useradians = False)\r\n y = coordY(r, theta, useradians = False)\r\n\r\n painter.goto(x,y)\r\n\r\n # tell the turtle to put pencil up from the paper\r\n painter.pu()\r\n # hide the painter after he finished to draw\r\n painter.ht()\r\n print(\"Draw a circle of r = \", r )", "def circle(radius, extent=360):\n turtleTmp.circle(radius, extent)", "def drawCircle(self, r):\n assert (type(r) in [int, float]), \"parameter r:%s is not a valid number\" % `r` \n x = self._turtle.xcor()\n y = self._turtle.ycor()\n \n # Move the pen into position\n fstate = self._turtle.pendown()\n if fstate:\n self._turtle.penup()\n self._turtle.setposition(x, y-r)\n if fstate:\n self._turtle.pendown()\n \n # Draw the circle and fill if necessary\n self._turtle.circle(r)\n self.flush()\n self._turtle.forward(0)\n \n # Return the pen to the position\n if fstate:\n self._turtle.penup()\n self._turtle.setposition(x, y)\n if fstate:\n self._turtle.pendown()", "def draw_circle(t, circle):\n t.pu()\n t.goto(circle.center.x, circle.center.y)\n t.pd()\n polygon.circle(t, circle.radius)", "def drawCircle(x, y, r):\n pen1.up()\n pen1.goto(x,y)\n pen1.down()\n pen1.circle(r)", "def draw_circle(self, x, y, radius, color=Color['white']):\n pygame.draw.circle(self.display, color, (x, y), radius)", "def draw_full_circle(x, y, radius):\n iterations = int(2 * radius * pi)\n s = sin(2 * pi / iterations)\n c = cos(2 * pi / iterations)\n\n dx, dy = radius, 0.\n\n glBegin(GL_TRIANGLE_FAN)\n glVertex2f(x, y)\n for _ in range(iterations + 1):\n glVertex2f(x + dx, y + dy)\n dx, dy = (dx * c + dy * s), (dy * c - dx * s)\n glEnd()", "def circle(self, pos, radius, draw=None, fill=\"black\", lw=0, options=None, kwoptions=None):\n\n fill = norm_colour(fill)\n self.use_colour(fill)\n\n draw = norm_colour(draw)\n if draw is None:\n draw = fill\n self.use_colour(draw)\n\n self._commands.append(rf\"\\filldraw[line width={lw},\"\n rf\"{fmt_options(options, kwoptions, draw=draw, fill=fill)}] \"\n rf\" {fmt_point(pos)} circle ({radius});\")", "def draw_circle(self, color, center, radius, width):\n _c = self.T.itrans(center)\n pg.draw.circle(self.screen, color, _c(), radius, width)", "def draw_circle(self, center, radius, line_width, line_color, fill_color=\"\"):\n line_color, fill_color = check_color(line_color), check_color(fill_color)\n SToval.oval(self.canvas, center, radius, line_width, line_color, fill_color)", "def circle(draw, centrex, centrey, 
radius, color=\"#AAAAAAFF\") -> None:\n # convert cartesian centre to pixel centre\n cx, cy = pixelcoord(centrex, centrey)\n # top left and bottom right coordinates\n rect = [(cx-radius, cy-radius), (cx+radius, cy+radius)]\n # draw\n draw.arc(rect, 0, 360, color)", "def drawCircle(x,y,radius,ucoords=1):\n if ucoords:\n dislin.rlcirc(x,y,radius)\n else:\n dislin.circle(x,y,radius)", "def circle(self, center, radius, color=(255, 255, 255), width=0):\n center = self._transform(center)\n pygame.draw.circle(self.screen, color, center, radius, width)", "def draw_circle(color, position, radius, width=0):\n #print('(color={}, position={}, radius={}, width={})')\n pygame.draw.circle(screen, color, position, radius, width)", "def __drawCircle(self, center, radius, color, drawwidth=1):\n radius *= self.viewZoom\n if radius < 1: radius = 1\n else: radius = int(radius)\n\n pygame.draw.circle(self.screen, color, center, radius, drawwidth)", "def draw_circle(self, color, position, radius, width = 0, anchor= 'topleft'):\n color = spyral.color._determine(color)\n offset = self._calculate_offset(anchor)\n pygame.draw.circle(self._surf, color, position + offset, radius, width)", "def draw_circle(self, x0, y0, r, color=None):\n f = 1 - r\n ddF_x = 1\n ddF_y = -2 * r\n x = 0\n y = r\n\n self.set(x0, y0 + r, color)\n self.set(x0, y0 - r, color)\n self.set(x0 + r, y0, color)\n self.set(x0 - r, y0, color)\n\n while x < y:\n if f >= 0:\n y -= 1\n ddF_y += 2\n f += ddF_y\n x += 1\n ddF_x += 2\n f += ddF_x\n\n self.set(x0 + x, y0 + y, color)\n self.set(x0 - x, y0 + y, color)\n self.set(x0 + x, y0 - y, color)\n self.set(x0 - x, y0 - y, color)\n self.set(x0 + y, y0 + x, color)\n self.set(x0 - y, y0 + x, color)\n self.set(x0 + y, y0 - x, color)\n self.set(x0 - y, y0 - x, color)", "def circle(self, center, rad):\n self.gc.show_circles(center[0], center[1], rad, facecolor='none', edgecolor=self.color, linewidth=0.5)", "def DrawCircle(self, center, radius, color, drawwidth=1):\r\n radius *= self.zoom\r\n if radius < 1:\r\n radius = 1\r\n else: radius = int(radius)\r\n\r\n pygame.draw.circle(self.surface, color.bytes, center, radius, drawwidth)", "def draw_circle(centerx, centery, radius):\r\n global _canvas\r\n global _current_color\r\n if _canvas == None:\r\n raise RuntimeError(\"Canvas is not open yet.\")\r\n else:\r\n circle = Circle()\r\n circle.move(centerx, centery)\r\n circle.setRadius(radius)\r\n _set_not_filled(circle)\r\n _canvas.add(circle)", "def circle(t, r):\n circumference = math.pi * 2 * r\n n = 60\n length = circumference / n\n polygon(t, length, n)", "def draw_circle_outline(center_x, center_y, radius, color, border_width=1):\n width = radius\n height = radius\n draw_ellipse_outline(center_x, center_y, width, height,\n color, border_width)", "def DrawSolidCircle(self, center, radius, axis, color):\r\n radius *= self.zoom\r\n if radius < 1:\r\n radius = 1\r\n else: radius = int(radius)\r\n\r\n pygame.draw.circle(self.surface, (color/2).bytes+[127],\r\n center, radius, 0)\r\n pygame.draw.circle(self.surface, color.bytes, center, radius, 1)\r\n pygame.draw.aaline(self.surface, (255, 0, 0), center,\r\n (center[0] - radius*axis[0], center[1] +\r\n radius*axis[1]))", "def circle(r, mv_direction):\n vert_amount = 80\n edge = 2 * r * math.sin(math.radians(360 / (2 * vert_amount))) \n polygon_angle = (vert_amount - 2) / vert_amount * 180\n angle = 180 - polygon_angle\n \n for i in range(vert_amount):\n if i == 0: \n rotate_turtle(polygon_angle / 2, not mv_direction)\n else:\n rotate_turtle(angle, mv_direction)\n 
turtle.forward(edge)", "def circle(cls, radius, position, open_circle=False):\n\n nb_points = 2*np.pi*radius/1\n points1 = radius*np.transpose(np.concatenate(([np.cos(2*np.pi*np.arange(0,nb_points+1)/nb_points)],[np.sin(2*np.pi*np.arange(0,nb_points+1)/nb_points)]),axis=0))\n \n for y in range(points1.shape[0]):\n points1[y,:]=points1[y,:]+position\n \n circle_obj = cls()\n circle_obj.coord = [points1]\n circle_obj.open = open_circle\n return circle_obj", "def circle(self, radius, extent=None, steps=None):\n super().circle(radius, extent, steps)", "def draw_circle(self, color, position, radius, width=0, anchor='topleft'):\n offset = self._calculate_offset(anchor)\n pygame.draw.circle(self._surf, color, (position + offset).floor(),\n radius, width)\n self._version += 1\n spyral.util.scale_surface.clear(self._surf)\n return self", "def draw_circle_filled(center_x, center_y, radius, color):\n width = radius\n height = radius\n draw_ellipse_filled(center_x, center_y, width, height, color)", "def turn_circles(self):\n r = self.player.turn_r\n k = 30 #higher numbers result in a better looking circle, but too high lags the bot\n\n circleR = []\n centreR = np.array([0,r,0])\n for i in range(k):\n theta = (2/k) * math.pi * i\n point = centreR + np.array([r*math.sin(theta), -r*math.cos(theta), 0])\n point = self.player.pos + world(point,self.player.orientM)\n circleR.append(point)\n\n circleL = []\n centreL = np.array([0,-r,0])\n for i in range(k):\n theta = (2/k) * math.pi * i\n point = centreL + np.array([r*math.sin(theta), r*math.cos(theta), 0])\n point = self.player.pos + world(point,self.player.orientM)\n circleL.append(point)\n\n self.renderer.begin_rendering(\"turn circles\")\n self.renderer.draw_polyline_3d(circleR, self.renderer.cyan())\n self.renderer.draw_polyline_3d(circleL, self.renderer.cyan())\n self.renderer.end_rendering()", "def circle(self, clear_screen=True, x=50, y=50, radius=40, fill_color='black', outline_color='black'):\n\n if clear_screen:\n self.clear()\n\n x1 = x - radius\n y1 = y - radius\n x2 = x + radius\n y2 = y + radius\n\n return self.draw.ellipse((x1, y1, x2, y2), fill=fill_color, outline=outline_color)", "def draw_star(x=0,y=0,radius=10):\n cx = x\n cy = y+radius\n bx = cx * math.cos(2*math.pi/3) - ( cy * math.sin(2*math.pi/3) )\n by = cx * math.sin(2*math.pi/3) + ( cy * math.cos(2*math.pi/3) )\n ax = cx * math.cos(4*math.pi/3) - ( cy * math.sin(4*math.pi/3) )\n ay = cx * math.sin(4*math.pi/3) + ( cy * math.cos(4*math.pi/3) )\n my_turtle.penup()\n my_turtle.goto(cx, cy)\n my_turtle.pendown()\n my_turtle.goto(bx, by)\n my_turtle.goto(ax, ay)\n my_turtle.goto(cx, cy)\n my_turtle.penup()\n cy = y-radius\n bx = cx * math.cos(2*math.pi/3) - ( cy * math.sin(2*math.pi/3) )\n by = cx * math.sin(2*math.pi/3) + ( cy * math.cos(2*math.pi/3) )\n ax = cx * math.cos(4*math.pi/3) - ( cy * math.sin(4*math.pi/3) )\n ay = cx * math.sin(4*math.pi/3) + ( cy * math.cos(4*math.pi/3) )\n my_turtle.penup()\n my_turtle.goto(cx, cy)\n my_turtle.pendown()\n my_turtle.goto(bx, by)\n my_turtle.goto(ax, ay)\n my_turtle.goto(cx, cy)\n my_turtle.penup()", "def remote_concentric_circles(circle_turtle,dis_range,radius):\r\n for i in range(dis_range):\r\n color = random.choice(dark_colors)\r\n circle_turtle.color(color)\r\n circle_turtle.circle(radius*i)\r\n circle_turtle.up()\r\n circle_turtle.sety((radius*i)*(-1))\r\n circle_turtle.down()\r\n\r\n circle_turtle.up()\r\n circle_turtle.goto(0,0)\r\n circle_turtle.down()", "def wdraw_circle(self, wx, wy, dradius, fill, outline):\r\n dx, dy = 
self.w_to_d(wx, wy)\r\n self.canvas.create_oval(dx - dradius, dy - dradius, dx + dradius, dy + dradius, fill=fill, outline=outline)", "def circle(self, center_x, center_y, radius, color):\n x = radius - 1\n y = 0\n d_x = 1\n d_y = 1\n err = d_x - (radius << 1)\n while x >= y:\n self.pixel(center_x + x, center_y + y, color)\n self.pixel(center_x + y, center_y + x, color)\n self.pixel(center_x - y, center_y + x, color)\n self.pixel(center_x - x, center_y + y, color)\n self.pixel(center_x - x, center_y - y, color)\n self.pixel(center_x - y, center_y - x, color)\n self.pixel(center_x + y, center_y - x, color)\n self.pixel(center_x + x, center_y - y, color)\n if err <= 0:\n y += 1\n err += d_y\n d_y += 2\n if err > 0:\n x -= 1\n d_x += 2\n err += d_x - (radius << 1)", "def circle(radius = 10, angle_resolution = 2.5, layer = 0):\n D = Device(name = 'circle')\n t = np.linspace(0, 360, int(np.ceil(360/angle_resolution) + 1)) * pi/180\n xpts = (radius*cos(t)).tolist()\n ypts = (radius*sin(t)).tolist()\n D.add_polygon(points = (xpts, ypts), layer = layer)\n return D", "def draw_two_circles(t):\n # large circle\n circle(t, 100)\n move(t, 100, 0)\n\n # another large circle\n circle(t, 100)", "def draw(self, draw_circle):\n draw_circle(self.color, (int(self.position[0]), int(self.position[1])), self.size)", "def draw_rectangle(n, radius):\n side_length = math.radians(360/(2*n)) * 2 * radius\n angle = 360 / n\n turtle.penup()\n turtle.goto(side_length, 0)\n turtle.pendown()\n\n turtle.left((180 - angle) / 2)\n for _ in range(n):\n turtle.left(360 / n)\n turtle.forward(side_length)\n turtle.right((180 - angle) / 2)", "def draw_neuron(self, center, radius, color):\r\n self.pen.up()\r\n self.pen.color(color)\r\n self.pen.goto(center)\r\n\r\n self.pen.setheading(0)\r\n self.pen.forward(radius)\r\n self.pen.setheading(90)\r\n\r\n # draw circle\r\n self.pen.begin_fill()\r\n self.pen.pendown()\r\n self.pen.circle(radius)\r\n self.pen.end_fill()\r\n\r\n self.pen.color('black')\r\n self.pen.up()\r\n self.pen.goto(center)\r\n self.pen.setheading(0)", "def makeCircleOutline(self):\n #circle defined\n global circ_main\n circ_main = Circle(stroke_color=BLUE).scale(2).shift(LEFT*5)\n\n #dot at circle and dot at center\n global dot_circ\n dot_circ = always_redraw(\n lambda : Dot(circ_main.get_end())\n )\n global dot_center\n dot_center = Dot(LEFT*5)\n \n #line from origin to circle\n global line_circ\n line_circ = always_redraw(\n lambda : Line(start=dot_center.get_center(), end=dot_circ.get_center())\n )\n \n #write stuff\n self.play(Write(dot_circ), Write(line_circ), Write(dot_center))\n self.play(Write(circ_main), run_time=3, rate_func=double_smooth)", "def circle(self, x, y, r, solid = False):\n px = 0\n py = r\n d = 1 - 2 * r\n err = 0\n while py >= 0:\n if solid:\n for i in range(x - px, x + px + 1):\n self.pixel(i, y + py, 1)\n self.pixel(i, y - py, 1)\n else:\n self.pixel(x + px, y + py, 1)\n self.pixel(x + px, y - py, 1)\n self.pixel(x - px, y + py, 1)\n self.pixel(x - px, y - py, 1)\n err = 2 * (d + py) - 1\n if d < 0 and err <= 0:\n px += 1\n d += 2 *px + 1\n else:\n err = 2 * (d - px) - 1\n if d > 0 and err > 0:\n py -= 1\n d += 1 - 2 * py\n else:\n px += 1\n d += 2 * (px - py)\n py -= 1", "def circle(self, xo: int, yo: int, radius: int, color: int, fill=False):\n for x in range(xo - radius, xo + radius + 1):\n square = sqrt(radius ** 2 - (x - xo) ** 2)\n y = yo + square\n self.pixel(x, floor(y), color)\n y = yo - square\n self.pixel(x, floor(y), color)\n for y in range(yo - radius, yo + radius + 1):\n 
square = sqrt(radius ** 2 - (y - yo) ** 2)\n x = xo + square\n self.pixel(floor(x), y, color)\n x = xo - square\n self.pixel(floor(x), y, color)\n if fill:\n if radius > 1:\n self.circle(xo, yo, radius - 1, color, True)\n else:\n self.circle(xo, yo, radius - 1, color, False)", "def _circle(i, r=.05):\n\treturn Circle((i, 0), r, fill=True, color='black')", "def draw_circle(self, circle, color, thickness=2):\n center = self._format_point(circle.center())\n opencv.circle(self.img, center.tuple(), int(circle.radius()), color.bgra(), thickness=thickness)", "def circular_movement(radius = 150, theta=None):\n y = radius * np.sin(theta)\n if theta == 0:\n x = radius\n elif np.pi*0.99 < theta < np.pi*1.01:\n x = -radius\n else:\n x = y/np.tan(theta)\n return x, y", "def draw_square():\r\n turtle.forward(100)\r\n turtle.left(90)\r\n turtle.forward(100)\r\n turtle.left(90)\r\n turtle.forward(100)\r\n turtle.left(90)\r\n turtle.forward(100)\r\n turtle.left(90)", "def circle(self, p, radius, **kwargs):\n cx, cy = self._sky2img(p)\n self._draw.ellipse([cx-radius, cy-radius, cx+radius, cy+radius], **kwargs)", "def circle(center, radius, *args, **kwargs):\n return patch.Circle(center, radius, *args, **kwargs)", "def create_circle(self, x, y, r, **kwargs):\n return self.create_oval(*self.circ_to_oval(x, y, r), **kwargs)", "def DrawCircle(*args, **kwargs):\n return _gdi_.PseudoDC_DrawCircle(*args, **kwargs)", "def DrawCircle(*args, **kwargs):\n return _gdi_.DC_DrawCircle(*args, **kwargs)", "def make_circle(x, y, r):\n\tnew_circle = Circle()\n\tnew_circle.x = x\n\tnew_circle.y = y\n\tnew_circle.r = r\n\treturn new_circle", "def draw_filled_circle(centerx, centery, radius):\r\n global _canvas\r\n global _current_color\r\n if _canvas == None:\r\n raise RuntimeError(\"Canvas is not open yet.\")\r\n else:\r\n circle = Circle()\r\n circle.move(centerx, centery)\r\n circle.setRadius(radius)\r\n _set_filled(circle)\r\n _canvas.add(circle)", "def showCircle(self, window, color=None, radius=None, fill=None, conversion=None):\n if not color: color = self.color\n if not radius: radius = self.radius\n if not fill: fill = self.fill\n if not conversion: conversion = self.conversion\n window.draw.circle(window.screen, color, [self.x, self.y], radius, fill, conversion)", "def draw_circle(self, xy=None, bbox=None, flatratio=1, **options):\n #TEMPORARY DISABLING TRANSFORM TO AVOID DEFORMED CIRCLE\n options = self._check_options(options)\n args = []\n \n if options[\"outlinecolor\"]:\n pen = aggdraw.Pen(options[\"outlinecolor\"], options[\"outlinewidth\"])\n args.append(pen)\n if options[\"fillcolor\"]:\n brush = aggdraw.Brush(options[\"fillcolor\"])\n args.append(brush)\n \n if xy:\n x,y = xy\n x,y = self.coord2pixel(x,y)\n fillsize = options[\"fillsize\"]\n width = options[\"fillwidth\"]\n height = options[\"fillheight\"]\n## width, height = width / self.width * self.coordspace_width, \\\n## height / self.height * self.coordspace_height\n if flatratio: height *= flatratio\n halfwidth, halfheight = width / 2.0, height / 2.0\n bbox = [x-halfwidth, y-halfheight, x+halfwidth, y+halfheight]\n \n elif bbox: pass\n \n else: raise Exception(\"Either xy or bbox has to be specified\")\n \n self.drawer.settransform()\n self.drawer.ellipse(bbox, *args)\n self.drawer.settransform(self.coordspace_transform)", "def plot(self, radius=15, **kwargs):\n self.plot_circle(radius, **kwargs)", "def circle(draw, bbox, thickness=4, loops=2, fill=(255,0,0)):\n offset = 0\n x1, y1, x2, y2 = bbox\n w, h = x2 - x1, y2 - y1\n x_c, y_c = x1 + w/2, y1 + 
h/2\n rot = noise(0.6)\n a, b = w, h\n for loop in range(loops):\n for r in np.arange(0, 2*pi + random.random(), 1/(max(w, h))):\n offset += noise()\n for i in range(thickness):\n x, y = ellipse_pt(r, x_c, y_c, a+i+offset, b+i+offset, rot)\n draw.point((x,y), fill=fill)\n a, b = a + 1, b + 1", "def fillcircle(draw, centrex, centrey, radius, color=\"#AAAAAAFF\") -> None:\n # convert cartesian centre to pixel centre\n cx, cy = pixelcoord(centrex, centrey)\n # top left and bottom right coordinates, must never reverse\n rect = [(cx-radius, cy-radius), (cx+radius, cy+radius)]\n # draw, same color for outline and fill\n draw.ellipse(rect, color, color)", "def circleCirc(radius):\n radius = float(radius)\n return 2*math.pi*radius", "def draw_circle(self,\n boxes=1,\n completeline=0,\n lines=0,\n seek=0,\n continuetext=0,\n fontsize=0,\n gray=0,\n style=\"\"):\n\n c = self.canvas\n c.setLineWidth(0.90)\n c.setStrokeGray(gray)\n self.resetx(seek=seek)\n #if style == \"center\":\n # self.x = self.width / 2\n #elif style == \"right\":\n # self.x = self.width - self.marginsides - self.fontsize\n #if seek > (self.width - (self.marginsides + self.fontsize)):\n # seek = 0\n #if (self.y - self.fontsize) < 40:\n # self.set_new_page()\n #if continuetext == 1:\n # self.y = self.y + self.fontsize\n # self.x = self.lastx\n #else:\n # self.x = self.marginsides\n #if seek != 0:\n # self.x = self.x + seek\n #if fontsize == 0:\n # fontsize = self.fontsize\n #else:\n # self.fontsize = fontsize\n #if completeline == 1:\n # boxes = int(self.width / self.fontsize)\n for eachcircle in xrange(boxes):\n c.circle(self.x + self.fontsize/2, self.y + self.fontsize/2,\n self.fontsize/2, fill = 0)\n self.resetx(seek=self.fontsize)\n self.resetx(seek=seek)\n # if self.x > (self.width - (self.marginsides + self.fontsize)):\n # break\n #self.lastx = self.x\n #self.x = self.marginsides\n #self.y = self.y - self.fontsize", "def draw(self):\r\n arcade.draw_circle_filled(self.center.x, self.center.y, self.radius, TARGET_COLOR)", "def _generate_circle(self, center, radius):\n assert len(center) in [2, 3], 'Center of circle must have 2 or 3 elements'\n assert radius > 0, 'Radius must be greater than zero'\n return Point(*center).buffer(radius)", "def draw_circle(mat, center, radius, color=(0, 0, 255), thickness=1):\n cv2.circle(mat, center, radius, color, thickness=thickness)", "def circle(radius = 15, resolution = 20, robotHeight = -90, n = 1, dir = 0):\n \n t = np.linspace(0, n*2*m.pi, resolution*n)\n circlePos = []\n for num in t:\n if dir == 0:\n x = m.cos(num)*radius\n y = m.sin(num)*radius\n else:\n x = m.cos(num)*radius\n y = m.sin(num-m.pi)*radius\n\n circlePos.append([x, y, robotHeight, 0, 0, 0, 'mov'])\n\n circlePos.append([0,0,-127,0,0,0,'mov'])\n return circlePos", "def draw_petal():\n turtle.forward(30)\n turtle.left(45)\n turtle.forward(30)\n turtle.left(135)\n turtle.forward(30)\n turtle.left(45)\n turtle.forward(30)\n turtle.left(135)", "def draw_petal():\n turtle.forward(30)\n turtle.left(45)\n turtle.forward(30)\n turtle.left(135)\n turtle.forward(30)\n turtle.left(45)\n turtle.forward(30)\n turtle.left(135)", "def oncircle(size=None):\n if size is None:\n size = ()\n else:\n try:\n size = tuple(size)\n except TypeError:\n size = (size,)\n # This beats normalizing incircle for all sizes, even though that\n # should be the superior algorithm for compiled code.\n theta = 2.*pi * random(size + (1,))\n return concatenate((cos(theta), sin(theta)), axis=-1)", "def draw(self, window):\n radius = SQUARE_SIZE // 2 - PADDING\n 
if self.stack_size == 2:\n x1, y1 = self.x - SQUARE_SIZE//8, self.y - SQUARE_SIZE//8\n x2, y2 = self.x + SQUARE_SIZE//8, self.y + SQUARE_SIZE//8\n pygame.draw.circle(window, BLACK, (x1, y1), radius + OUTLINE)\n pygame.draw.circle(window, self.color, (x1, y1), radius)\n pygame.draw.circle(window, BLACK, (x2, y2), radius + OUTLINE)\n pygame.draw.circle(window, self.color, (x2, y2), radius)\n else:\n pygame.draw.circle(window, BLACK, (self.x, self.y), radius + OUTLINE)\n pygame.draw.circle(window, self.color, (self.x, self.y), radius)", "def circle(self):\n return circle(self.N, self.o, self.r)", "def target(x, y, radius):\n color = (randint(0, 1), randint(0, 1), randint(0, 1))\n #generating random color\n if radius>5:\n penup()\n goto(x, y)\n pendown()\n fillcolor(color)\n begin_fill()\n circle(radius)\n end_fill()\n target(x, y+10, radius-10)", "def get_nice_circle(x, y, radius, color=\"lightsteelblue\", facecolor=\"green\", alpha=.6, ax=None ):\n e = pl.Circle([x, y], radius)\n if ax is None:\n ax = pl.gca()\n ax.add_artist(e)\n e.set_clip_box(ax.bbox)\n e.set_edgecolor( color )\n e.set_linewidth(3)\n e.set_facecolor( facecolor ) # \"none\" not None\n e.set_alpha( alpha )\n return e", "def circle(radius, center, dim):\n kern = np.zeros(shape=(radius*2,radius*2))\n kern[draw.circle(r=radius, c=radius, radius=radius)] = 1\n return kern", "def turn(self,\n radius,\n angle,\n number_of_points=0.01,\n max_points=_max_points,\n final_width=None,\n final_distance=None,\n layer=0,\n datatype=0):\n exact = True\n if angle == 'r':\n delta_i = _halfpi\n delta_f = 0\n elif angle == 'rr':\n delta_i = _halfpi\n delta_f = -delta_i\n elif angle == 'l':\n delta_i = -_halfpi\n delta_f = 0\n elif angle == 'll':\n delta_i = -_halfpi\n delta_f = -delta_i\n elif angle < 0:\n exact = False\n delta_i = _halfpi\n delta_f = delta_i + angle\n else:\n exact = False\n delta_i = -_halfpi\n delta_f = delta_i + angle\n if self.direction == '+x':\n self.direction = 0\n elif self.direction == '-x':\n self.direction = numpy.pi\n elif self.direction == '+y':\n self.direction = _halfpi\n elif self.direction == '-y':\n self.direction = -_halfpi\n elif exact:\n exact = False\n self.arc(radius, self.direction + delta_i, self.direction + delta_f,\n number_of_points, max_points, final_width, final_distance,\n layer, datatype)\n if exact:\n self.direction = _directions_list[int(\n round(self.direction / _halfpi)) % 4]\n return self", "def draw_circle_set(canvas, x, y, radius, nib_width, partitions):\n offset = radius\n for i in (float(x) for x in partitions.split(\",\")):\n canvas.circle(x, y, offset, fill = 1)\n offset -= i *nib_width * mm\n\n return offset", "def circle(r=0):\n\tteta = 2*pi*random()\n\tx = (r+1)*cos(teta) + L//2\n\ty = (r+1)*sin(teta) + L//2\n\t\n\ti = int(x) + 1\n\tj = int(y) + 1\n\tprint(r)\n\treturn i,j", "def draw(self):\n radius = self.width / 2\n center_x = self.x + radius\n center_y = self.y + radius\n arcade.draw_circle_filled(center_x, center_y, radius, self.fill.color)\n arcade.draw_circle_outline(\n center_x, center_y, radius, self.pen.color, 3)", "def create_circle(self, cx, cy, radius, style=None, parent=None):\n if parent is None:\n parent = self.current_parent\n if parent is not None:\n attrs = {'r': str(radius), 'cx': str(cx), 'cy': str(cy)}\n if style:\n attrs['style'] = style\n return etree.SubElement(parent, svgns('circle'), attrs)", "def plot_circle(self):\n if self.lastmouse is not None:\n pygame.gfxdraw.circle(self.screen,\n self.lastmouse[0], self.lastmouse[1],\n int(self.drawsize), (255, 
0, 255))", "def DrawCirclePoint(*args, **kwargs):\n return _gdi_.PseudoDC_DrawCirclePoint(*args, **kwargs)", "def append_circle(p, v, n, center, radius, start_angle, end_angle):\n\n # Fraction of the circle we're covering, in radians.\n angle_span = end_angle - start_angle\n\n # The number of segments we want to use for this span. Use 20 for a full circle.\n segment_count = int(math.ceil(20*math.fabs(angle_span)/tau))\n\n for i in range(segment_count + 1):\n th = start_angle + angle_span*i/segment_count\n point = center + v*math.cos(th)*radius + n*math.sin(th)*radius\n p.append(point)", "def plot_circle(self, radius, c=color, ax=None, label=True, fontsize=12, **kwargs):\n if ax is None:\n ax = plt.gca()\n circle = Circle(self.coords, radius, fill=None, ec=c, **kwargs)\n ax.add_artist(circle)\n if label and self.i is not None:\n plt.text(\n *(np.array(self.coords) - [0, 1.5 * radius]),\n self.i,\n c=c,\n ha=\"center\",\n va=\"top\",\n fontsize=fontsize,\n )", "def make_circle_fill():\n num_points = 40\n batch = pyglet.graphics.Batch()\n rad = math.pi * 2 / num_points # getting 360 / n in radians\n index = list(itertools.chain.from_iterable( (0, x-1, x) for x in range(2, num_points+1) ))\n index += [0, 1, num_points] # end of fan\n vertices = [0, 0] # adding center of fan\n for i in range(1, num_points + 1):\n angle = rad * i\n vertices += [math.cos(angle), math.sin(angle)]\n vertices += [1, 0] # adding end of fan\n circle = pyglet.graphics.vertex_list_indexed(num_points+2, index, ('v2f', vertices))\n return circle", "def draw_circle(self, draw_x, draw_y, player_one):\n if player_one:\n pygame.draw.circle(self.background, (0, 0, 0), (draw_x, draw_y), self.radius + 1)\n pygame.draw.circle(self.background, (self.red, 0, self.blue), (draw_x, draw_y), self.radius)\n pygame.draw.circle(self.background, (self.red, 100, self.blue + 100), (draw_x, draw_y), self.radius - 8)\n else:\n pygame.draw.circle(self.background, (0, 0, 0), (draw_x, draw_y), self.radius + 1)\n pygame.draw.circle(self.background, (self.red, 0, self.blue), (draw_x, draw_y), self.radius)\n pygame.draw.circle(self.background, (self.red + 100, 100, self.blue), (draw_x, draw_y), self.radius - 8)", "def draw_circle_thing(diameter: float, center: Tuple[float, float]) -> List[FigureElement]:\n x, y = cm_to_px(center[0]), cm_to_px(center[1])\n r_px = cm_to_px(diameter) / 2\n\n circles = []\n num_circles = 12\n circle_mult = 1\n for i in range(num_circles):\n rads = 2*math.pi/num_circles * i\n little_radius = (r_px/2)*circle_mult\n center_offset = r_px - little_radius\n cx = x + math.sin(rads) * center_offset\n cy = y + math.cos(rads) * center_offset\n circles.append(CircleElement(cx, cy, little_radius, stroke_width=1, fill=\"none\"))\n return circles", "def main():\r\n x = int(input(\"Enter the x coordinate of the center point: \"))\r\n y = int(input(\"Enter the y coordinate of the center point: \"))\r\n radius = int(input(\"Enter the radius: \"))\r\n drawCircle(Turtle(), x, y, radius)\r\n sleep(5)", "def wdraw_wcircle(self, wx, wy, wradius, fill, outline):\r\n x0, y0 = self.w_to_d(wx - wradius, wy - wradius)\r\n x1, y1 = self.w_to_d(wx + wradius, wy + wradius)\r\n self.canvas.create_oval(x0, y0, x1, y1, fill=fill, outline=outline)", "def discretized_circle(radius, n_pts):\n x1 = np.zeros(n_pts)\n y1 = np.zeros(n_pts)\n for i in range(0, n_pts):\n x1[i] = np.cos(2 * np.pi / n_pts * i) * radius\n y1[i] = np.sin(2 * np.pi / n_pts * i) * radius\n\n x2 = np.roll(x1, -1)\n y2 = np.roll(y1, -1)\n return x1, y1, x2, y2", "def ell(screen, color, 
x0, y0, size, angle):\n for k in range(0, 100):\n circle(screen, color, (x0 + size * 20 * math.sin(angle / 180 * math.pi) * k / 100,\n y0 + size * 20 * math.cos(angle / 180 * math.pi) * k / 100),\n 2 * size * (1 + 1 * (k / 100) ** 0.8), width=0)\n circle(screen, color, (x0 + size * 20 * math.sin(angle / 180 * math.pi) * (1 + k / 100),\n y0 + size * 20 * math.cos(angle / 180 * math.pi) * (1 + k / 100)),\n 2 * size * (2 - (k / 100) ** 1.25), width=0)", "def _generate_circle_mask(center_y, center_x, radius):\n\n circle = draw.circle(center_y, center_x, radius)\n\n return circle", "def draw_open(x, y):\n square_pos_x = x * 30\n square_pos_y = (y - 1) * -30\n penup()\n pencolor('#ff9800')\n # Sets the position on the position (15, 25) in the square of size (30,30) and draws a filled circle\n setpos(-500 + square_pos_x + 15, 200 + square_pos_y - 25)\n pendown()\n circle(10)", "def draw(self, screen):\n\t\tpygame.draw.circle(screen, self.color, self.pos, self.radius)", "def draw_triangle(x, y, length=10):\n radius = length/math.sqrt(3)\n my_turtle.penup()\n my_turtle.goto(x, y+radius)\n my_turtle.pendown()\n my_turtle.right(60)\n for i in range(3):\n my_turtle.forward(length)\n my_turtle.right(120)\n\n my_turtle.left(60)\n my_turtle.penup()", "def plot_tpc_circle(radius):\n import holoviews as hv\n x = radius * np.cos(np.arange(-np.pi, np.pi + 0.1, 0.01))\n y = radius * np.sin(np.arange(-np.pi, np.pi + 0.1, 0.01))\n return hv.Curve((x, y)).opts(color='k')", "def addCircle(self, radius=5.0, value=1.0, cx=None, cy=None):\n self.fimage = None\n # Create a circle at the center.\n if cx == None:\n cx = self.nx/2.0\n if cy == None:\n cy = self.ny/2.0\n tmp = (self.xx - cx)**2 + (self.yy - cy)**2\n circle = numpy.where(tmp<=radius**2, value, 0)\n self.image += circle\n return", "def plot_circle(radius=1.0, centre=[0,0], height=0, *args, **kwargs):\n\n deg = np.linspace(0, 360, 361)\n rad = np.deg2rad(deg)\n x = centre[0] + np.sqrt((radius**2-height**2))*np.cos(rad)\n y = centre[1] + np.sqrt((radius**2-height**2))*np.sin(rad)\n plt.plot(x, y, *args, **kwargs)", "def circle(self, x, y, r, cls=None, style=None):\n x, y, r = self._meta.units(x, y, r)\n cls_str = 'class=\"%s\" ' % cls if cls else ''\n style_str = 'style=\"%s\" ' % self._meta.make_style(style) if style else ''\n self.elements.append(\"\"\"\n <circle cx=\"%s\" cy=\"%s\" r=\"%s\" %s%s/>\n \"\"\".strip() % (\n x, y, r, cls_str, style_str\n ))\n return self", "def circle(self, x, y, r, cls=None, style=None):\n x, y, r = self._meta.units(x, y, r)\n cls_str = 'class=\"%s\" ' % cls if cls else ''\n style_str = 'style=\"%s\" ' % self._meta.make_style(style) if style else ''\n self.elements.append(\"\"\"\n <circle cx=\"%s\" cy=\"%s\" r=\"%s\" %s%s/>\n \"\"\".strip() % (\n x, y, r, cls_str, style_str\n ))\n return self" ]
[ "0.77814776", "0.77814776", "0.77214444", "0.74263126", "0.74239993", "0.7414104", "0.7346996", "0.72157526", "0.69786805", "0.6978031", "0.69246596", "0.6809172", "0.6785606", "0.6784033", "0.6783516", "0.6775677", "0.67311746", "0.67182976", "0.669799", "0.6684738", "0.66184586", "0.66095126", "0.65891486", "0.6587661", "0.6582551", "0.6565128", "0.6555078", "0.65373945", "0.65145636", "0.64850116", "0.6458055", "0.6454624", "0.6454145", "0.64026207", "0.6375192", "0.6371733", "0.63466775", "0.6335082", "0.6333022", "0.6326975", "0.6305199", "0.6302412", "0.62752074", "0.62643886", "0.6262017", "0.62526625", "0.62514323", "0.62178445", "0.6213135", "0.61747515", "0.6153366", "0.61522424", "0.615143", "0.61453813", "0.612105", "0.6099411", "0.60822105", "0.6070822", "0.60657746", "0.602978", "0.60168314", "0.60112804", "0.598693", "0.59793407", "0.5979266", "0.59507865", "0.5937444", "0.5930528", "0.5930528", "0.5930026", "0.59261924", "0.5916085", "0.5881449", "0.58761793", "0.5859025", "0.5858693", "0.5840709", "0.5839079", "0.5839013", "0.5822136", "0.58138496", "0.57887685", "0.57870996", "0.57838774", "0.5780313", "0.5770567", "0.57672477", "0.57604563", "0.57541066", "0.5750312", "0.57475114", "0.57425153", "0.5741112", "0.5737758", "0.5736945", "0.5730765", "0.57267314", "0.5725345", "0.57230103", "0.57230103" ]
0.6391658
34
this method is called by an admin user to approve the lyrics of a song
def approve_lyrics(): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_lyrics_approved():", "def approve (self, response) :\n if 'event' in response and 'moderator' in response :\n eventId = response ['event']\n userId = response ['moderator']\n else :\n raise ModerationError (response)\n\n mod_status = 'OK'\n if 'status' in response :\n mod_status = response ['status']\n \n event = Event.object.get (id = eventId)\n approval = Approval (approved = event, moderatorId = userId, status = mod_status)\n approval.save ()\n self.editValues (event.answer, response)", "def vote(self, request, **kwargs):\n context = self.get_context_data(**kwargs)\n _logger.info(\"%s is trying to vote on %s\", request.user, context['song'])\n vote_dict = get_vote_dict(request.user)\n can_vote = context['song'].id not in vote_dict[request.user.id] and context['song'].ready\n if can_vote:\n vote = Vote()\n vote.user = request.user\n vote.song = context['song']\n vote.save()\n vote_dict[request.user.id].append(context['song'].id)\n cache.set('vote_dict', vote_dict)\n logging.info('%s voted on %s.', request.user, context['song'])\n return HttpResponse('Vote registered on %s.' % context['song'])\n else:\n logging.info('%s tried to vote more than once on %s.', request.user.username, context['song'])\n return HttpResponse(\"Du har allerede stemt på denne sangen i dag!\", content_type='text/plain', status=403)", "def change_learned_status(self, instance):\n self.song = self.songs.get_song_by_title(instance.text)\n # Marks song as learned and shows according status text\n if self.song.required:\n self.song.mark_learned()\n status_text = \"You have learned {}\".format(self.song.title)\n # Marks song as required and shows according status text\n else:\n self.song.mark_required()\n status_text = \"You need to learn {}\".format(self.song.title)\n # Shows status text, sorts songs by current s\n self.root.ids.status_text.text = status_text\n self.sort_songs(self.root.ids.sort_options.text)", "async def musicbot(self, ctx, the_state):\r\n is_mod = False\r\n for role in ctx.message.author.roles:\r\n if role.name == \"Moderators\":\r\n is_mod = True\r\n if is_mod:\r\n if the_state == \"1\":\r\n self.music_off = False\r\n await ctx.send(\"Music Bot features now on\")\r\n else:\r\n self.music_off = True\r\n await ctx.send(\"Music Bot features now off\")\r\n else:\r\n await ctx.send(\"**Error:** You are not allowed to use this command!\")", "async def queue(self, ctx, *args):\r\n if ctx.message.channel.id != 701868237771505695:\r\n return await ctx.send(\"**Error:** Music Bot commands are only available in <#701868237771505695>\")\r\n if self.music_off:\r\n return await ctx.send(\"**Error:** Music Bot features are currently off\")\r\n if ctx.voice_client is None or ctx.voice_client is not self.voice:\r\n return await ctx.send(\"**Error:** You must be connected to the voice channel.\")\r\n\r\n is_mod = False\r\n for role in ctx.message.author.roles:\r\n if role.name == \"Moderators\":\r\n is_mod = True\r\n\r\n queue_string = \"```\"\r\n try:\r\n # if(args[0]==\"clear\"):\r\n #\tself.songs=[]\r\n if args[0] == \"remove\":\r\n pos = len(self.songs) - 1\r\n while pos > 0:\r\n if args[1].lower() in self.songs[pos][0].title.lower():\r\n if ctx.author.id not in self.songs[pos][4]:\r\n self.songs[pos][4].append(ctx.author.id)\r\n shortened_title = self.title_shorten(self.songs[pos][0].title)\r\n print(self.songs[pos][1])\r\n print(ctx.message.author)\r\n if (len(ctx.message.author.voice.channel.members) - 1 > len(self.songs[pos][4]) * 2\r\n and not is_mod\r\n and not ctx.message.author.id == 
self.songs[pos][1]):\r\n await ctx.send(\"{0} remove votes registered for `{1}`, need {2} to remove song.\".format(\r\n len(self.songs[pos][4]),\r\n shortened_title,\r\n int((len(ctx.message.author.voice.channel.members) - 1) / 2)))\r\n else:\r\n await ctx.send(\"Removing `{0}`\".format(shortened_title))\r\n self.del_song(pos)\r\n pos = pos - 1\r\n except:\r\n pass\r\n pos = 0\r\n for song in self.songs:\r\n if pos == 0:\r\n pos_indicator = \"> \"\r\n else:\r\n pos_indicator = \"{0}.\".format(str(pos))\r\n shortened_title = self.title_shorten(song[0].title)\r\n queue_string = \"{0}{1}{2}\\n\".format(queue_string, pos_indicator, shortened_title)\r\n pos = pos + 1\r\n if queue_string == \"```\":\r\n return await ctx.send(\"Queue is empty\")\r\n await ctx.send(\"{0}```\".format(queue_string))", "async def auto(self, ctx):\n if ctx.message.author.top_role.name.lower() == 'officer':\n await ctx.message.channel.send(\n 'Still working on integration with the election results. Maybe have a command to link to an elections '\n 'database?')\n else:\n await ctx.message.channel.send('Hey! You do not have permission to do that.')", "def update_text(self):\n likes = \"\"\n if self.comedy.get():\n likes += \"You like comedy.\"\n if self.drama.get():\n likes += \"You like drama.\"\n if self.romance.get():\n likes += \"You like romantic.\"\n self.result.delete(0.0, END) # delete from position 0 until the end\n self.result.insert(0.0, likes) # insert to textbox the text in likes in position 0", "async def approve(self, ctx, user: discord.Member):\n server = ctx.message.server\n if user.id in self.norole[server.id]:\n if self.norole[server.id][user.id]['Role'] == True:\n self.norole[server.id][user.id] = {'Role': False}\n dataIO.save_json(self.warninglist, self.norole)\n nobnl = discord.utils.get(server.roles, name = \"NoBNL\")\n await self.bot.remove_roles(user,nobnl)\n msg = await self.bot.say (\"Role removed!\")\n await asyncio.sleep(8)\n await self.bot.delete_message(msg) \n await self.bot.delete_message(ctx.message)\n else:\n msg = await self.bot.say(\"There is no role to remove!\")\n await asyncio.sleep(8)\n await self.bot.delete_message(msg)\n await self.bot.delete_message(ctx.message)", "def add_the_song_to_playlist(self):\n com_util.tap_on(self.driver, element['AddToPlaylist'])\n # com_util.send_to(self.driver, element['EnterThePlaylist'], 'My Songs')\n com_util.tap_on(self.driver, element['ClickMySongs'])\n # com_util.tap_on(self.driver, element['SaveBtn'])\n com_util.tap_on(self.driver, element['CancelBtn'])\n com_util.tap_on(self.driver, element['DownArrow'])", "def approve(self, request, object_id, extra_context=None):\n obj = self.get_object(request, unquote(object_id))\n title = self._approve_title(obj)\n\n AdminAddApprovalForm = self._approve_approval_form(request)\n\n form = AdminAddApprovalForm(initial={'prescription': obj})\n if request.method == 'POST':\n url = reverse('admin:prescription_prescription_detail',\n args=[str(obj.id)])\n if obj.approval_status == obj.APPROVAL_DRAFT and obj.can_approve:\n # create an approval\n obj.approval_status = obj.APPROVAL_SUBMITTED\n obj.approval_status_modified = timezone.now()\n obj.save()\n self.message_user(\n request, \"Successfully submitted for approval.\")\n return HttpResponseRedirect(url)\n elif obj.approval_status == obj.APPROVAL_SUBMITTED:\n if request.POST.get('_cancel'):\n obj.clear_approvals()\n msg = 'Delete: Clearing Approvals/Endorsements', 'Burn ID: {}, Deleted by: {}'. 
format(obj.burn_id, request.user.get_full_name())\n logger.warning(msg)\n support_email('Delete: Clearing Approvals/Endorsements', msg)\n\n self.message_user(\n request, \"Approval rejected. ePFP is now draft.\")\n return HttpResponseRedirect(url)\n\n form = AdminAddApprovalForm(request.POST,\n initial={'prescription': obj})\n if form.is_valid():\n approval = form.save(commit=False)\n approval.prescription = obj\n approval.creator = request.user\n approval.modifier = request.user\n approval.save()\n obj.approval_status = obj.APPROVAL_APPROVED\n obj.approval_status_modified = timezone.now()\n obj.save()\n self.message_user(\n request, \"Successfully approved.\")\n return HttpResponseRedirect(url)\n elif obj.is_approved:\n if obj.is_closed:\n self.message_user(\n request, \"You can't extend an approval after the \"\n \"prescribed fire plan has been closed.\")\n return HttpResponseRedirect(url)\n if request.POST.get('_cancel'):\n self.message_user(\n request, \"Didn't extend approval.\")\n return HttpResponseRedirect(url)\n else:\n approval = obj.current_approval\n if approval and approval.extension_count < 3:\n approval.extension_count = approval.extension_count + 1\n approval.valid_to = approval.next_valid_to\n approval.save()\n self.message_user(\n request, \"Successfully extended approval.\")\n else:\n self.message_user(request, \"You can't extend an \"\n \"approval more than 3 times.\")\n return HttpResponseRedirect(url)\n\n admin_form, media = self._approve_form(request, obj, form)\n\n context = {\n 'title': title,\n 'current': obj,\n 'form': admin_form,\n 'media': media,\n 'errors': None,\n }\n return TemplateResponse(request, \"admin/prescription/prescription/\"\n \"approval.html\", context,\n current_app=self.admin_site.name)", "async def vote(self, ctx):\n embed = discord.Embed(title = \"Here are some bot lists that you can vote for me on, voters may soon™ recieve perks\", color = discord.Color.blurple())\n embed.add_field(name = \"Bots For Discord\", value = \"[Click Here](https://botsfordiscord.com/bot/592811241756688405/vote)\")\n embed.add_field(name = \"Discord Boats\", value = \"[Click Here](https://discord.boats/bot/592811241756688405/vote)\")\n embed.add_field(name = \"Divine Discord Bots\", value = \"[Click Here](https://divinediscordbots.com/bot/592811241756688405/vote)\") \n embed.add_field(name = \"Botlist.space\", value = \"[Click Here](https://botlist.space/bot/592811241756688405/upvote)\") \n embed.set_thumbnail(url = self.bot.user.avatar_url)\n await ctx.send(embed = embed)", "def jao_approve(self):\n print \"JAO approved this form. Current state:\", self.state", "def approve(self):\n self.approved = True\n self.quest_node['approved'] = True\n graph.push(self.quest_node)\n self.payout()", "def hod_approve(self):\n print \"HOD approved this form. Current state:\", self.state", "def put_on_wish_list():\n book = request.form\n flash(\"The Wish list feature is under construction! Please check back soon!\")\n return render_template('book_details.html', list_of_books=book)", "async def optin(self, ctx):\n optout.delete_one({\"_id\": ctx.author.id})\n await ctx.send(f\"You have **opted into** A Sound Mood. 
To leave the program, use ?optout.\")", "def approve(self):\n self._check_if_open()\n data = {\"approved\": True}\n return self.post(\"approve\", data)", "async def _lyrics(self, ctx: commands.Context):\n if not ctx.voice_state.is_playing:\n raise commands.CommandError('Nothing being played at the moment.')\n\n # Get song name listed on youtube\n song_title = ctx.voice_state.current.source.track\n if not song_title:\n return await ctx.send(\"Couldn't find lyrics for this track!\")\n\n song_title = re.sub(\"[(\\[].*?[)\\]]\", \"\", song_title).strip() # Remove parenthesis from song title\n # Get artist name listed on youtube\n artist_name = ctx.voice_state.current.source.artist\n # Instance of GeniusSong class using the Genius API\n genius_song = GeniusSong(song_title, artist_name)\n # Try getting the lyrics using the lyricsgenius library\n lyrics = genius_song.fastlyrics()\n\n # In case of no lyrics found. Use the other (slower) method\n if not lyrics:\n res = genius_song.get_response() # Generate a response using the Genius API to get the songs\n if res:\n # Find the most similar artist comparing the artist on YouTube and Genius\n artist_name = genius_song.return_similar_artist(res)\n # Artist didn't match\n if not artist_name:\n await ctx.send(\"Couldn't find similar artists. The lyrics might not be the expected.\")\n\n # Get the lyrics using the lyricsgenius library with the new artist\n lyrics = genius_song.fastlyrics(artist_name)\n\n else:\n return await ctx.send(\n \"**Error!**\\nThere is a problem with Genius.\\nTry again in a few minutes. \"\n \"\\nYou can also try the command `fastlyrics`.\")\n\n if lyrics:\n # Split lyrics into fields\n fields = genius_song.split_lyrics(lyrics)\n # Create an embed message\n embed = embed_msg(\n title=song_title.capitalize() + \"\\n{}\".format(artist_name),\n description=\"\",\n footer=\"Lyrics provided by Genius.\",\n field_values=fields,\n inline=False\n )\n return await ctx.send(embed=embed)\n\n return await ctx.send(\"Lyrics couldn't be found.\")", "async def admin(self, ctx):\n if ctx.message.author.top_role.name.lower() == 'officer':\n await ctx.message.channel.send(\n 'List of useable commands for the parent command: **admin**\\n\\n **eboard admin auto** - updates the '\n 'new seats given current election data.\\n\\n**eboard admin set <position> <User#0000>** - assigns a '\n 'position to target user.\\n\\n**eboard admin remove <position> <User#0000>** - remove a target user '\n 'from their position.\\n\\n**eboard admin list** - lists the positions in the SQLite table.')", "def set_lyrics(self, lyrics: str) -> None:\n self.lyrics = lyrics", "def approve_me(message):\n load_users(message._client.users)\n sender_id = message._get_user_id()\n target = user_list[sender_id].details['name']\n if (user_list[sender_id].is_unknown):\n message.reply(Strings['APPROVER_REQUEST'])\n names = list_to_names(user_list.admin_list)\n approval_message = Strings[\n 'APPROVER_REQUEST_DETAIL'].format(\">, <@\".join(names), target)\n message._client.send_message(config.AUTH_CHANNEL, approval_message)\n else:\n message.reply(\n \"Your status is already: \" + user_list[sender_id].level.name)", "async def stan(self, ctx, *args):\n if args:\n if args[0] == 'update':\n amount = len(self.artists)\n self.artists = []\n urls_to_scrape = ['https://kprofiles.com/k-pop-girl-groups/',\n 'https://kprofiles.com/k-pop-boy-groups/',\n 'https://kprofiles.com/co-ed-groups-profiles/',\n 'https://kprofiles.com/kpop-duets-profiles/',\n 'https://kprofiles.com/kpop-solo-singers/']\n for 
url in urls_to_scrape:\n self.artists += scrape_kprofiles(url)\n\n database.set_attr(\"data\", \"artists\", self.artists)\n\n await ctx.send(f\"Artist list succesfully updated, {len(self.artists) - amount} new entries, \"\n f\"{len(self.artists)} total entries\")\n self.logger.info(misolog.format_log(ctx, f\"artist list updated; {len(self.artists) - amount} new, \"\n f\"{len(self.artists)} total\"))\n return\n\n elif args[0] == 'clear':\n self.artists = []\n database.set_attr(\"data\", \"artists\", self.artists)\n await ctx.send(\"Artist list cleared\")\n self.logger.info(misolog.format_log(ctx, f\"artist list cleared\"))\n return\n\n if self.artists:\n artist = str(rd.choice(self.artists))\n await ctx.send('stan ' + artist)\n self.logger.info(misolog.format_log(ctx, f\"artist={artist}\"))\n else:\n await ctx.send(\"Error: artist list is empty, please use >stan update\")\n self.logger.warning(misolog.format_log(ctx, f\"artist list empty\"))", "async def spotify(self, ctx):\n if ctx.invoked_subcommand is None:\n await ctx.send('Invalid command passed. Use the `?help spotify` command to learn more.')", "async def ironman(self, ctx):\n if has_post_permission(ctx.guild.id, ctx.channel.id):\n out = (':tools: __**IRONMAN**__ :tools:\\n' \\\n 'If you want to become an ironman, please react to this post with a :thumbsup:. '\n 'This will **RESET** your account and give you the ironman role. '\n 'You will be unable to trade with other players or gamble. '\n 'In return, you will be able to proudly display your status as an ironman, '\n 'by the way.')\n msg = await ctx.send(out)\n\n if await self.confirm(ctx, msg, out):\n ctx.user_object.reset_account()\n ctx.user_object.is_ironman = True\n ctx.user_object.save()\n # ironman_role = discord.utils.get(ctx.guild.roles, name=\"Ironman\")\n # await ctx.author.add_roles(ironman_role, reason='Wanted to become an ironmeme.')\n name = get_display_name(ctx.author)\n await msg.edit(content=f':tools: __**IRONMAN**__ :tools:\\n'\n f'Congratulations, {name}, you are now '\n 'an ironman!')", "def record_lyrics_result(self, track_id, songdata):\n self.lyrics.insert_one(\n {\n \"_id\": track_id,\n \"response_artist\": songdata.artist,\n \"response_title\": songdata.title,\n \"lyrics\": songdata.lyrics,\n }\n )", "async def quotes(self, ctx):\n\n\t\tawait self.message_leaderboard(ctx, \"quotes\")", "def song_has_lyrics():\n pass", "def _query_commands(self):\n # TODO: make this work\n self.player.respond(\"Hi there! Ask me to play artists or songs. 
\"\n \"I can also find songs that are similar to other \"\n \"artists.\")", "def add_music_from_search(request, music_id: int) -> HttpResponse:\n music_item = get_object_or_404(Music, id=music_id)\n\n if music_item in request.user.profile.playlist.all():\n return HttpResponse('Success')\n\n playpos = PlayPosition(\n position=music_item,\n plist=request.user.profile\n )\n\n playpos.add_order()\n playpos.save()\n\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))", "def set_solo(self, track, xclip, ident, value = None):\n if track in self.song().tracks + self.song().return_tracks:\n if value in KEYWORDS:\n track.solo = KEYWORDS[value]\n else:\n track.solo = not(track.solo)", "def add_song(self):\n settings = dict(initialdir=pathlib.Path().absolute(), title=\"Choose songs\", filetypes=(\n (\"flac files\", \"*.flac\"),\n (\"mp3 files\", \"*.mp3\"),\n (\"all files\", \"*\")))\n\n song = filedialog.askopenfilename(**settings)\n\n self.update_playlist(song)\n self.listbox.insert(\"end\", self.song_list[-1]['name'])", "def add_new_song(self):\n return \"New Song Added\"", "async def play(self, ctx, *, query):\n player = self.bot.lavalink.players.get(ctx.guild.id)\n query = query.strip('<>')\n if player.is_connected:\n if not ctx.author.voice or not ctx.author.voice.channel or player.connected_channel.id != ctx.author.voice.channel.id:\n return await ctx.send(\"You have to be in my voice channel to queue a song :no_entry:\")\n else:\n if not ctx.author.voice or not ctx.author.voice.channel:\n return await ctx.send(\"Join a voice channel :no_entry:\")\n else:\n player.store('sessionowner', ctx.author.id)\n player.store('channel', ctx.channel.id)\n await player.connect(ctx.author.voice.channel.id)\n if not url_re.match(query):\n query = \"ytsearch:{}\".format(query)\n results = await self.bot.lavalink.get_tracks(query)\n if not results or not results['tracks']:\n return await ctx.send(\"I could not find any songs matching that query :no_entry:\")\n s=discord.Embed()\n if results[\"loadType\"] == \"PLAYLIST_LOADED\":\n tracks = results[\"tracks\"]\n for track in tracks:\n player.add(requester=ctx.author.id, track=track)\n s.description = \"Enqueued {} with **{}** tracks <:done:403285928233402378>\".format(results['playlistInfo']['name'], len(tracks))\n await self.bot.get_channel(player.fetch('channel')).send(embed=s)\n else:\n track = results[\"tracks\"][0]\n player.add(requester=ctx.author.id, track=track)\n timetill = 0\n for x in player.queue:\n timetill += x.duration\n if player.current:\n timetill += player.current.duration - player.position\n else:\n timetill = 0 \n index = [x.track for x in player.queue].index(track[\"track\"]) + 1\n s.set_author(name=\"Added to Queue\", icon_url=ctx.author.avatar_url)\n s.set_thumbnail(url=\"https://img.youtube.com/vi/{}/default.jpg\".format(track[\"info\"][\"identifier\"]))\n s.add_field(name=\"Song\", value=\"[{}]({})\".format(track[\"info\"][\"title\"], track[\"info\"][\"uri\"]), inline=False)\n s.add_field(name=\"Duration\", value=self.format_time(track[\"info\"][\"length\"]), inline=True)\n s.add_field(name=\"Position in Queue\", value=index)\n if timetill != 0:\n s.add_field(name=\"Estimated time till playing\", value=self.format_time(timetill-track[\"info\"][\"length\"]))\n else:\n s.add_field(name=\"Estimated time till playing\", value=\"Next\")\n await self.bot.get_channel(player.fetch('channel')).send(embed=s)\n if not player.is_playing:\n await player.play()", "def update_knowledge(self):\n pass", "def dr_approve(self):\n print \"DR 
approved this form. Current state:\", self.state", "def submit(self):\n self.parent().app.setOverrideCursor(gui.QCursor(core.Qt.WaitCursor))\n changes = []\n new = changed = False\n for ix, wins in enumerate(self.fields):\n if ix < len(self.parent().artists):\n fname, lname = wins[0].text(), wins[1].text()\n artist = self.parent().artists[ix]\n if fname != artist.first_name or lname != artist.last_name:\n changed = True\n changes.append((artist.id, fname, lname))\n else:\n new = True\n changes.append((0, wins[0].text(), wins[1].text()))\n if changed or new:\n dmla.update_artists(changes)\n else:\n qtw.QMessageBox.information(self, 'Albums', 'Nothing changed')\n self.parent().app.restoreOverrideCursor()\n self.parent().get_all_artists()\n self.parent().do_select()", "def add_song(self):\n # Error check for blank inputs\n if \"\" in (self.root.ids.input_title.text, self.root.ids.input_artist.text, self.root.ids.input_year.text):\n self.root.ids.status_text.text = \"All fields must be completed\"\n return\n # Error check for negative numbers\n try:\n if int(self.root.ids.input_year.text) < 0:\n self.root.ids.status_text.text = \"Year must be >= 0\"\n return\n # Error check for invalid numbers\n except ValueError:\n self.root.ids.status_text.text = \"Please enter a valid number\"\n return\n # Song add, clear inputs, sort songlist\n song_to_add = Song(self.root.ids.input_title.text, self.root.ids.input_artist.text,\n int(self.root.ids.input_year.text))\n self.songs.add_song(song_to_add)\n SongsToLearnApp.clear_inputs(self)\n self.sort_songs(self.root.ids.sort_options.text)", "def _run_lyrics_gui(self):\n self._log.info(\"Searching for lyrics\")\n\n self.save_lyrics(find=True)\n Action(\"load\", load=True)\n\n self._log.info(\"Done\")", "async def status(self, ctx):\n if has_post_permission(ctx.guild.id, ctx.channel.id):\n if adv.is_on_adventure(ctx.author.id):\n out = adv.print_adventure(ctx.author.id)\n else:\n out = 'You are not doing anything at the moment.'\n await ctx.send(out)", "def add_music(request, music_id: int) -> HttpResponse:\n music_item = get_object_or_404(Music, id=music_id)\n\n if music_item in request.user.profile.playlist.all():\n return HttpResponse('Success')\n\n playpos = PlayPosition(\n position=music_item,\n plist=request.user.profile\n )\n playpos.add_order()\n playpos.save()\n\n return HttpResponse('Success')", "async def genius(self, ctx, *args):\n args = argsmachine(args)\n async with ctx.channel.typing():\n if len(args) > 0:\n headers = {'Authorization': 'Bearer ' + token}\n search_url = f'https://api.genius.com/search?q={args}'\n response = requests.get(search_url, headers=headers)\n response = response.json()\n allitems = []\n for item in response['response']['hits']:\n new = item['result']\n newsong = Song(new['full_title'], new['url'], new)\n allitems.append(newsong)\n embed = Embed()\n embed.description = concatenator(allitems)\n await ctx.channel.send('Here are some results of the songs that you wanted. Type in the # of which result you want the lyrics to, or \"no\" to back out!', embed=embed)\n while True:\n try:\n message = await self.bot.wait_for('message', check = check, timeout=30)\n message = message.content.strip()\n if message == 'no':\n break\n else:\n message = int(message)-1\n break\n except asyncio.TimeoutError:\n await ctx.send(\"You didn't reply in time! 
Enter the #.\")\n continue\n except:\n await ctx.send(f\"Try entering the # again, or enter 'no' to exit the search command.\")\n continue\n\n try:\n chosensong = allitems[message]\n site = requests.get(chosensong.url)\n site = bs4.BeautifulSoup(site.text, features='html.parser')\n chosensong.lyrics = site.find(\"div\", class_=\"lyrics\").get_text()\n \n #Discord supports only 2048 characters in each embed message so this is used to break it up into multiple messages\n messages_needed = math.ceil(len(chosensong.lyrics) / 2048)\n lyricsembed=Embed()\n counter = 1\n currentchar = 0\n nextchar = 2048\n while messages_needed >= counter:\n lyrics = chosensong.lyrics[currentchar:nextchar]\n lyricsembed.description = lyrics\n await ctx.send(f'Here are the lyrics for `{chosensong.title}`, `{counter}`/`{messages_needed}`!', embed=lyricsembed)\n currentchar += 2048\n nextchar += 2048\n counter += 1\n except:\n await ctx.send(f\"Stopping the genius command.\")\n else:\n await ctx.send(f\"Can't really search for lyrics if there are none provided, right? Try again with words, song titles, or artist names.\")", "def action_approve(self):\n if not self.date_approve:\n self.date_approve = fields.Datetime.now()\n\n config = self.env['ka_hr_payroll.config'].default_config()\n if check_rapel_status(self, config):\n self.action_rapel()\n else:\n self.action_done()", "async def _queue(self, msg):\n if msg.voice_client is not None:\n if msg.guild.id in self.player:\n if self.player[msg.guild.id]['queue']:\n emb = discord.Embed(\n colour=self.random_color, title='queue')\n emb.set_footer(\n text=f'Command used by {msg.author.name}', icon_url=msg.author.avatar_url)\n for i in self.player[msg.guild.id]['queue']:\n emb.add_field(\n name=f\"**{i['author'].author.name}**\", value=i['title'], inline=False)\n return await msg.send(embed=emb, delete_after=120)\n\n return await msg.send(\"No songs in queue\")", "async def can_you_add_this(ctx):\n question = '**Q:** Why don\\'t you have _<insert title here>_ in MyAniList? I have my 18+ toggle on but I still ' \\\n 'can\\'t find _<insert title here>_. Can\\'t you just add it? '\n answer = '**A:** We do not own the contents of what gets displayed in the app, AniList.co does.** MyAniList is a ' \\\n 'third-party app that is __not owned by them__. Please contact the team at AniList.co, or join their ' \\\n 'Discord (`https://discord.gg/uDaxJf7`) to request a series be added. 
'\n message = generate_message(ctx, question, answer)\n await ctx.send(message)", "def add_to_wish_list(self, user_id, caption):\n with self.connection:\n return self.cursor.execute(\"INSERT INTO 'wish_list' (`user_id`, `wish_list`) VALUES(?,?)\", (user_id,caption))", "def cheer(self, songs):\n if self.favourite_song in songs:\n return \"Whoo!\"", "def add_entry():\n username = util.remove_commas_from_string(request.form[\"name\"])\n link = util.remove_commas_from_string(request.form[\"ytLink\"])\n song = util.remove_commas_from_string(request.form[\"songName\"])\n\n festive = CHRISTMAS_MODE and \"christmasSong\" in request.form\n\n with database.connect_to_database() as db:\n user_id = database.get_userid(db, username)\n database.add_song(db, link, song, user_id, month=12 if festive else None)\n\n return redirect(url_for('main'))", "async def approval_message_edit(\n message: Message,\n approve_text: str,\n rejection: bool = False\n ) -> None:\n if message.embeds:\n embed = message.embeds[0]\n if rejection:\n embed.colour = Colour(settings.embed_color_severe)\n else:\n embed.colour = Colour(settings.embed_color_success)\n\n await message.edit(content=approve_text, embed=embed)", "async def optout(self, ctx):\n optout.insert_one({\"_id\": ctx.author.id})\n await ctx.send(f\"You have **opted out** of A Sound Mood. To join the program again, use ?optin.\")", "def vp():\n if g.active.is_empty:\n txt = F('advise search') if g.model.is_empty else F('advise add')\n g.message = F('pl empty') + \" \" + txt\n\n else:\n g.browse_mode = \"normal\"\n g.model.songs = g.active.songs\n g.message = F('current pl')\n\n g.content = generate_songlist_display(zeromsg=g.message)", "def approve_public_credit_name(self):\n self.public_credit_name_approved = True\n self.save()", "async def autorole(self, ctx: commands.Context):", "async def favor(self, ctx):\n east = ctx.guild.get_member(339119069066297355)\n if not east or east.status != discord.Status.online:\n await ctx.send(f\"I'm afraid I can't do that, {ctx.author.display_name}.\")\n return\n await ctx.send(\"&East, could I ask you for a favor? I need someone to verify my code.\")\n await asyncio.sleep(2)\n async with ctx.typing():\n await asyncio.sleep(1)\n await ctx.send(\"Oh my. 
Well, if you insist ;)\")", "def approve_me(message):\n users = hf.get_users()\n for user in users:\n if user[\"id\"] == message._get_user_id():\n if user[\"approval_level\"] == \"unapproved\": # Unknown\n message.reply(Strings['APPROVER_REQUEST'])\n admins = hf.get_admins()\n names = []\n for admin in admins:\n names.append(admin[\"name\"])\n\n approval_message = Strings[\n 'APPROVER_REQUEST_DETAIL'].format(\">, <@\".join(names), user[\"name\"])\n\n #message._client.send_message(config.AUTH_CHANNEL, approval_message)\n message._client.send_message(public_channel, approval_message)\n else:\n message.reply(\":x: Your approval level is already: \" + str(user[\"approval_level\"]))", "def add_song_to_playlist(self):\n #populate our songs dictionary\n self.get_liked_videos()\n\n #collect all of uri\n uris = []\n for song,info in self.all_song_info.items():\n uris.append(info[\"spotify_uri\"])\n\n #create a new playlist\n playlist_id = self.create_playlist()\n\n #add all songs into new playlist\n\n #Spotipy can only add 100 songs at a time to a playlist that is why this method is taken\n g = len(uris)\n if g > 100:\n s = 0\n e = 99\n while g > 100:\n self.sp.user_playlist_add_tracks(user=self.username, playlist_id=playlist_id,\n tracks=uris[s:e])\n g -= 100\n s = e + 1\n e += 100\n self.sp.user_playlist_add_tracks(user=self.username, playlist_id=playlist_id,\n tracks=uris[s:])\n else:\n self.sp.user_playlist_add_tracks(user=self.username, playlist_id=playlist_id,\n tracks=uris)", "def list(ctx):\n\tfor songName in songs:\n\t\tyield from bot.send_message(ctx.message.author,songName)", "async def skip(self, ctx):\r\n if ctx.message.channel.id != 701868237771505695:\r\n return await ctx.send(\"**Error:** Music Bot commands are only available in <#701868237771505695>\")\r\n if self.music_off:\r\n return await ctx.send(\"**Error:** Music Bot features are currently off\")\r\n if ctx.voice_client is None or ctx.voice_client is not self.voice:\r\n return await ctx.send(\"**Error:** You must be connected to the voice channel.\")\r\n\r\n is_mod = False\r\n for role in ctx.message.author.roles:\r\n if role.name == \"Moderators\":\r\n is_mod = True\r\n if is_mod or ctx.message.author.id == self.songs[0][1]:\r\n await ctx.send(\"Skipping song...\")\r\n return await self.voice.stop()\r\n\r\n if ctx.author.id not in self.songs[0][4]:\r\n self.skip_votes.append(ctx.author.id)\r\n if len(ctx.message.author.voice.channel.members) - 1 > len(self.songs[0][4]) * 2:\r\n await ctx.send(\"{0} skip votes registered, need {1} to skip song.\".format(len(self.songs[0][4]), int(\r\n (len(ctx.message.author.voice.channel.members) - 1) / 2)))\r\n else:\r\n await ctx.send(\"Skipping song\")\r\n await self.voice.stop()", "def change_approval(self, status):\r\n if status == 'approve':\r\n return self.approve()\r\n elif status == 'disapprove':\r\n return self.disapprove()", "def ToggleApprovalTracker(self, event):\n pass", "def approve(user):\n if user.approved:\n logging.warn('noop - User %d already approved', user.id)\n return user\n user.approved = True\n for message in user.messages:\n if message.text == config.MSG_WELCOME:\n session.delete(message)\n session.add(user)\n session.commit()\n return user", "def userapprove_admin(user_id):\n # take the supplied user_id and use that to access a given user.\n\n # User objects list which includes list of all users which can be broken down into editors and sponsors\n # get individual user\n user = db.session.query(User).filter(User.id==user_id).first()\n # update status to approved\n 
user.user_status = 'approved'\n # commit to database\n db.session.commit()\n\n return redirect(url_for('admin_bp.usersview_admin'))", "async def 초대하기(self, ctx):\n embed = discord.Embed(title=\"저를 파티에 초대해주세요!\", description=f\"**{ctx.author.name}**, 아래의 링크를 사용하세요\\n[link](https://discord.com/oauth2/authorize?client_id=749629426777456691&permissions=8&scope=bot)\", color=0xeff0f1)\n await ctx.send(embed=embed)", "async def p(con,*,url):\r\n check = str(con.message.channel)\r\n if check == 'Direct Message with {}'.format(con.message.author.name):\r\n await bot.send_message(con.message.channel, \"**You must be in a `server voice channel ` to use this command**\")\r\n\r\n if check != 'Direct Message with {}'.format(con.message.author.name):\r\n if bot.is_voice_connected(con.message.server) == False:\r\n await bot.join_voice_channel(con.message.author.voice.voice_channel)\r\n\r\n if bot.is_voice_connected(con.message.server) == True:\r\n if player_status[con.message.server.id]==True:\r\n song_names[con.message.server.id].append(url)\r\n await bot.send_message(con.message.channel, \"**Şarkı sıraya alındı :white_check_mark:**\")\r\n\r\n\r\n \r\n if player_status[con.message.server.id]==False:\r\n player_status[con.message.server.id]=True\r\n song_names[con.message.server.id].append(url)\r\n song=await bot.voice_client_in(con.message.server).create_ytdl_player(song_names[con.message.server.id][0], ytdl_options=opts, after=lambda: bot.loop.create_task(after_song(con,False)))\r\n servers_songs[con.message.server.id]=song\r\n servers_songs[con.message.server.id].start()\r\n msg = await bot.send_message(con.message.channel, \"**Şuanda oynatılan > {}**\".format(servers_songs[con.message.server.id].title))\r\n now_playing[con.message.server.id]=msg\r\n song_names[con.message.server.id].pop(0)", "def update_access_token(self):\n self.token = util.prompt_for_user_token(self._username, scope,\n client_id=const.CLIENT_ID,\n client_secret=const.CLIENT_SECRET,\n redirect_uri=const.REDIRECT_URL)\n self._client = spotipy.Spotify(auth=self.token)", "def moderate(request, content_type, object_id, mode):\n user = request.user\n content_type_object = ContentType.objects.get(id = content_type)\n object = content_type_object.model_class().objects.get_all(id = object_id)\n status = ContentApprovalVote.objects.vote(object, user, mode)\n \n redirect_url = request.GET.get('queue_url', reverse('moderation-queue'))\n return http.HttpResponseRedirect(redirect_url)", "def add_songs(self, artist_list):\n\n \"Terms that identify songs that aren't really songs\"\n conn = self.conn\n conn.text_factory = str\n c = conn.cursor()\n\n if artist_list is None:\n return \"You must provide a list of artists for whom to find songs.\"\n else:\n for artist in artist_list:\n print(\"Finding songs for \" + artist)\n all_songs_by_artist = pygenius_songs.findAllSongs(artist)\n already_scraped = list()\n for song in all_songs_by_artist:\n url = song[0]\n title = song[1]\n print(title)\n c.execute(\"SELECT count(*) FROM songs WHERE title = (?) 
AND artist = (?)\", (title, artist))\n check_in_db = c.fetchall()\n if check_in_db[0][0] == 0:\n if title not in already_scraped:\n if not [i for i, x in enumerate(self.bad_terms) if x in title]:\n already_scraped.append(title)\n c.execute('INSERT INTO songs(title, artist, url) values (?,?,?)', (title, artist, url))\n conn.commit()", "def set_mute(self, track, xclip, ident, value = None):\n if track in self.song().tracks + self.song().return_tracks:\n if value in KEYWORDS:\n track.mute = KEYWORDS[value]\n else:\n track.mute = not(track.mute)", "def song(self, value):\r\n self._song_id = value\r\n data = Song(value)\r\n self.songtitel = data.songtitel if data.found else \"\"", "def song_added(song, playlist_id):\n if song.added_by == 'cedmunds90':\n print('Ruhpushuh {song_id} ({title}) ruhpush a shuh {playlist_id} rhup {added_by}.'\n .format(song_id=song.id,\n title=song.title,\n playlist_id=playlist_id,\n added_by=song.added_by))\n pass\n else:\n print('Song {song_id} ({title}) added to playlist {playlist_id} by {added_by}.'\n .format(song_id=song.id,\n title=song.title,\n playlist_id=playlist_id,\n added_by=song.added_by))\n\n pass", "def approve(self, feedback=None):\n self.hit.generate_connection()\n self.hit.connection.approve_assignment(self.mturk_id, feedback=feedback)\n self.update()", "async def proo (pros):\n if not pros.text[0].isalpha() and pros.text[0] not in (\"/\", \"#\", \"@\", \"!\"):\n index = random.randint(0, len(memes.PRO_STRINGS) - 1)\n reply_text = memes.PRO_STRINGS[index]\n await pros.edit(reply_text)", "def __showEditSpellingMenu(self):\n proj = e5App().getObject(\"Project\")\n projetOpen = proj.isOpen()\n pwl = e5App().getObject(\"Project\").getProjectDictionaries()[0]\n self.__editProjectPwlAct.setEnabled(projetOpen and bool(pwl))\n pel = e5App().getObject(\"Project\").getProjectDictionaries()[1]\n self.__editProjectPelAct.setEnabled(projetOpen and bool(pel))\n \n from QScintilla.SpellChecker import SpellChecker\n pwl = SpellChecker.getUserDictionaryPath()\n self.__editUserPwlAct.setEnabled(bool(pwl))\n pel = SpellChecker.getUserDictionaryPath(True)\n self.__editUserPelAct.setEnabled(bool(pel))", "def moglyv_stud(update, context):\n #update.callback_query.message.reply_text('У нас є багато цікавих можливостей для студентів. З чого почнемо? ')\n \n kb_moglyv_stud = [[InlineKeyboardButton(\"Проєктне навчання\",callback_data = \"proekt_nav\")],\n [InlineKeyboardButton(\"Дуальна освіта\",callback_data = \"du_osvita\")],\n [InlineKeyboardButton(\"Працевлаштування\",callback_data = \"pracevl\")],\n [InlineKeyboardButton(\"Практика\",callback_data = \"prakt\")]]\n \n\n \n reply = InlineKeyboardMarkup(kb_moglyv_stud)\n \n update.callback_query.message.reply_text('У нас є багато цікавих можливостей для студентів. З чого почнемо?', reply_markup = reply)", "def on_accept(self, update, _context):\n self.updater.bot.send_message(\n chat_id=update.effective_chat.id,\n text=\"Alege timpul\",\n reply_markup=InlineKeyboardMarkup(k.build_dynamic_keyboard_first_responses()),\n )", "async def activity(self, ctx, activity:str):\n if activity.lower() not in recommendations.keys():\n await ctx.send(f\"Sorry, I don't have a playlist for that! 
Maybe you could try `?activity {random.choice(list(recommendations.keys()))}`.\")\n else:\n\n playlists = ''\n \n for i in range(len(recommendations[activity.lower()])): \n playlists += f'\\n[Playlist {i+1}]({recommendations[activity.lower()][i]})'\n\n embed = discord.Embed(title=f\"A Sound Mood's Recommendations for {activity.lower()}\",\n description=playlists,\n color=random.randint(0, 0xFFFFFF))\n\n embed.set_footer(text=f\"Requested by @{ctx.message.author}\", icon_url=ctx.message.author.avatar_url)\n\n await ctx.send('', embed=embed)", "def toggle(self, *_):\r\n \r\n global ac\r\n if self.author_f_ent.var.get():\r\n self.add_a['state'] = 'normal'\r\n elif self.author_m_ent.var.get():\r\n self.add_a['state'] = 'normal'\r\n elif self.author_l_ent.var.get():\r\n self.add_a['state'] = 'normal'\r\n else:\r\n self.add_a['state'] = 'disabled'", "async def yt(self, ctx, *, url):\r\n if ctx.message.channel.id != 701868237771505695:\r\n return await ctx.send(\"**Error:** Music Bot commands are only available in <#701868237771505695>\")\r\n if self.music_off:\r\n return await ctx.send(\"**Error:** Music Bot features are currently off\")\r\n if ctx.voice_client is None:\r\n return await ctx.send(\"**Error:** The bot is not connected to the voice channel you are in\")\r\n if ctx.voice_client.channel is not ctx.message.author.voice.channel:\r\n return await ctx.send(\"**Error:** You must be connected to the voice channel.\")\r\n if len(self.songs) + self.processing_songs >= 30:\r\n return await ctx.send(\"**Error:** There can only be a maximum of 30 items in the queue\")\r\n if self.processing_songs >= 3:\r\n return await ctx.send(\"**Error:** Please wait until some of the other songs are finished processing\")\r\n\r\n self.played_time = time.time()\r\n self.voice = ctx.voice_client\r\n self.voice_channel = ctx.message.channel\r\n async with ctx.message.channel.typing():\r\n self.processing_songs = self.processing_songs + 1\r\n try:\r\n player = await YTDLSource.from_url(url, loop=self.bot.loop)\r\n except:\r\n self.processing_songs = self.processing_songs - 1\r\n return await ctx.send(\"Error processing song. 
Invalid URL or no matching videos using that search term\")\r\n if player is None:\r\n self.processing_songs = self.processing_songs - 1\r\n return await ctx.send(\"**Error:** Song file too large!\")\r\n self.processing_songs = self.processing_songs - 1\r\n self.songs.append([player, ctx.message.author.id, ctx.voice_client, ctx.message.channel, []])\r\n\r\n shortened_title = self.title_shorten(player.title)\r\n await ctx.send('Queued: `{}`'.format(shortened_title))", "async def ball(self, ctx, question):\r\n if ctx.message.author == self.bot.user:\r\n return\r\n answers = ['It is certain.', 'It is decidedly so.', 'Without a doubt.', 'Yes, definitely.', 'As I see it, yes.', 'Most likely.', 'Outlook good.', 'Yes.', 'Signs point to yes.',\r\n 'Reply hazy, try again.', 'Ask again later.', 'Better not tell you know.', 'Cannot predict now.', 'Concentrate and try again.',\r\n 'Don\\'t count on it.', 'My reply is no.', 'My sources say no.', 'Outlook not so good.', 'Very doubtful.', 'The chances are the same as you buying every pack, so not likely.']\r\n await self.bot.say('{}, {}'.format(ctx.message.author.mention, random.choice(answers).lower()))", "def song(song_id):\n return process_input(song_id) #jsonify(recomendations)", "async def search(self, ctx, *, query):\n player = self.bot.lavalink.players.get(ctx.guild.id)\n query = \"ytsearch:{}\".format(query)\n if player.is_connected:\n if not ctx.author.voice or not ctx.author.voice.channel or player.connected_channel.id != ctx.author.voice.channel.id:\n return await ctx.send(\"You have to be in my voice channel to queue a song :no_entry:\")\n else:\n if not ctx.author.voice or not ctx.author.voice.channel:\n return await ctx.send(\"Join a voice channel :no_entry:\")\n else:\n player.store('sessionowner', ctx.author.id)\n player.store('channel', ctx.channel.id)\n await player.connect(ctx.author.voice.channel.id)\n results = await self.bot.lavalink.get_tracks(query)\n if not results or not results['tracks']:\n return await ctx.send(\"I could not find any songs matching that query :no_entry:\")\n msg = \"\"\n for i, x in enumerate(results[\"tracks\"][:10], start=1):\n msg += \"{}. 
**[{}]({})**\\n\".format(i, x[\"info\"][\"title\"], x[\"info\"][\"uri\"])\n message = await ctx.send(embed=discord.Embed(description=msg).set_footer(text=\"Choose a number to the queue the song | cancel\"))\n def check(m):\n return m.channel == ctx.channel and m.author == ctx.author and (m.content.isdigit() or m.content.lower() == \"cancel\")\n try:\n response = await self.bot.wait_for(\"message\", check=check, timeout=60)\n if response.content.lower() == \"cancel\":\n await response.delete()\n return await message.delete()\n else:\n track = results[\"tracks\"][int(response.content) + 1]\n player.add(requester=ctx.author.id, track=track)\n timetill = 0\n for x in player.queue:\n timetill += x.duration\n if player.current:\n timetill += player.current.duration - player.position\n else:\n timetill = 0 \n index = [x.track for x in player.queue].index(track[\"track\"]) + 1\n s=discord.Embed()\n s.set_author(name=\"Added to Queue\", icon_url=ctx.author.avatar_url)\n s.set_thumbnail(url=\"https://img.youtube.com/vi/{}/default.jpg\".format(track[\"info\"][\"identifier\"]))\n s.add_field(name=\"Song\", value=\"[{}]({})\".format(track[\"info\"][\"title\"], track[\"info\"][\"uri\"]), inline=False)\n s.add_field(name=\"Duration\", value=self.format_time(track[\"info\"][\"length\"]), inline=True)\n s.add_field(name=\"Position in Queue\", value=index)\n if timetill != 0:\n s.add_field(name=\"Estimated time till playing\", value=self.format_time(timetill-track[\"info\"][\"length\"]))\n else:\n s.add_field(name=\"Estimated time till playing\", value=\"Next\")\n await response.delete()\n await message.delete()\n await self.bot.get_channel(player.fetch('channel')).send(embed=s)\n if not player.is_playing:\n await player.play()\n except asyncio.TimeoutError:\n return await ctx.send(\"Timed out :stopwatch:\")", "def approve(pengusulan_id, status):\n user = Staff.is_login()\n if user is None:\n return redirect(url_for('auth.login'))\n\n if user.get_unit_role() == 'staff':\n flash(f\"Anda tidak memiliki akses untuk melakukan approval pengusulan\", flash_code.WARNING)\n return redirect(url_for('pengusulan.table'))\n\n pengusulan_approve = Pengusulan.approve(\n pengusulan_id=pengusulan_id,\n status=status,\n petugas_id=user.id\n )\n if pengusulan_approve:\n flash(f\"Status Pengusulan Buku telah berhasil diperbarui\", flash_code.SUCCESS)\n else:\n flash(f\"Status Pengusulan Buku gagal diperbarui\", flash_code.DANGER)\n return redirect(url_for('pengusulan.manage'))", "async def admin(ctx):\n info = await(bot.application_info())\n mention = info.owner.mention\n message = \"My administrator is the glorious {}. 
Fear them, for they are mighty.\".format(mention)\n await(ctx.send(message))", "def test_approve(self):\n\n username,userpass = self.testdata.find_account_for('toolsubmitter')\n\n self.utils.account.login_as(username,userpass)\n\n self.contribtool.approve(TOOLNAME,TOOLLICENSEDATA)", "async def song_info(self, msg):\n if msg.voice_client is not None and msg.voice_client.is_playing() is True:\n emb = discord.Embed(colour=self.random_color, title='Currently Playing',\n description=self.player[msg.guild.id]['player'].title)\n emb.set_footer(\n text=f\"{self.player[msg.guild.id]['author'].author.name}\", icon_url=msg.author.avatar_url)\n emb.set_thumbnail(\n url=self.player[msg.guild.id]['player'].thumbnail)\n return await msg.send(embed=emb, delete_after=120)\n\n return await msg.send(f\"**No songs currently playing**\".title(), delete_after=30)", "def press_entry(self, instance):\n name = instance.text\n\n self.status_text2 = \"You have not learned {}\".format((self.song_list.get_song(name))) # This would update the bottom label if the user press on the temp_button\n instance.state = 'normal'\n #Note that I failed to update the bottom label text.", "def can_approve(self, user, **data):\n raise Return(False)", "def quotazione(update, context):\n update.message.reply_text('Quando un giocatore verrà chiamato, sarà considerata come base d’asta la sua quotazione attuale (QA al momento dell’asta) della lista di Fantacalcio. È necessario rimanere sempre con i crediti sufficienti per completare la propria rosa. ')", "def accepting(self):\n quote = self.get_quotes('labor')[0]\n quantity = max(0, quote.price - 14)\n self.create('labor', quantity)\n self.accept_quote_partial(quote, quantity)", "def new_song():\n song_id = int(request.args['song_id'])\n track_info = shiva.get_tracks([song_id])[song_id]\n vlc.add_song(track_info['path'])\n return 'ok'", "def confirmed(self):", "def validate_song_is_added_to_playlist(self):\n if self.track == 'Enjoy Enjaami':\n option = element['EnjoyEnjaami']\n elif self.track == 'Inna Mylu':\n option = element['InnaMylu']\n return com_util.find_text(self.driver, option)", "def add_song(self):\r\n path = input(\"Give file path:\\t\") # Request file path\r\n path = path.replace('\\\\', '/')\r\n if self.path_song_re.match(path) and not self.path_storage_re.match(\r\n path): # Check that the path leads to a song that is not already found in Storage\r\n copy(path, self.p_storage) # Copy the song to the storage directory\r\n file_title, form = path.split(\"/\")[-1].split(\".\") # Save file title and format from the path\r\n sql = \"SELECT COUNT(*) FROM songs WHERE file_title = %s AND form = %s\" # Check the existence of a song\r\n # with the same title and format in the database\r\n self.cursor.execute(sql, (file_title, form))\r\n r = self.cursor.fetchall()\r\n if r[0][0] != 0:\r\n return \"A song with this file name and format already exists!\"\r\n song_title = input(\"Song title:\\t\")\r\n artist = input(\"Artist:\\t\")\r\n data = input(\"Release date:\\t\")\r\n tags = input(\"Associated tags:\\t\")\r\n sql = \"INSERT INTO songs (file_title, song_title, artist, form, data, tag) VALUES (%s, %s, %s, %s, %s, \" \\\r\n \"%s) \" # Insert song into database\r\n columns = (file_title, song_title, artist, form, data, tags)\r\n self.cursor.execute(sql, columns)\r\n self.cnx.commit()\r\n self.cursor.execute(\r\n \"SELECT MAX(ID) FROM songs\")\r\n result = self.cursor.fetchall()\r\n return \"New song ID: \" + str(result[0][0])\r\n else:\r\n return \"Give valid path\"", "async def 
proo(pros):\n if not pros.text[0].isalpha() and pros.text[0] not in (\"/\", \"#\", \"@\", \"!\"):\n index = random.randint(0, len(PRO_STRINGS) - 1)\n reply_text = PRO_STRINGS[index]\n await pros.edit(reply_text)", "async def mathlesson(self, ctx):\r\n await ctx.send('https://www.youtube.com/watch?v=WFoC3TR5rzI')", "def help_command(update,context):\r\n update.message.reply_text('I am a Voice bot')", "def edit_user(self):\n from editWindow import EditPlayer\n self.edit = EditPlayer(self.lang, self.result_table.currentItem().text())\n self.edit.show()", "def on_access_approved(self, handler):\n print \"User with {0} has been GRANTED access.\".format(\n handler.client_address[0]\n )", "def show_auto_quotes(self, message):\n user = self.ts.get_user(message)\n web_view_link = self.spreadsheets['auto_quotes'][1]\n short_url = self.shortener.short(web_view_link)\n self._add_to_whisper_queue(user, 'View the auto quotes at: {}'.format(short_url))", "async def saytext(self,ctx):\r\n if await bMsg(ctx,ctx.message.author.name,client):\r\n return\r\n logger.info('Games.saytext', extra={'invoker': ctx.message.author.name})\r\n await ctx.send(wordsDict.generate())" ]
[ "0.68488747", "0.56438977", "0.55508363", "0.55483466", "0.5504152", "0.5491618", "0.54619044", "0.54378915", "0.54029", "0.5392849", "0.53796095", "0.5319239", "0.52956706", "0.5290236", "0.5269116", "0.52477455", "0.5243103", "0.5234158", "0.52026176", "0.5178342", "0.5166911", "0.5128911", "0.5116017", "0.5100287", "0.50971097", "0.50952", "0.50817424", "0.5062706", "0.50561315", "0.5046288", "0.50348294", "0.5029806", "0.502894", "0.49951383", "0.49933785", "0.49921593", "0.4988813", "0.49823457", "0.4980772", "0.49751583", "0.49701434", "0.49629366", "0.49568418", "0.49528757", "0.4949094", "0.49363178", "0.49331695", "0.4928238", "0.49276048", "0.49232784", "0.4914275", "0.49120829", "0.49089733", "0.49027133", "0.48928303", "0.48917842", "0.488294", "0.48824668", "0.4879985", "0.48778915", "0.487734", "0.48729622", "0.48716184", "0.48570976", "0.48548844", "0.48531222", "0.48468873", "0.4845784", "0.48450363", "0.48435223", "0.4841135", "0.48389298", "0.48340586", "0.48204604", "0.48203886", "0.48144913", "0.48128387", "0.48033026", "0.4801685", "0.4799729", "0.47905838", "0.47891405", "0.47878373", "0.47813812", "0.47726226", "0.4772396", "0.47684684", "0.47671908", "0.47659448", "0.4762298", "0.47616568", "0.47548804", "0.47468325", "0.47419682", "0.47411445", "0.47381955", "0.4733266", "0.47321984", "0.47292113", "0.47276124" ]
0.8136522
0
This method is called to check if a song already has lyrics so as to avoid duplicity of lyrics
def song_has_lyrics(): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_existing_lyrics(self, song_id):\n\t\tlyrics = self.db.lyrics.find_one({'song_id': song_id})['lyrics']\n\t\treturn lyrics", "def add_lyrics(self):\n\n conn = self.conn\n conn.text_factory = str\n c = conn.cursor()\n\n c.execute(\"SELECT songs.id, artist, title, url FROM songs LEFT JOIN lyrics ON songs.id = lyrics.song_id WHERE lyrics.song_id IS NULL\")\n all_songs_to_scrape = c.fetchall()\n for song in all_songs_to_scrape:\n song_id = song[0]\n song_artist = song[1]\n song_title = song[2]\n song_url = song[3]\n print(\"Looking for lyrics for \" + song_title + \" by \" + song_artist)\n try:\n lyrics = pygenius_songs.searchURL(song_url, 'lyrics')\n for lyric in lyrics:\n for line in lyric.split('\\n'):\n c.execute('INSERT INTO lyrics(song_id, line) VALUES (?,?)', (song_id, line))\n conn.commit()\n except Exception as e:\n print(e)\n print song_url\n print(\"Exception caught! ... continuing.\")\n pass", "def get_existing_lyrics_of_artist(self, artist_name=None, artist_id=None):\n\t\tif artist_name:\n\t\t\tsongs = self.db.artists.find_one({'name': str(artist_name).lower()})\n\t\t\tlyrics = []\n\t\t\tfor song in songs:\n\t\t\t\tlyrics.append((song, self.get_existing_lyrics(song)))\n\t\t\treturn lyrics\n\t\tif artist_id:\n\t\t\tsongs = self.db.artists.find_one({'id': artist_id})['songs']\n\t\t\tprint(len(songs))\n\t\t\tlyrics = []\n\t\t\tfor song in songs:\n\t\t\t\ttry:\n\t\t\t\t\tlyrics.append((song, self.get_existing_lyrics(song)))\n\t\t\t\texcept:\n\t\t\t\t\tcontinue\n\t\t\treturn lyrics", "def lyrics_note_is_same_as_original():\n pass", "def fetch_lyrics(self) -> None:\n if self.artist is None or self.title is None:\n return\n Logger.Logger.log('Looking for song lyrics...')\n finder = LyricsFinder.LyricsFinder(self)\n finder.fetch()\n self.lyrics = finder.get_lyrics()\n self.lyrics_writer = finder.get_lyrics_writer()\n if not self.lyrics:\n Logger.Logger.log('No lyrics found for this song.')", "def artist_song_first_pass(self):\n log.debug(\"Called artist_song_first_pass for %s.\" % self.name)\n self.success = False\n song_potentials = []\n potential_count = 0\n _min = 20\n\n def generate_potentials(count):\n results = self.sp.search(q= 'artist: ' + self.artist + ' track: ' + self.song, type='track', limit=2)\n if results['tracks']['total'] >= 1:\n for items in results['tracks']['items']:\n song_potentials.append([items['name'], items['uri']])\n for artist in items['artists']:\n song_potentials[count].append(artist['name'])\n song_potentials[count].append(artist['uri'])\n count += 1\n\n for splitter in splitters:\n if self.name_clean.count(splitter) == 1:\n self.artist, self.song = self.name_clean.split(splitter)\n generate_potentials(potential_count)\n elif self.name_clean.count(splitter) > 1:\n for x in range(0, self.name_clean.count(splitter)):\n self.artist, self.song = split(self.name_clean, splitter, x)\n generate_potentials(potential_count)\n\n cutoff = matching(self.name_clean)\n log.debug(\"%s potential matches found for %d\" % (len(song_potentials), id(self)))\n log.debug(\"Potentials: %s\" % song_potentials)\n for potential in song_potentials:\n log.debug(potential)\n log.debug(self.name_clean)\n log.debug(str(potential[2]) + \" \" + str(potential[0]))\n lev = levenshtein(self.name_clean, str.lower(str(potential[2])) + \" \" + str.lower(str(potential[0])))\n log.debug(lev)\n if lev < _min:\n _min = lev\n self.artist = potential[2]\n self.artist_uri = potential[3]\n self.song = potential[0]\n self.song_uri = potential[1]\n\n if self.artist_uri and self.song_uri is not 
None:\n log.debug(\"Cutoff point for %s : %d\" % (id(self), cutoff))\n log.debug(\"Current Min: {}\".format(_min))\n log.debug(\"Levenshtein distance between {} and {} : {}\"\n .format(self.name_clean, self.artist + self.song,\n levenshtein(self.name, self.artist + \" \" + self.song)))\n if int(_min) > cutoff:\n log.debug(\"Method artist_song_first_pass failed for %s.\" % self.name)\n self.success = False\n self.artist = None\n self.song = None\n else:\n log.debug(\"Method artist_song_first_pass succeeded for %s.\" % self.name)\n self.success = True\n else:\n log.debug(\"Method artist_song_first_pass failed for %s.\" % self.name)\n self.success = False\n self.artist = None\n self.song = None", "def add_songs(self, artist_list):\n\n \"Terms that identify songs that aren't really songs\"\n conn = self.conn\n conn.text_factory = str\n c = conn.cursor()\n\n if artist_list is None:\n return \"You must provide a list of artists for whom to find songs.\"\n else:\n for artist in artist_list:\n print(\"Finding songs for \" + artist)\n all_songs_by_artist = pygenius_songs.findAllSongs(artist)\n already_scraped = list()\n for song in all_songs_by_artist:\n url = song[0]\n title = song[1]\n print(title)\n c.execute(\"SELECT count(*) FROM songs WHERE title = (?) AND artist = (?)\", (title, artist))\n check_in_db = c.fetchall()\n if check_in_db[0][0] == 0:\n if title not in already_scraped:\n if not [i for i, x in enumerate(self.bad_terms) if x in title]:\n already_scraped.append(title)\n c.execute('INSERT INTO songs(title, artist, url) values (?,?,?)', (title, artist, url))\n conn.commit()", "def add_lyrics_and_song_data_to_database(artist, song):\n if exists('song_database.txt'):\n f = open('song_database.txt', 'r+')\n song_list = pickle.load(f)\n current_entry = Song_data(artist, song)\n if current_entry.id in [previous_entry.id for previous_entry in song_list]:\n print \"Song '\" + song + \"' already in database.\"\n return\n song_list.append(current_entry)\n f.seek(0,0)\n pickle.dump(song_list, f)\n else:\n f = open('song_database.txt', 'w')\n song_list = [Song_data(artist, song)]\n f.seek(0,0)\n pickle.dump(song_list, f)", "def is_lyrics_approved():", "def song_already_exists(song, playlist_id):\n print('Song {title} already in playlist {playlist_id}, adding has been skipped.'\n .format(title=song.title,\n playlist_id=playlist_id))\n pass", "def __add_lyric(self, song, genius_api):\n\t\tentry = {\n\t\t\t'song_id' : int(song['id']),\n\t\t\t'song_title' : song['title'],\n\t\t\t'url' : song['url']\n\t\t\t}\n\t\ttry:\n\t\t\tentry['lyrics'] = genius_api.get_lyrics(song['id'], song['url'])\n\t\texcept:\n\t\t\tentry['lyrics'] = ''\t\n\t\t\t\t#Step 3: Insert Artist into MongoDB via isnert_one\n\t\ttry:\n\t\t\tself.db.lyrics.insert_one(entry)\n\t\texcept errors.DuplicateKeyError:\n\t\t\tpass", "async def _lyrics(self, ctx: commands.Context):\n if not ctx.voice_state.is_playing:\n raise commands.CommandError('Nothing being played at the moment.')\n\n # Get song name listed on youtube\n song_title = ctx.voice_state.current.source.track\n if not song_title:\n return await ctx.send(\"Couldn't find lyrics for this track!\")\n\n song_title = re.sub(\"[(\\[].*?[)\\]]\", \"\", song_title).strip() # Remove parenthesis from song title\n # Get artist name listed on youtube\n artist_name = ctx.voice_state.current.source.artist\n # Instance of GeniusSong class using the Genius API\n genius_song = GeniusSong(song_title, artist_name)\n # Try getting the lyrics using the lyricsgenius library\n lyrics = 
genius_song.fastlyrics()\n\n # In case of no lyrics found. Use the other (slower) method\n if not lyrics:\n res = genius_song.get_response() # Generate a response using the Genius API to get the songs\n if res:\n # Find the most similar artist comparing the artist on YouTube and Genius\n artist_name = genius_song.return_similar_artist(res)\n # Artist didn't match\n if not artist_name:\n await ctx.send(\"Couldn't find similar artists. The lyrics might not be the expected.\")\n\n # Get the lyrics using the lyricsgenius library with the new artist\n lyrics = genius_song.fastlyrics(artist_name)\n\n else:\n return await ctx.send(\n \"**Error!**\\nThere is a problem with Genius.\\nTry again in a few minutes. \"\n \"\\nYou can also try the command `fastlyrics`.\")\n\n if lyrics:\n # Split lyrics into fields\n fields = genius_song.split_lyrics(lyrics)\n # Create an embed message\n embed = embed_msg(\n title=song_title.capitalize() + \"\\n{}\".format(artist_name),\n description=\"\",\n footer=\"Lyrics provided by Genius.\",\n field_values=fields,\n inline=False\n )\n return await ctx.send(embed=embed)\n\n return await ctx.send(\"Lyrics couldn't be found.\")", "def get_lyrics(self, artist, song):\n\n # Disable lyrics display\n self.status_bar.hide()\n self.lyrics_view.hide()\n self.scroll.hide()\n\n lyrics = None\n in_database = False\n\n if self.database.status: # Testing connection to database\n lyrics = self.database.retrieve_lyrics(artist, song)\n if lyrics: # False if not found in database\n in_database = True\n\n if not lyrics: # Try next to retrieve from web\n url = self.make_url(artist, song)\n try:\n lyrics = self.fetch_lyrics(url)\n except:\n self.display_message('Internet Connection Problem') # Could not connect to internet\n return\n\n if not lyrics: # Not available in database or on web\n self.display_message('Lyrics Not Available')\n else:\n # Set the display\n lyrics_buffer = self.lyrics_view.get_buffer()\n lyrics_buffer.set_text(lyrics)\n\n if not in_database: # Save if not in database\n self.database.save(artist, song, lyrics)\n\n # Re-enable lyrics display\n self.scroll.show()\n self.lyrics_view.show()\n self.display_message('Lyrics Extracted Successfully')", "def validate_song_is_added_to_playlist(self):\n if self.track == 'Enjoy Enjaami':\n option = element['EnjoyEnjaami']\n elif self.track == 'Inna Mylu':\n option = element['InnaMylu']\n return com_util.find_text(self.driver, option)", "def test_single_track_artist_too_long(self):\n self.add_mp3(set_artist=True, artist='z'*(App.max_artist_album_length+10))\n (added, status) = self.app.add_album(self.filenames)\n self.assertEqual(added, False)\n self.assertIn('is longer than', status)\n self.assertEqual(self.get_album_count(), 0)", "def record_lyrics_result(self, track_id, songdata):\n self.lyrics.insert_one(\n {\n \"_id\": track_id,\n \"response_artist\": songdata.artist,\n \"response_title\": songdata.title,\n \"lyrics\": songdata.lyrics,\n }\n )", "async def async_is_playing_new_track(self):\n if self._playing_mediabrowser and self._media_source_uri is not None:\n # don't trigger new track flag for local mediabrowser files\n return False\n \n if self._icecast_name != None:\n import unicodedata\n artmed = unicodedata.normalize('NFKD', str(self._media_artist) + str(self._media_title)).lower()\n artmedd = u\"\".join([c for c in artmed if not unicodedata.combining(c)])\n if artmedd.find(self._icecast_name.lower()) != -1 or artmedd.find(self._source.lower()) != -1:\n # don't trigger new track flag for icecast streams where track 
name contains station name or source name; save some energy by not quering last.fm with this\n self._media_image_url = None\n return False\n\n if self._media_artist != self._media_prev_artist or self._media_title != self._media_prev_title:\n return True\n else:\n return False", "def song_lyrics(ans):\r\n albums = simple_album_list()\r\n for album in albums:\r\n songs = simple_songs_list(album)\r\n for song in songs:\r\n if ans == song:\r\n words = dbase()[album][0][song]\r\n words = words[2]\r\n return words", "def remove_library_dups(self, client):\n\n print \"\\n\\nGetting all Google Play Music library contents for '%s'...\" % (self.user)\n all_songs = client.get_all_songs()\n print \"\\n\"\n\n new_songs = {}\n old_songs = {}\n print \"Checking for duplicates...\"\n for song in all_songs:\n song_id = song.get('id')\n timestamp = song.get('recentTimestamp')\n\n key = \"%s: %d-%02d %s\" % (song.get('album'), song.get('discNumber'), song.get('trackNumber'), song.get('title'))\n\n # Identify duplicates within this library...\n if key in new_songs:\n if new_songs[key]['timestamp'] < timestamp:\n old_songs[key] = new_songs[key]\n new_songs[key] = {'id': song_id, 'timestamp': timestamp}\n else:\n old_songs[key] = {'id': song_id, 'timestamp': timestamp}\n\n new_songs[key] = {'id': song_id, 'timestamp': timestamp}\n\n if len(old_songs):\n print \"Found duplicate songs\"\n\n old_song_ids = []\n for key in sorted(old_songs.keys()):\n old_song_ids.append(old_songs[key]['id'])\n print \" ==> %s <==\" % (key.encode('utf-8'))\n\n print \"Deleting duplicate songs...\"\n client.delete_songs(old_song_ids)\n else:\n print \"No duplicate songs\"\n\n print \"Processed all %d songs\" % (len(all_songs))", "def test_next_song_existing(bot, monkeypatch):\n tracks = [fake_res['title'], 'war squids']\n song_next = Song(fake_res['artist'], 'war squids', fake_res['album'])\n bot.log_result('chat_id', fake_log)\n monkeypatch.setattr(bot, 'get_album_tracks', lambda x: tracks)\n monkeypatch.setattr(bot, 'get_lyrics', lambda s, c: f'Searching for {s}')\n\n assert bot._get_next_song('chat_id') == f'Searching for {song_next}'", "def test_single_track_no_artist(self):\n self.add_mp3(set_artist=True)\n (added, status) = self.app.add_album(self.filenames)\n self.assertEqual(added, False)\n self.assertIn('has no artist tag', status)\n self.assertEqual(self.get_album_count(), 0)", "def lyrics(self):\n return get_lyrics(self.artist, self.title,'')", "def scrape_new(self):\n\t\tno_urls = 0\n\t\tfalse_urls = 0\n\t\tfor i, song in enumerate(self._songs):\n\t\t\theaders = { \n\t\t\t\t'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Safari/537.36' \n\t\t\t\t}\n\t\t\t# metro structure\n\t\t\tmetro = \"http://www.metrolyrics.com/\" + '-'.join(song.title.split()) + \"-lyrics-\" + '-'.join(song.artist.split()) + \".html\"\n\t\t\trequest = requests.head(metro) # check if site exists\n\t\t\tif request.status_code == 301: # 301 == moved permanantely (new url exists)\n\t\t\t\tr = urllib.request.Request(metro, data=None, headers=headers)\n\t\t\t\tpage = urllib.request.urlopen(r)\n\t\t\t\tsoup = BeautifulSoup(page, \"lxml\")\n\t\t\t\tlyric_body = soup.find(\"div\", {\"id\": \"lyrics-body-text\"})\n\t\t\t\tverses = lyric_body.find_all(\"p\", class_='verse')\n\t\t\t\tif verses:\n\t\t\t\t\tdir = self._path + song.song_file\n\t\t\t\t\tself.write_to_file_obj(dir, verses)\n\t\t\t\telse:\n\t\t\t\t\tprint(\"Wrong Format?\")\n\t\t\t\t\tfalse_urls += 1\n\t\t\telse:\n\t\t\t\tprint(\"BAD 
URL\")\n\t\t\t\tno_urls += 1\n\t\t\tif i != len(self._songs)-1: \n\t\t\t\ttime.sleep(7) # set timeout to not overburdden the server\n\t\tprint(\"URLs Failed: \", no_urls)", "def song_check(song):\n msg = choose_song(song)\n return msg != ERROR", "def test_single_track_blank_artist(self):\n self.add_mp3(set_artist=True, artist='')\n (added, status) = self.app.add_album(self.filenames)\n self.assertEqual(added, False)\n self.assertIn('has no artist tag', status)\n self.assertEqual(self.get_album_count(), 0)", "def test_single_track_album_too_long(self):\n self.add_mp3(set_album=True, album='z'*(App.max_artist_album_length+10))\n (added, status) = self.app.add_album(self.filenames)\n self.assertEqual(added, False)\n self.assertIn('is longer than', status)\n self.assertEqual(self.get_album_count(), 0)", "def add_songs(self, song, position=None):\n song_found = find_object(song, self.tracks)\n if song_found is None:\n song_found = Song(song, self.artist)\n if position is None:\n self.tracks.append(song_found)\n else:\n self.tracks.insert(position, song_found)", "async def lyrics(\n self, ctx: commands.Context, query: str = None\n ) -> Optional[Tuple[str, str, str]]:\n\n query = await self.now_playing(ctx) if query is None else query\n if not query:\n return\n\n url = f\"https://some-random-api.ml/lyrics?title={query}\"\n\n async with aiohttp.ClientSession() as session:\n request = await session.get(url)\n request_json = await request.json(content_type=None)\n\n authors = request_json.get(\"author\")\n title = request_json.get(\"title\")\n lyrics = request_json.get(\"lyrics\")\n\n return (title, authors, lyrics) if lyrics else None", "def test_get_lyrics_notfound(monkeypatch, bot):\n\n def assert_not_found(msg):\n msg = get_lyrics(song, 1)\n msg = msg.lower()\n assert song.artist in msg\n assert song.title in msg\n assert 'could not be found' in msg\n\n song = Song('nothing more', 'christ copyright')\n result = Nothing()\n result.source = 'hello'\n monkeypatch.setattr(bot, 'get_lyrics_threaded', lambda a, b: result)\n\n msg = bot.get_lyrics(song, 1)\n assert_not_found(msg)\n\n result.source = None\n song.lyrics = 'hello'\n msg = bot.get_lyrics(song, 1)\n assert_not_found(msg)", "def test_two_tracks_mismatched_album(self):\n self.add_mp3(filename='1.mp3')\n self.add_mp3(filename='2.mp3', set_album=True, album='Album 2')\n (added, status) = self.app.add_album(self.filenames)\n self.assertEqual(added, False)\n self.assertIn('changed to', status)\n self.assertEqual(self.get_album_count(), 0)", "def testOneWord(self):\n\n\t\t\t\tspinner.Synonym.objects.add('directory', 'catalog', 10)\n\t\t\t\tspinner.Synonym.objects.add('list', 'directory', 20)\n\t\t\t\tspinner.Synonym.objects.add('directory', 'guide', 10)\n\n\t\t\t\tsynonyms = spinner.Synonym.objects.get_synonyms(['directory'])\n\t\t\t\tassert len(synonyms) < 3, synonyms", "def add_song(self):\r\n path = input(\"Give file path:\\t\") # Request file path\r\n path = path.replace('\\\\', '/')\r\n if self.path_song_re.match(path) and not self.path_storage_re.match(\r\n path): # Check that the path leads to a song that is not already found in Storage\r\n copy(path, self.p_storage) # Copy the song to the storage directory\r\n file_title, form = path.split(\"/\")[-1].split(\".\") # Save file title and format from the path\r\n sql = \"SELECT COUNT(*) FROM songs WHERE file_title = %s AND form = %s\" # Check the existence of a song\r\n # with the same title and format in the database\r\n self.cursor.execute(sql, (file_title, form))\r\n r = 
self.cursor.fetchall()\r\n if r[0][0] != 0:\r\n return \"A song with this file name and format already exists!\"\r\n song_title = input(\"Song title:\\t\")\r\n artist = input(\"Artist:\\t\")\r\n data = input(\"Release date:\\t\")\r\n tags = input(\"Associated tags:\\t\")\r\n sql = \"INSERT INTO songs (file_title, song_title, artist, form, data, tag) VALUES (%s, %s, %s, %s, %s, \" \\\r\n \"%s) \" # Insert song into database\r\n columns = (file_title, song_title, artist, form, data, tags)\r\n self.cursor.execute(sql, columns)\r\n self.cnx.commit()\r\n self.cursor.execute(\r\n \"SELECT MAX(ID) FROM songs\")\r\n result = self.cursor.fetchall()\r\n return \"New song ID: \" + str(result[0][0])\r\n else:\r\n return \"Give valid path\"", "def add_songs(self, name, year, title):\n\n album_found = find_object(name, self.album)\n if album_found is None:\n print(\"Not Found \" + name)\n album_found = Album(name, year, self.name)\n self.add_album(album_found)\n else:\n print(\"Found album \"+name)\n\n album_found.add_songs(title)", "def add_song(self, song, position=None):\n\n # Use find_object to see if the song exist already.\n song_found = find_object(song, self.tracks) # look for song.tracks to see if it exist in the list\n if song_found is None: # if song is not found\n song_found = Song(song, self.artist) # We create new song using \"Song\" function and assign it to song_found\n if position is None: # If there are no songs in this track\n self.tracks.append(song_found) # Add this song_found in the first position\n else: # else if there are already some songs in the track\n self.tracks.insert(position, song_found) # inserts the position and song in self.tracks list", "def cheer(self, songs):\n if self.favourite_song in songs:\n return \"Whoo!\"", "def add_lyrics(self, songs, genius_api, nthreads=0):\n\t\tif isinstance(songs, list):\n\t\t\tprint(f'{len(songs)} songs to get their lyrics')\n\t\t\tif nthreads <2:\n\t\t\t\tfor song_id in songs:\n\t\t\t\t\tsong = genius_api.search(song_id, 'song')\n\t\t\t\t\tself.__add_lyric(song, genius_api)\n\t\t\telif nthreads >1:\n\t\t\t\tassert len(songs) > 0\n\t\t\t\tthreads=[]\n\t\t\t\tscrapping_batch_size = len(songs) // nthreads\n\t\t\t\tprint(f'thread list size = {scrapping_batch_size}')\n\t\t\t\tfor i in range(nthreads):\n\t\t\t\t\tthreads.append(Thread(target=self.add_lyrics, \n\t\t\t\t\t\targs=(songs[scrapping_batch_size * i : scrapping_batch_size * (i + 1)], genius_api,)))\n\t\t\t\t\tif i == threads - 1:\n\t\t\t\t\t\tthreads[i] = Thread(self.add_lyrics, (songs[scrapping_batch_size * i:], genius_api,))\n\t\t\t\t\tthreads[i].start()\n\t\t\t\t\tprint('thread {} activated'.format(i+1))\n\t\telse:\n\t\t\tsong = genius_api.search(songs, 'song')\n\t\t\tself.__add_lyric(song, genius_api)\n\t\t\tprint(' lyrics of {} added with success'.format(song['title']))", "def is_same_song(a, b):\n\n for k in 'album', 'title', 'artists':\n if a[k] != b[k]:\n return False\n\n return True", "def testSynonymDuplicate(self):\n\t\t\t\tone = spinner.Word.objects.get_single('mac', True)\n\t\t\t\ttwo = spinner.Word.objects.get_single('macintosh', True)\n\t\n\t\t\t\tsyn = spinner.Synonym.objects.get_single(one, two, True)\n\t\t\t\t\n\t\t\t\tsyn2 = spinner.Synonym.objects.get_single(two, one, True)\n\n\t\t\t\tassert syn == syn2\n\n\t\t\t\tsyn.delete()\n\t\t\t\tone.delete()\n\t\t\t\ttwo.delete()", "def get_single_lyrics(self, url):\n r = requests.get(url)\n soup = BeautifulSoup(r.text, 'html.parser')\n lyrics = ''\n \n all_divs = soup.findAll('div')\n filtered_divs = [x for x in all_divs 
if x.has_attr('class') and ('Lyrics__Container-sc' in x['class'][0] or x['class'] == ['lyrics'])]\n filtered_divs_classes = [x['class'] for x in filtered_divs if x.has_attr('class')]\n \n if len(filtered_divs) == 0:\n lyrics = ''\n elif len(filtered_divs) == 1 and filtered_divs_classes[0][0] == 'lyrics':\n lyrics = filtered_divs[0].text\n else:\n for part in filtered_divs:\n for e in part.descendants:\n if isinstance(e, str):\n lyrics += e.strip()\n elif e.name == 'br' or e.name == 'p':\n lyrics += '\\n'\n \n return lyrics", "def add_song(self, song, position=None):\n\n song_found = find_object(song, self.tracks)\n if song_found is None:\n song_found = Song(song, self.artist)\n if position is None:\n self.tracks.append(song_found)\n else:\n self.tracks.insert(position, song_found)", "def add_song_to_playlist(self):\n #populate our songs dictionary\n self.get_liked_videos()\n\n #collect all of uri\n uris = []\n for song,info in self.all_song_info.items():\n uris.append(info[\"spotify_uri\"])\n\n #create a new playlist\n playlist_id = self.create_playlist()\n\n #add all songs into new playlist\n\n #Spotipy can only add 100 songs at a time to a playlist that is why this method is taken\n g = len(uris)\n if g > 100:\n s = 0\n e = 99\n while g > 100:\n self.sp.user_playlist_add_tracks(user=self.username, playlist_id=playlist_id,\n tracks=uris[s:e])\n g -= 100\n s = e + 1\n e += 100\n self.sp.user_playlist_add_tracks(user=self.username, playlist_id=playlist_id,\n tracks=uris[s:])\n else:\n self.sp.user_playlist_add_tracks(user=self.username, playlist_id=playlist_id,\n tracks=uris)", "def test_too_many_songs(self):\n artist = Artist.objects.create(name='Artist', normname='artist')\n album = Album.objects.create(artist=artist, name='Album', normname='album')\n for num in range(501):\n Song.objects.create(filename='file%03d.mp3' % (num+1),\n artist=artist,\n album=album,\n title='Title %03d' % (num+1),\n year=0,\n tracknum=0,\n normtitle='title %03d' % (num+1),\n raw_artist='artist',\n filetype=Song.MP3,\n bitrate=128000,\n mode=Song.CBR,\n size=123000,\n length=90,\n sha256sum='0cf31fc7d968ec16c69758f9b0ebb2355471d5694a151b40e5e4f8641b061092',\n )\n\n response = self.client.get(reverse('exordium:artist', args=(artist.normname,)))\n self.assertEqual(response.status_code, 200)\n self.assertQuerysetEqual(response.context['albums'].data, [repr(album)])\n self.assertEqual(response.context['have_songs'], False)\n self.assertNotIn('songs', response.context)\n self.assertNotContains(response, 'Songs by %s' % (artist))\n self.assertContains(response, reverse('exordium:artist', args=(artist.normname,)))\n self.assertContains(response, reverse('exordium:album', args=(album.pk,)))\n self.assertContains(response, '1 album')\n self.assertNotContains(response, '1 song')", "def check_music(self):\n\t\tif self.menu_music_played == 0:\n\t\t\tif self.initial_menu_music_element == self.next_menu_music_element:\n\t\t\t\tself.ingame_music.extend(self.menu_music)\n\t\t\t\tself.music = self.ingame_music\n\t\t\t\tself.music_rand_element = random.randint(0, len(self.ingame_music) - 1)\n\t\t\t\tself.menu_music_played = 1\n\t\t\telse:\n\t\t\t\tself.music = self.menu_music\n\n\t\tif hasattr(self, '_bgsound_old_byte_pos') and hasattr(self, '_bgsound_old_sample_pos'):\n\t\t\tif self._bgsound_old_byte_pos == self.emitter['bgsound'].getCursor(fife.SD_BYTE_POS) and self._bgsound_old_sample_pos == self.emitter['bgsound'].getCursor(fife.SD_SAMPLE_POS):\n\t\t\t\tself.music_rand_element = self.music_rand_element + 1 if \\\n\t\t\t\t\t 
self.music_rand_element + 1 < len(self.music) else 0\n\t\t\t\tself.play_sound('bgsound', self.music[self.music_rand_element])\n\t\t\t\tif self.menu_music_played == 0:\n\t\t\t\t\tself.next_menu_music_element = self.music_rand_element\n\n\t\tself._bgsound_old_byte_pos, self._bgsound_old_sample_pos = \\\n\t\t\t self.emitter['bgsound'].getCursor(fife.SD_BYTE_POS), \\\n\t\t\t self.emitter['bgsound'].getCursor(fife.SD_SAMPLE_POS)", "def checkHash(song):\n\tsql = \"Select path, filename, hash from songs where hash = '\" + song.hash + \"';\"\n\tc, conn = connect()\n\tc.execute(sql)\n\tnotexists = True\n\tfor (path, filename, hash) in c:\n\t\tif hash == song.hash:\n\t\t\tnotexists = False\n\t\telse:\n\t\t\tnotexists = True\n\treturn notexists", "def test_metrolyrics(self):\n bad_res = lw.get_lyrics('metrolyrics', 'eminem', 'los yourself')\n good_res = lw.get_lyrics('metrolyrics', 'eminem', 'lose yourself')\n self.assertEqual(bad_res, 404)\n self.assertTrue(good_res)", "def enough_lords(self):\n real_numbers = {\n Title.count: len(self.get_lords_of_title(Title.count)),\n Title.baron: len(self.get_lords_of_title(Title.baron)),\n Title.baronet: len(self.get_lords_of_title(Title.baronet)),\n Title.chevalier: len(self.get_lords_of_title(Title.chevalier)),\n Title.client: len(self.get_lords_of_title(Title.client)),\n }\n\n counter = {Title.baron: 0, Title.baronet: 0, Title.chevalier: 0,\n Title.client: 0}\n\n for title, vassals in LORDS_VASSALS.items():\n for i in range(real_numbers[title]):\n for lower_title, value in vassals.items():\n counter[lower_title] += value\n\n for title, count in counter.items():\n if count > real_numbers[title]:\n print(f'Not enough lords of title: {title.value}, '\n f'required: {count}, actual: {real_numbers[title]}')\n return False\n return True", "def acquire_song_chords(title, artist):\r\n #search for song in database, and demand that it has no chords (otherwise an exception is thrown by db)\r\n song = Song.objects.get(title=title, artist=artist, chords__isnull=True) \r\n chord_vector = get_chords(song.title, song.artist)\r\n for index,chord in enumerate(chord_vector):\r\n root, notes = decode(chord)\r\n #instead of creating a huge amount of duplicated Chord instances, \r\n #create a new instance only for a first appearing chord in the db.\r\n chord, is_new = Chord.objects.get_or_create(root=root, notes=str(notes), symbol = chord) \r\n# if is_new: print \"created new chord instance - \", root, notes \r\n #create the chord_index relationship\r\n Song_chord_index.objects.create(song=song, chord=chord, index=index) \r\n song.previously_failed_chords = False #if this attempt succeeded, remove bad flag\r", "def incorporate_song_chords_from_external_source(source_path):\r\n number_of_songs_read = 0\r\n number_of_songs_with_existing_chords = 0\r\n songs_to_chord_symbols = pickle.load(open(source_path,'r'))\r\n start_time = time.time()\r\n #'\\r' endings result from windows and linux handling line breaks differently (external source from linux mahcine)\r\n songs_to_chord_symbols = {(key[0],key[1].replace('\\r','')):value for key,value in songs_to_chord_symbols.items()}\r\n# songs_to_chord_symbols = {(title,artist):value for (title,artist),value in songs_to_chord_symbols.items() if \\\r\n# Song.objects.filter(title=title,artist=artist,chords__isnull=True).exists()}#skip already chorded\r\n print len(songs_to_chord_symbols), \"songs to update with chords from file\", source_path.split(\"/\")[-1]\r\n #create a mapping from chord symbols to database chords, for less db access in the 
iterations\r\n symbols_to_db_chords = dict()\r\n for symbol_list in songs_to_chord_symbols.values():\r\n for symbol in symbol_list: \r\n if not symbol in symbols_to_db_chords: \r\n try:\r\n root, notes = decode(symbol)\r\n except Exception as e:\r\n# print e\r\n break #skip this symbol list, it belongs to a song that will not get saved eventually\r\n symbols_to_db_chords[symbol]=Chord.objects.get_or_create(root=root, notes=notes,symbol=symbol)[0]\r\n for title,artist in songs_to_chord_symbols.keys():\r\n try:\r\n song = Song.objects.get(title=title, artist=artist) \r\n if song.chords.exists():\r\n continue\r\n chord_symbols_vector = songs_to_chord_symbols[title,artist]\r\n chord_vector = [symbols_to_db_chords[symbol] for symbol in chord_symbols_vector]\r\n with transaction.commit_on_success():\r\n for index,chord in enumerate(chord_vector):\r\n Song_chord_index.objects.create(song=song, chord=chord, index=index) \r\n number_of_songs_read += 1\r\n if not (number_of_songs_read % 100): print \"saved chords for\", \\\r\n number_of_songs_read, \"songs.\"\r\n except KeyError as e:\r\n song.previously_failed_chords = True\r\n song.save()\r\n continue\r\n except Exception as e:#assume exception was before chord linking phase.\r\n song.previously_failed_chords = True\r\n song.save()\r\n print e\r\n continue\r\n print \"done updating\", number_of_songs_read, ' successfully,', number_of_songs_with_existing_chords, \\\r\n \" of them already updated. Time taken\", time_elapsed_minutes(start_time)", "def test_two_tracks_same_album(self):\n self.add_mp3(filename='1.mp3')\n self.add_mp3(filename='2.mp3')\n (added, status) = self.app.add_album(self.filenames)\n self.assertEqual(added, True)\n self.assertEqual(self.get_album_count(), 1)\n album = Album.get_by_artist_album(self.app.curs, 'Artist', 'Album')\n self.assertNotEqual(album, None)\n self.assertEqual(album.artist, 'Artist')\n self.assertEqual(album.album, 'Album')\n self.assertEqual(album.album_type, 'album')\n self.assertEqual(album.totalseconds, 4)\n self.assertEqual(album.totaltracks, 2)", "def get_all_lyrics(self):\n if len(self.track_urls) == 0:\n raise ValueError('URLs have not be retrieved yet. 
Call get_track_list() first.')\n \n if len(self.track_urls) == 0:\n return\n\n for url in self.track_urls:\n lyrics = self.get_single_lyrics(url)\n self.lyrics.append(lyrics)", "def _has_phrase(self, box):\n lines = box.get_lines()\n pattern = self.field.settings.pattern_builder.list_pattern(self._phrases)\n for line in lines:\n if re.search(pattern, line.text) is not None:\n return True\n return False", "def add_song(self, song: Song) -> None:\n\n self.songs.append(song)\n self.set_song_count(len(self.songs))", "def test_no_lyrics_are_shown_in_tab(self, mock_song):\n with self.app.test_client() as c:\n response = c.get('/')\n self.assertIn(b'Nothing playing at the moment.', response.data)", "def songs_are_related(self, song1_id, song2_id, rel_str):\n if rel_str not in self.SONG_ADJECTIVES.keys():\n print(f\"ERROR: relationship '{rel_str}' is not recognized.\")\n return False\n\n compare_func = self._get_comparison_func(rel_str)\n if compare_func is None:\n print(f\"ERROR: could not find comparison function for relationship: '{rel_str}'\")\n return False\n\n feature_name = self._get_audio_feature_name(rel_str)\n if feature_name is None:\n print(f\"ERROR: could not find feature name for relationship: '{rel_str}'\")\n return False\n\n song1_data = self.get_song_data(song_id=song1_id)\n song2_data = self.get_song_data(song_id=song2_id)\n\n if len(song1_data) == 0:\n print(f\"ERROR: Could not find song with id={song1_id}\")\n return False\n if len(song2_data) == 0:\n print(f\"ERROR: Could not find song with id={song2_id}\")\n return False\n\n song1_val = song1_data[0].get(feature_name)\n song2_val = song2_data[0].get(feature_name)\n\n if song1_val is None:\n print(f\"ERROR: could not find '{feature_name}' value for song with id={song1_id}\")\n return False\n if song2_val is None:\n print(f\"ERROR: could not find '{feature_name}' value for song '{song2_id}'\")\n return False\n return compare_func(song1_val, song2_val)", "def add_song(self, song):\n self.songs.append(song)", "def test_single_track_no_album(self):\n self.add_mp3(set_album=True)\n (added, status) = self.app.add_album(self.filenames)\n self.assertEqual(added, False)\n self.assertIn('has no album tag', status)\n self.assertEqual(self.get_album_count(), 0)", "def _check_duplicate_notes(self, tokens, curr_note, step) -> bool:\n same_note_cnt = 0\n idx = step - 3\n while idx > 0:\n prev_note = self._get_num(self.tgt_dict.string(tokens[0, idx : idx + 1]))\n if prev_note != curr_note:\n break\n same_note_cnt += 1\n idx -= 4\n\n if same_note_cnt > _config.PitchPara.Max_Same_Pitch.value:\n return True\n return False", "def __cached_scrape_available(song_name, artist):\n cache_path = __cache_path(song_name, artist)\n\n return op.exists(cache_path)", "def test_get_all_unassociated_single_track_already_associated(self):\n track = Track(artist='Artist', album='Album',\n title='Title', album_id=1)\n track.insert(self.app.db, self.app.curs,\n 'xmms', datetime.datetime.now())\n self.assertEqual(self.get_track_count(), 1)\n tracks = Track.get_all_unassociated(self.app.curs)\n self.assertEqual(len(tracks), 0)", "def dict_is_song(info_dict):\n if \"full album\" in info_dict[\"title\"].lower():\n return False\n if int(info_dict[\"duration\"]) > 7200:\n return False\n return True", "def acquire_db_song_chords():\r\n number_of_songs_read = 0\r\n number_of_songs_failed = 0\r\n unchorded_songs = Song.objects.filter(chords__isnull=True, previously_failed_chords=False)\r\n print \"number of songs without chords (that have not been queried yet) - \", 
unchorded_songs.count()\r\n# chorded_songs = {(song.title, song.artist) for song in \\\r\n# Song.objects.filter(chords__isnull=False).distinct()}\r\n# print \"number of songs with chords - \", len(chorded_songs)\r\n# for song in chorded_songs:\r\n# print song\r\n rand = random.Random()\r\n rand.seed()\r\n rand.shuffle(list(unchorded_songs))\r\n print 'starting iterations'\r\n for song in unchorded_songs:\r\n if song.previously_failed_chords: continue #currently try only fresh things\r\n start = time.time()\r\n try:\r\n acquire_song_chords(song.title,song.artist)\r\n number_of_songs_read += 1\r\n print 'successful song took ' + str(time.time() - start) + ' seconds'\r\n if not (number_of_songs_read % 30): print \"saved chords for\", \\\r\n number_of_songs_read, \"songs.\"\r\n except (ChordsNotFoundException, DecodingFailedException, Exception) as e: #later we might handle exceptions personally\r\n song.previously_failed_chords = True #later on we can find bad behaving songs\r\n song.save()\r\n #remove all chords that did get linked to the failed song\r\n for song_chord_index in Song_chord_index.objects.filter(song=song): song_chord_index.delete()\r\n number_of_songs_failed += 1\r\n if not (number_of_songs_failed % 300): print \"failed saving chords for\", \\\r\n number_of_songs_failed, \"songs.\"\r\n #restart program after two hours, since we get weird results otherwise\r", "def add_song(self):\n # Error check for blank inputs\n if \"\" in (self.root.ids.input_title.text, self.root.ids.input_artist.text, self.root.ids.input_year.text):\n self.root.ids.status_text.text = \"All fields must be completed\"\n return\n # Error check for negative numbers\n try:\n if int(self.root.ids.input_year.text) < 0:\n self.root.ids.status_text.text = \"Year must be >= 0\"\n return\n # Error check for invalid numbers\n except ValueError:\n self.root.ids.status_text.text = \"Please enter a valid number\"\n return\n # Song add, clear inputs, sort songlist\n song_to_add = Song(self.root.ids.input_title.text, self.root.ids.input_artist.text,\n int(self.root.ids.input_year.text))\n self.songs.add_song(song_to_add)\n SongsToLearnApp.clear_inputs(self)\n self.sort_songs(self.root.ids.sort_options.text)", "def get_lyrics(self, name: str, artists: List[str], **_) -> Optional[str]:\n\n # Join every artist by comma in artists\n artist_str = \", \".join(artist for artist in artists if artist)\n\n song_name = name.replace(\" \", \"+\").lower()\n song_artists = artist_str.replace(\" \", \"+\").lower()\n song_artists = song_artists.replace(\",\", \"%2C\")\n\n url = f\"https://search.azlyrics.com/search.php?q={song_name}+{artists}\"\n\n response = requests.get(url, headers=self.headers)\n soup = BeautifulSoup(response.content, \"html.parser\")\n\n td_tags = soup.find_all(\"td\")\n if len(td_tags) == 0:\n return None\n\n result = td_tags[0]\n\n a_tags = result.find_all(\"a\", href=True)\n if len(a_tags) != 0:\n lyrics_url = a_tags[0][\"href\"]\n else:\n return None\n\n if lyrics_url.strip() == \"\":\n return None\n\n response = requests.get(lyrics_url, headers=self.headers)\n soup = BeautifulSoup(response.content, \"html.parser\")\n\n # Find all divs that don't have a class\n div_tags = soup.find_all(\"div\", class_=False, id_=False)\n\n # Find the div with the longest text\n lyrics_div = sorted(div_tags, key=lambda x: len(x.text))[-1]\n\n lyrics = lyrics_div.get_text()\n\n # Remove the 3 first new lines\n lyrics = lyrics[3:]\n\n return lyrics", "def test_adding_album_twice(self):\n self.add_mp3(filename='1.mp3')\n (added, 
status) = self.app.add_album(self.filenames)\n self.assertEqual(added, True)\n self.assertEqual(self.get_album_count(), 1)\n\n self.add_mp3(filename='2.mp3')\n (added, status) = self.app.add_album(self.filenames, 'ep')\n self.assertEqual(added, False)\n self.assertIn('Would update to', status)\n self.assertEqual(self.get_album_count(), 1)\n\n album = Album.get_by_artist_album(self.app.curs, 'Artist', 'Album')\n self.assertEqual(album.artist, 'Artist')\n self.assertEqual(album.album, 'Album')\n self.assertEqual(album.album_type, 'album')\n self.assertEqual(album.totalseconds, 2)\n self.assertEqual(album.totaltracks, 1)", "def check_common_word(song: Song, result: Result) -> bool:\n\n sentence_words = slugify(song.name).split(\"-\")\n to_check = slugify(result.name).replace(\"-\", \"\")\n\n for word in sentence_words:\n if word != \"\" and word in to_check:\n return True\n\n return False", "def add_song_to_database(artist, name, db):\n if exists(db):\n f = open(db, 'r+')\n song_list = pickle.load(f)\n current_entry = Song_data(artist, name);\n if current_entry.id in [previous_entry.id for previous_entry in song_list]:\n print str(current_entry) + \" already in database.\"\n return\n song_list.append(current_entry)\n f.seek(0,0)\n pickle.dump(song_list, f)\n else:\n f = open(db, 'w')\n song_list = [Song_data(artist, name)]\n f.seek(0,0)\n pickle.dump(song_list, f)", "def add_song_to_playlist(self, song_uri, playlist_id, user=None):\n\n if song_uri[0] in self.list_pl_songs(playlist_id, user=None):\n logging.debug('Song already in playlist')\n else:\n if user:\n self.sp.user_playlist_add_tracks(user, playlist_id, song_uri)\n else:\n self.sp.user_playlist_add_tracks(\n self.user, playlist_id, song_uri)", "def get_existing_songs(self):\n\t\tsongs = self.db.songs.find()\n\t\texisting_songs = []\n\t\tfor song in songs:\n\t\t\texisting_songs.append(song['id'])\n\t\treturn existing_songs", "def add_song(self, name, year, title):\n album_found = find_object(name, self.albums)\n if album_found is None:\n album_found = Album(name, year, self.name)\n self.add_album(album_found)\n album_found.add_song(title)", "def test_single_track(self):\n self.add_mp3()\n (added, status) = self.app.add_album(self.filenames)\n self.assertEqual(added, True)\n self.assertEqual(self.get_album_count(), 1)\n album = Album.get_by_artist_album(self.app.curs, 'Artist', 'Album')\n self.assertNotEqual(album, None)\n self.assertEqual(album.artist, 'Artist')\n self.assertEqual(album.album, 'Album')\n self.assertEqual(album.album_type, 'album')\n self.assertEqual(album.totalseconds, 2)\n self.assertEqual(album.totaltracks, 1)", "def test_lyricswikia(self):\n bad_res = lw.get_lyrics('lyricswikia', 'eminem', 'los yourself')\n good_res = lw.get_lyrics('lyricswikia', 'eminem', 'lose yourself')\n self.assertEqual(bad_res, 404)\n self.assertTrue(good_res)", "def has_said(self, quote: str) -> bool:\n return any(q for q in self.quotes if q.content.lower() == quote.lower())", "def check_forbidden_words(song: Song, result: Result) -> Tuple[bool, List[str]]:\n\n song_name = slugify(song.name).replace(\"-\", \"\")\n to_check = slugify(result.name).replace(\"-\", \"\")\n\n words = []\n for word in FORBIDDEN_WORDS:\n if word in to_check and word not in song_name:\n words.append(word)\n\n return len(words) > 0, words", "def set_lyrics(self, lyrics: str) -> None:\n self.lyrics = lyrics", "def add_song(self, name, year, title):\n\n # Here we check if album exist under artist.\n album_found = find_object(name, self.albums)\n if album_found is None: # 
If there is no album found\n print(name + \"not found\") # we print \"Album name not found\n album_found = Album(name, year, self.name) # Change_3: Pass \"self.name\" instead of \"self\"\n self.add_album(album_found) # We add new_album to song.\n else: # if we found an existing album with same name\n print(\"found album\" + name) # we print found album name\n\n # so we add song to album_found\n album_found.add_song(title)", "def create_last_word_chains():\n song_urls = lyricsorter.get_song_url_list()\n word_list = []\n word_dict = {}\n Item = {}\n Item['id'] = \"last_words\"\n Item['words'] = {}\n viable_words = find_viable_words()\n for i, link in enumerate(song_urls):\n print(\"Last wording through song #{}\".format(str(i)))\n response = song_table.get_item(\n Key={\n 'id': link\n }\n )\n lyrics = []\n try:\n lyrics = response['Item']['lyric_array']\n except KeyError:\n pass\n\n for line in lyrics:\n if len(line) > 2 and len(line) < 12:\n last_word = line[len(line) - 1]\n second_last_word = line[len(line) - 2]\n third_last_word = line[len(line) - 3]\n # print(line)\n if last_word in viable_words and second_last_word in viable_words:\n if last_word not in word_list:\n word_list.append(last_word)\n if last_word not in word_dict:\n word_dict[last_word] = {\n \"1\": {\n\n },\n \"2\": {\n\n }\n }\n if second_last_word not in word_dict[last_word][\"1\"]:\n word_dict[last_word][\"1\"][second_last_word] = 1\n else:\n word_dict[last_word][\"1\"][second_last_word] = word_dict[last_word][\"1\"][second_last_word] + 1\n if third_last_word in viable_words:\n if third_last_word not in word_dict[last_word][\"2\"]:\n word_dict[last_word][\"2\"][third_last_word] = 1\n else:\n word_dict[last_word][\"2\"][third_last_word] = word_dict[last_word][\"2\"][third_last_word] + 1\n\n # print(word_dict)\n for i, word in enumerate(word_list):\n # print(\"Inserting word #{} of {}\".format(str(i), str(len(word_list))))\n label_1 = str(word + \"_last1\")\n label_2 = str(word + \"_last2\")\n item1 = {\n \"id\": label_1,\n \"words\": word_dict[word][\"1\"]\n }\n item2 = {\n \"id\": label_2,\n \"words\": word_dict[word][\"2\"]\n }\n word_relation_table.put_item(\n Item=item1\n )\n word_relation_table.put_item(\n Item=item2\n )", "def _run_lyrics_nogui(self):\n self.read_files()\n\n # find lyrics\n self._log_print(msg_GREEN=\"Searching for lyrics\")\n\n self.save_lyrics()\n\n if not self.write_tags():\n self._log_print(msg_WHITE=\"Cannot write tags because there are no \"\n \"coresponding files\")\n else:\n self._log_print(msg_GREEN=\"Done\")", "def add_song(self, song: Song):\n self.playlist.append(song)", "def test_genius(self):\n bad_res = lw.get_lyrics('genius', 'eminem', 'los yourself')\n good_res = lw.get_lyrics('genius', 'eminem', 'lose yourself')\n self.assertEqual(bad_res, 404)\n self.assertTrue(good_res)", "def test_single_track_blank_album(self):\n self.add_mp3(set_album=True, album='')\n (added, status) = self.app.add_album(self.filenames)\n self.assertEqual(added, False)\n self.assertIn('has no album tag', status)\n self.assertEqual(self.get_album_count(), 0)", "def __init__(self, lyrics_url, artist=None, album_title=None, folder_path=None, song_order=None, cover_size='600'):\n self.album = Album(title=album_title, artist=artist)\n self.artist = artist\n self.album_title = album_title\n self.lyrics_url = lyrics_url\n self.song_order = song_order\n self.folder_path = Path(folder_path) if folder_path else None\n self.cover_file_name = 'cover.jpg'\n self.cover_size = f'{cover_size}x{cover_size}'\n self.track_urls 
= []\n self.cover_downloaded = False\n \n # self.r = requests.get(lyrics_url).text\n # self.soup = BeautifulSoup(self.r, 'html.parser')\n self.r = requests.get(lyrics_url)\n self.soup = BeautifulSoup(self.r.content, 'html.parser')", "def unsaved_details_exist(self):\r\n return (self.talkDetailsWidget.saveButton.isEnabled() and\r\n (self.talkDetailsWidget.titleLineEdit.text() or\r\n self.talkDetailsWidget.presenterLineEdit.text() or\r\n self.talkDetailsWidget.categoryLineEdit.text() or\r\n self.talkDetailsWidget.descriptionTextEdit.toPlainText()))", "def associate_song(self, song):\n self.songs.append(song)", "def get_non_existing_songs_of_artists(self, artists_songs=None, existing_songs=None):\n\t\tif not isinstance(artists_songs, list):\n\t\t\tprint(1)\n\t\t\tartists_songs = self.get_songs_of_all_artists()\n\t\tif not isinstance(existing_songs, list):\n\t\t\tprint(2)\n\t\t\texisting_songs = self.get_existing_songs()\n\t\tnon_existing_songs = [song for i, song in enumerate(artists_songs) \n\t\t\tif song not in existing_songs]\n\t\treturn non_existing_songs", "def loadLyrics(dirName):\n lyricsDir = os.path.dirname(os.path.abspath(__file__)) + \"/lyrics/\"\n artistDir = os.path.join(lyricsDir, dirName) + \"/\"\n\n if not os.path.isdir(artistDir):\n print \"No artist named\", artistDir, \"in directory\", lyricsDir\n return None\n\n lyrics = []\n\n songs = os.listdir(artistDir)\n for song in songs:\n with open(artistDir + song, 'r') as songFile:\n songLines = songFile.readlines()\n\n # clean each line in each song and add if not empty\n for line in songLines:\n line = line.translate(None, string.punctuation)\n line = line.lower().strip()\n if line:\n lyrics.append(line.split())\n return lyrics", "def find_similar_songs(song, root):\n lyrics = song_information.get_lyrics(song)\n adjectives = find_repeated_adjectives(lyrics)\n similar_songs = []\n for child in root:\n if child != song:\n lyrics_child = song_information.get_lyrics(child)\n adjectives_child = find_repeated_adjectives(lyrics_child)\n for topic in adjectives_child:\n if topic in adjectives:\n song_artist = (\"'\" + song_information.get_songtitle(child)\n + \"' by \" + song_information.get_artist(child))\n similar_songs.append(song_artist)\n result = get_duplicates(similar_songs)\n return result", "def only_once(self) -> bool:\n return self.times == 1", "def lyrics_by_word(ans):\r\n songs_list = \"\"\r\n ans = ans.lower()\r\n albums = simple_album_list()\r\n for album in albums:\r\n songs = simple_songs_list(album)\r\n for song in songs:\r\n x = song_lyrics(song)\r\n song = str(song)\r\n if ans in x:\r\n songs_list += song + \", \"\r\n return songs_list[:-2]", "def song_length(ans):\r\n length = 0\r\n flag = 1\r\n albums = simple_album_list()\r\n for album in albums:\r\n songs = simple_songs_list(album)\r\n for song in songs:\r\n if ans == song:\r\n words = dbase()[album][0][song]\r\n words = words[2]\r\n words = words.split()\r\n for word in words:\r\n length += 1\r\n flag = 1\r\n return str(length)\r\n\r\n elif ans != song and flag == 0:\r\n return \"song not found!\"", "def search_done(self):\n \n if len(self.hypotheses) == 0:\n return True\n elif len(self.hypotheses[0]._sequence) >= min([\n self.input_length + 50,\n self.max_sequence_length\n ]):\n return True\n return False", "def scrape_song_to_db(self, artist, title, track_id):\n\n # remove featured artist names\n artist = stripFeat(artist)\n\n try:\n # record stout from lyricsgenius call because it catches errors and prints\n with Capturing() as output:\n songdata = 
self.api.search_song(title, artist)\n\n # for the few errors that have been raised\n except ReadTimeout:\n self.api.sleep_time += 3\n print(f\"sleep time increased to {self.api.sleep_time}\")\n self.record_error(track_id, \"ReadTimeout\")\n self.scrape_song_to_db(artist, title, track_id)\n return\n\n # take sleep time slowly back to minimum\n if self.api.sleep_time > self.minsleep:\n self.api.sleep_time -= 0.25\n print(f\"sleep time decreased to {self.api.sleep_time}\")\n\n # search successful\n if songdata != None:\n self.record_lyrics_result(track_id, songdata)\n\n # handle (record & retry) Timeout error\n elif output[1].startswith(\"Timeout\"):\n self.api.sleep_time += 3\n self.record_error(track_id, \"Timeout\")\n self.scrape_song_to_db(artist, title, track_id)\n return\n\n # record error: not in genius db\n elif output[1].startswith(\"No results\"):\n self.record_error(track_id, \"no_results\")\n\n # record error: song without lyrics\n elif output[1] == \"Specified song does not contain lyrics. Rejecting.\":\n self.record_error(track_id, \"lacks_lyrics\")\n\n # record error: URL issue\n elif (\n output[1]\n == \"Specified song does not have a valid URL with lyrics. Rejecting.\"\n ):\n self.record_error(track_id, \"invalid_url\")", "def get_lyrics(self):\n\t\treturn self._lyrics_list", "def create_playlist(self, playlist_name):\n playlist_name = Playlist()\n if self != playlist_name:\n print(f\"successfully created new playlist: {playlist_name}\")\n else:\n print(\"Cannot create playlist: A playlist with the same name already exists\")", "def update(self):\n self.haveClub = len(self.clubs()) > 0", "def exist(self):", "def test_no_song(self):\n expect = [\"nobody\", \"nothing\", \"no preview_url\", \"./static/defaultCoverArt.png\"]\n with mock.patch(\"spotlogin_api.get_current_call\", self.mock_no_song):\n result = spotify_login.get_current_song(self.user[INPUT])\n self.assertEqual(result, expect)", "def _has_turtle(self, name):\n\t\treturn name in self._turtles", "def is_once(self):\n return self.subscription_list.mode == gnmi_pb2.SubscriptionList.ONCE", "def check_if_already_exists(list_name, title, description):\n\n for item in list_name:\n if item['title'] == title:\n return 'Sorry, This title has already been used in another question'\n if item['description'] == description:\n return 'Sorry, This description has already been used in another question'", "def find_album(self):\n item = self.clementine_albums.currentItem()\n if not item:\n self.focus_albums()\n item = self.clementine_albums.currentItem()\n if item.text(0) in self.albums_map[self.c_artist]:\n ok = qtw.QMessageBox.question(self, self.appname, 'Album already has a '\n 'match - do you want to reassign?',\n qtw.QMessageBox.Yes | qtw.QMessageBox.No,\n qtw.QMessageBox.Yes)\n if ok == qtw.QMessageBox.No:\n return\n self.albums_map[self.c_artist].pop(item.text(0))\n # select albums for self.a_artist and remove the ones that are already matched\n albums = dmla.list_albums_by_artist('', self.a_artist, 'Titel')\n album_list = []\n for album in albums:\n test = album.id\n found = False\n for a_item in self.albums_map[self.c_artist].values():\n if a_item[1] == test:\n found = True\n break\n if not found:\n album_list.append((build_album_name(album), album.id))\n if album_list:\n albums = [x[0] for x in album_list]\n selected, ok = qtw.QInputDialog.getItem(self, self.appname, 'Select Album',\n albums, editable=False)\n if ok:\n a_item = self.albums_albums.findItems(\n str(album_list[albums.index(selected)][1]),\n 
core.Qt.MatchFixedString, 2)[0]\n c_year = str(item.data(0, core.Qt.UserRole))\n if c_year:\n a_year = a_item.text(1)\n if c_year != a_year:\n ask = f\"Clementine year ({c_year}) differs from Albums year ({a_year})\"\n ok = qtw.QMessageBox.question(self, self.appname, f\"{ask}, replace?\",\n qtw.QMessageBox.Yes | qtw.QMessageBox.No,\n qtw.QMessageBox.Yes)\n if ok == qtw.QMessageBox.Yes:\n a_item.setText(1, c_year)\n\n self.albums_to_update[self.c_artist].append(\n (a_item.text(0), a_item.text(1), int(a_item.text(2)), False, []))\n self.update_item(a_item, item)\n return\n self.add_album()" ]
[ "0.67191", "0.6461495", "0.64484215", "0.62452585", "0.6116291", "0.6073265", "0.603493", "0.60067284", "0.59900224", "0.59794277", "0.5927802", "0.5798049", "0.56955206", "0.56919104", "0.56533647", "0.5607341", "0.5586889", "0.55832136", "0.553297", "0.5515789", "0.54869497", "0.5463637", "0.54492414", "0.53928244", "0.53823465", "0.5352275", "0.5342825", "0.53323585", "0.53206784", "0.5286877", "0.5234254", "0.5233993", "0.52324516", "0.5228425", "0.52085984", "0.5198564", "0.51864254", "0.5184182", "0.51722926", "0.51543665", "0.5147829", "0.5136864", "0.5124679", "0.51237667", "0.5122275", "0.5100734", "0.50935096", "0.50925565", "0.5081712", "0.50811166", "0.50799745", "0.5064747", "0.5063836", "0.50636697", "0.50561535", "0.5044596", "0.5043959", "0.5042338", "0.503918", "0.50314313", "0.50217223", "0.50185406", "0.5006779", "0.5000091", "0.49961755", "0.49883544", "0.4987916", "0.4987467", "0.49820274", "0.49723786", "0.49683753", "0.49516106", "0.49509847", "0.49497062", "0.49490774", "0.4940827", "0.4915262", "0.4911272", "0.49001345", "0.48983014", "0.4896525", "0.4895944", "0.48932952", "0.4885021", "0.48841384", "0.48793197", "0.48763195", "0.4874689", "0.4873567", "0.48697975", "0.48664007", "0.48659897", "0.4863005", "0.48553172", "0.48499256", "0.4843019", "0.48419192", "0.4834537", "0.4826544", "0.48120472" ]
0.7237534
0
This is called to compare a lyrics note to the original to ensure they are not the same. If they are, such a lyrics note is rejected
def lyrics_note_is_same_as_original(): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_duplicate_notes(self, tokens, curr_note, step) -> bool:\n same_note_cnt = 0\n idx = step - 3\n while idx > 0:\n prev_note = self._get_num(self.tgt_dict.string(tokens[0, idx : idx + 1]))\n if prev_note != curr_note:\n break\n same_note_cnt += 1\n idx -= 4\n\n if same_note_cnt > _config.PitchPara.Max_Same_Pitch.value:\n return True\n return False", "def test_ensure_passage_is_not_removed(self):\n simple = self.TEI.getPassage(MyCapytain.common.reference.Reference(\"1.pr.1-1.2.5\"))\n orig_refs = self.TEI.getValidReff(level=3)\n self.assertIn(\"1.pr.1\", orig_refs)\n self.assertIn(\"1.1.1\", orig_refs)\n self.assertIn(\"1.2.4\", orig_refs)\n self.assertIn(\"1.2.5\", orig_refs)\n\n simple = self.TEI.getPassage(MyCapytain.common.reference.Reference(\"1.pr-1.2\"))\n orig_refs = self.TEI.getValidReff(level=3)\n self.assertIn(\"1.pr.1\", orig_refs)\n self.assertIn(\"1.1.1\", orig_refs)\n self.assertIn(\"1.2.4\", orig_refs)\n self.assertIn(\"1.2.5\", orig_refs)", "def check_note_for_history(self):\r\n testrun_notes = [\r\n \"multiple loci suspected\",\r\n \"suspected multicopy, poor performance\",\r\n \"fixed allele 1\",\r\n \"very poor amplification\",\r\n \"very poor amplification, high off target percent\",\r\n \"poor amplification, maybe redesign\",\r\n \"mono-allele 1?\",\r\n \"redesign primer\",\r\n \"most of target\",\r\n \"poor performance\",\r\n \"poor performance, primers off target\",\r\n \"off target amp\",\r\n \"mono-allele 1\",\r\n \"mono-allele 2 and off target\",\r\n \"Nate said it is a mess\",\r\n \"off target amp\",\r\n \"mono-allele 1 and off target\"\r\n ]\r\n if self.note == \"No primers made by primer3\":\r\n self.add_history(\"2018-2-12\",\"Nate\",\"primers were not made for this sequence variation\")\r\n self.note = \"sequence variant selected by GBS-SNP-selection\"\r\n elif self.note == \"Removed by nate, close to other SNP\":\r\n self.add_history(\"2018-2-19\",\"Nate\",\"Primers designed for this SNP were taken out, were to close to other SNP\")\r\n self.note = \"sequence variant selected by GBS-SNP-selection\"\r\n elif self.note == \"Predicted to form hetrodymer\":\r\n self.add_history(\"2018-2-19\",\"Nate\",\"Predicted to form hetrodymer\")\r\n self.note = \"sequence variant selected by GBS-SNP-selection\"\r\n elif self.note == \"no valid primer pair could be made for this position\":\r\n self.note = \"sequence variant selected by GBS-SNP-selection\"\r\n elif self.note in testrun_notes:\r\n self.add_history(\"2018-2-23\",\"Thomas\",self.note)\r\n self.note = \"sequence variant selected by GBS-SNP-selection\"\r\n #check if any were missed.\r\n if self.active and self.note != \"sequence variant selected by GBS-SNP-selection\":\r\n pass #print(self.note)\r", "def isEqualNote(self, notestring):\n if isinstance(notestring, Note):\n noteEncoding = notestring.getEncoding()\n else:\n note = self.new(notestring)\n simpleNote = note.simplify()\n noteEncoding = simpleNote.getEncoding()\n if noteEncoding == None:\n return False\n else:\n if self.getEncoding() == noteEncoding:\n return True\n else:\n return False", "def handle_one_off(self, shorter, longer):\n found = False\n for n, c in enumerate(shorter):\n if shorter[n] == longer[n]:\n continue\n elif shorter[n] == longer[n+1]:\n if not found:\n found = True\n else:\n return False\n return True", "def get_mismatches(rec):\n qseq = rec.get_forward_sequence().upper()\n if rec.is_reverse:\n qseq = reverseComplement(qseq)\n rseq = rec.get_reference_sequence().upper()\n for qpos, rpos in rec.get_aligned_pairs():\n if qpos 
== None or rpos == None:\n continue # no indels yet\n q = qseq[qpos]\n r = rseq[rpos - rec.reference_start]\n if q != r:\n position = (rec.reference_name, rpos)\n change = (r, q)\n yield (position, change)", "def test_disambiguate(self):\n self.assertEqual(self.RNA(\"\").disambiguate(), \"\")\n self.assertEqual(\n self.RNA(\"AGCUGAUGUA--CAGU\").disambiguate(), \"AGCUGAUGUA--CAGU\"\n )\n self.assertEqual(\n self.RNA(\"AUn-yrs-wkmCGwmrNMWRKY\").disambiguate(\"strip\"), \"AU--CG\"\n )\n s = self.RNA(\"AUn-yrs-wkmCGwmrNMWRKY\")\n t = s.disambiguate(\"random\")\n u = s.disambiguate(\"random\")\n for i, j in zip(str(s), str(t)):\n if i in s.moltype.degenerates:\n assert j in s.moltype.degenerates[i]\n else:\n assert i == j\n self.assertNotEqual(t, u)\n self.assertEqual(len(s), len(t))", "def test_titles_do_not_match(self):\r\n gm_title = 'Zhao Hua'\r\n sp_title = 'MMXXX (ft Moor Mother)'\r\n self.assertFalse(gmspotify.titles_match(gm_title, sp_title))", "def monkey_trouble(a_smile, b_smile):\r\n return a_smile == b_smile", "def is_lyrics_approved():", "def test_single_not_match_returns_line(self):\n eq_(self.line,line_no_matches_ngreps(self.line,[\"nomatch\"]))", "def testSynonymDuplicate(self):\n\t\t\t\tone = spinner.Word.objects.get_single('mac', True)\n\t\t\t\ttwo = spinner.Word.objects.get_single('macintosh', True)\n\t\n\t\t\t\tsyn = spinner.Synonym.objects.get_single(one, two, True)\n\t\t\t\t\n\t\t\t\tsyn2 = spinner.Synonym.objects.get_single(two, one, True)\n\n\t\t\t\tassert syn == syn2\n\n\t\t\t\tsyn.delete()\n\t\t\t\tone.delete()\n\t\t\t\ttwo.delete()", "def test_check_consistency_05():\n xml_notes, divisions = R.get_notes(os.path.join(\n '..', 'test', 'data',\n 'test_cross_barline_8-8_no_rest_no_pitch.xml'))\n note_attr_list = [\n R.get_note_attrs(xml_note, divisions) for xml_note in xml_notes]\n assert not U.check_consistency(note_attr_list)", "def monkey_trouble2(a_smile, b_smile):\n if a_smile == b_smile:\n return True\n else:\n return False", "def _compareIgnoreVersion(self, loc1, loc2, msg=None):\r\n if loc1.version_agnostic() != loc2.version_agnostic():\r\n self.fail(self._formatMessage(msg, u\"{} != {}\".format(unicode(loc1), unicode(loc2))))", "def test_notes_invalid_student(self):\n student_id = '1234567890'\n career_id = 34\n perdiod_id = 115\n result = self.ucuenca.notes(student_id, career_id, perdiod_id)\n self.assertFalse(result)", "def test_cclwarning_not_equal():\n w = pyccl.CCLWarning(\"blah\")\n w2 = pyccl.CCLWarning(\"blahh\")\n assert w is not w2\n assert w != w2\n assert hash(w) != hash(w2)\n\n v = pyccl.CCLDeprecationWarning(\"blah\")\n v2 = pyccl.CCLDeprecationWarning(\"blahh\")\n assert v is not v2\n assert v != v2\n assert hash(v) != hash(v2)", "def assertMultiLineEqual(self, first, second, msg=None):\n self.assertTrue(isinstance(first, str),\n 'First argument is not a string')\n self.assertTrue(isinstance(second, str),\n 'Second argument is not a string')\n\n if first != second:\n message = ''.join(difflib.ndiff(first.splitlines(True),\n second.splitlines(True)))\n if msg:\n message += \" : \" + msg\n self.fail(\"Multi-line strings are unequal:\\n\" + message)", "def test_idempotent():\n dirty = u'<span>invalid & </span> < extra http://link.com<em>'\n\n clean = bl.clean(dirty)\n eq_(clean, bl.clean(clean))\n\n bleached = bl.bleach(dirty)\n eq_(bleached, bl.bleach(bleached))\n\n linked = bl.linkify(dirty)\n eq_(linked, bl.linkify(linked))", "def __citation_correction(self, bs, ground_truth):\n bs_ref = bs.findNext('bibl')\n gt_ref = ground_truth.findNext('ref')\n 
while gt_ref is not None:\n if gt_ref.find('article-title') != bs_ref.title:\n pass\n gt_ref = gt_ref.findNext('ref')", "def compareLyricalSimilarity(userTranscript, originalCaption, verbose=False, profile=False):\n error = \"\"\n if (profile): start = time.time()\n # cmp = compareToDialogue(audioFile, originalCaption, verbose=verbose)\n cmp = similar(userTranscript, originalCaption)\n if (profile):\n end = time.time()\n print(\"(profile) lyrical similarity :\", end-start)\n return cmp, error", "def test_equality(self):\n\n # change .phones\n pw1: PhonologicalWord = PhonologicalWord(\n phones=(\"P\", \"ER0\", \"M\", \"IH1\", \"T\"),\n stress_pattern=[\n Stress.NON_VOWEL,\n Stress.NO_STRESS,\n Stress.NON_VOWEL,\n Stress.PRIMARY,\n Stress.NON_VOWEL,\n ],\n )\n pw2: PhonologicalWord = PhonologicalWord(\n phones=(\"P\", \"ER0\", \"M\", \"IH1\", \"P\"),\n stress_pattern=[\n Stress.NON_VOWEL,\n Stress.NO_STRESS,\n Stress.NON_VOWEL,\n Stress.PRIMARY,\n Stress.NON_VOWEL,\n ],\n )\n self.assertNotEqual(pw1, pw2)\n\n # change .stress_pattern\n pw1: PhonologicalWord = PhonologicalWord(\n phones=(\"P\", \"ER0\", \"M\", \"IH1\", \"T\"),\n stress_pattern=[\n Stress.NON_VOWEL,\n Stress.NO_STRESS,\n Stress.NON_VOWEL,\n Stress.PRIMARY,\n Stress.NON_VOWEL,\n ],\n )\n pw2: PhonologicalWord = PhonologicalWord(\n phones=(\"P\", \"ER0\", \"M\", \"IH1\", \"T\"),\n stress_pattern=[\n Stress.NON_VOWEL,\n Stress.PRIMARY,\n Stress.NO_STRESS,\n Stress.NON_VOWEL,\n Stress.NO_STRESS,\n Stress.NON_VOWEL,\n ],\n )\n self.assertNotEqual(pw1, pw2)", "def test_check_consistency_04():\n xml_notes, divisions = R.get_notes(os.path.join(\n '..', 'test', 'data', 'test_cross_barline_8-8_extra_rest.xml'))\n note_attr_list = [\n R.get_note_attrs(xml_note, divisions) for xml_note in xml_notes]\n assert not U.check_consistency(note_attr_list)", "def test_can_mismatch(self):\n assert not self.RNA(\"\").can_mismatch(\"\")\n assert self.RNA(\"N\").can_mismatch(\"N\")\n assert self.RNA(\"R\").can_mismatch(\"R\")\n assert self.RNA(\"N\").can_mismatch(\"r\")\n assert self.RNA(\"CGUACGCAN\").can_mismatch(\"CGUACGCAN\")\n assert self.RNA(\"U\").can_mismatch(\"C\")\n assert self.RNA(\"UUU\").can_mismatch(\"UUC\")\n assert self.RNA(\"UUU\").can_mismatch(\"UUY\")\n assert not self.RNA(\"UUU\").can_mismatch(\"UUU\")\n assert not self.RNA(\"UCAG\").can_mismatch(\"UCAG\")\n assert not self.RNA(\"U--\").can_mismatch(\"U--\")", "def forbid_sequence(*s,min_len=2):\n assert len(s) >= 1\n notes = [part for part in tools.iter_melodies(*s)]\n\n for start in range(len(notes)):\n for end in range(start,len(notes)):\n if end - start < min_len:\n continue\n\n # try a motif\n motif = []\n for i in range(start,end+1):\n motif.extend(notes[i])\n\n # try a following\n part_nb = end - start + 1\n try:\n following = []\n for i in range(end+1, part_nb + end + 1):\n following.extend(notes[i])\n except IndexError:\n break\n\n # is there a sequence?\n try:\n if tools.matchSequence(motif, following, s[0].scale):\n warn(f\"Sequence in {(s.title for s in s)}.\",motif,following)\n except ValueError:\n continue", "def __CompareText(self, s1, s2):\n # The \"splitlines\" method works independently of the line ending\n # convention in use.\n return s1.splitlines() == s2.splitlines()", "def negation_check(self,sentence):", "def test_same_sentence_check(self):\n block = get_text(SAMPLE_SENTENCE)\n self.assertTrue(same_sentence_check(block, 0, 98))\n self.assertFalse(same_sentence_check(block, 166, 168))", "def isEqualPitch(self, notestring):\n if self.isEqualNote(notestring) 
and self.isEqualOctave(notestring):\n return True\n else:\n return False", "def test_unequality(self):\n self.assertFalse(Record(1, 2) != Record(1, 2))\n self.assertTrue(Record(1, 2) != Record(1, 3))\n self.assertTrue(Record(1, 2) != Record(2, 2))\n self.assertTrue(Record(1, 2) != Record(3, 4))", "def invalid_notes(self):\n return self._invalid_notes", "def test_two_footnotes(self):\n text = \"Footnote[^1]\\n\\n[^1]: Footnote text\"\n self.assertNotEqual(self.md(text), self.md(text))", "def check_doc_unchanged(original, new, doc_name):\n changes = get_doc_changes(original, new)\n\n if changes:\n raise ValueError(\n '{} differs from stored ({})'.format(\n doc_name,\n ', '.join(['{}: {!r}!={!r}'.format('.'.join(offset), v1, v2) for offset, v1, v2 in changes])\n )\n )", "def test_artists_match_diff_styles(self):\r\n gm_artists = ['Walter Bishop Jr.']\r\n sp_artists = ['Walter Bishop Jr']\r\n self.assertTrue(gmspotify.artists_match(gm_artists, sp_artists))", "def test_not_equal_on_equal(self):\n a = objects.OpaqueObject(self.bytes_a, enums.OpaqueDataType.NONE)\n b = objects.OpaqueObject(self.bytes_a, enums.OpaqueDataType.NONE)\n self.assertFalse(a != b)\n self.assertFalse(b != a)", "def diff_log(self, other):\n if self._seq_list != other._seq_list:\n print(\"Fuzzing sequence lists do not match.\")\n return False\n return True", "def __CompareText1(self, s1, s2,result):\n # lines with tag that are excluded by hand (by the user) \n s0_excluded=list()\n for l0 in self.excluded_lines.splitlines():\n s0_excluded.append(l0.strip()) \n\n s1_filtered=list()\n s2_filtered=list() \n for l1 in s1.splitlines(): \n if l1.__contains__(self.stdout_tag): \n check=0\n for k in s0_excluded: \n if l1.__contains__(k): check=check+1 \n if check==0: s1_filtered.append(l1.strip())\n for l2 in s2.splitlines(): \n if l2.__contains__(self.stdout_tag): \n check=0\n for k in s0_excluded: \n if l2.__contains__(k): check=check+1 \n if check==0: s2_filtered.append(l2.strip())\n # some debug: shows the lines which will be compared \n mm=''\n nTot=0\n nDif=0\n nMis=0\n for l in range(max(len(s1_filtered),len(s2_filtered))): \n nTot=nTot+1\n if ( l>len(s1_filtered)-1 ): # ref[l] exists but log[l] does not\n nMis=nMis+1\n if ( nMis<=5 ) : # print only the first 5 missing lines\n mm=mm+'\\n%5i'%l\n if ( nMis<5 ) :\n mm=mm+'-> log: ...MISSING('+repr(nMis)+')...'+'\\n' \n else:\n mm=mm+'-> log: ...MISSING(5)... 
SKIP THE REST'+'\\n' \n if(l<=len(s2_filtered)-1):\n mm=mm+' ref: '+s2_filtered[l]+'\\n'\n else:\n mm=mm+' ref: '+'\\n'\n elif( l>len(s2_filtered)-1 or # log[l] exists but ref[l] does not\n s1_filtered[l] != s2_filtered[l] ): # log[l] != ref[l]\n nDif=nDif+1\n mm=mm+'\\n%5i'%l\n mm=mm+'-> log: '+s1_filtered[l]+'\\n'\n if(l<=len(s2_filtered)-1):\n mm=mm+' ref: '+s2_filtered[l]+'\\n' \n else:\n mm=mm+' ref: '+'\\n' \n if(nDif>0 or nMis>0): mm=mm+'\\nSummary: '+repr(nDif)+' lines differ and '+repr(nMis)+' are missing out of '+repr(nTot)+' lines\\n'\n result[\"ExecTest.stdout_VS_expected_stdout\"] = result.Quote(mm)\n logger.debug('ExecTestBase2:__CompareTest1: '+mm) \n # Comparision of filtered sets \n # - filtered sets should have the same length: if this is not the \n # case the test will stop here \n if not(len(s1_filtered)==len(s2_filtered)): \n self.causes.append(' Different number of tagged lines to compare \\n'+\\\n 'in the stdout and ref_stdout')\n return False \n # Scan of the s1 and ref_s1=s2 looking for the '=' \n s1_filtered_equals=list()\n s2_filtered_equals=list() \n for i in range(len(s1_filtered)):\n if s1_filtered[i].__contains__('='):\n s1_filtered_equals.append(s1_filtered[i].replace(\\\n self.stdout_tag,'').strip())\n if s2_filtered[i].__contains__('='):\n s2_filtered_equals.append(s2_filtered[i].replace(\\\n self.stdout_tag,'').strip()) \n # - in case there is not '=' the strings have to be the same \n if(not(s1_filtered[i].__contains__('=')) and \n s2_filtered[i].__contains__('=')): return False \n if(not(s1_filtered[i].__contains__('=')) and \n not(s2_filtered[i].__contains__('=')) and \n not(s1_filtered[i]==s2_filtered[i])): return False \n\n # Analysis of lines with '='\n fail_cond=True \n logger.debug('ExecTestBase2:__CompareTest1: # lines with = is '+\\\n repr(len(s1_filtered_equals))) \n for i in range(len(s1_filtered_equals)):\n s1_split_list=s1_filtered_equals[i].split('=')\n s2_split_list=s2_filtered_equals[i].split('=')\n logger.debug('ExecTestBase2:__CompareTest1: right side of = for '+\\\n repr(i)+' are '+s1_split_list[1]+' '+s2_split_list[1])\n # - No local tolerance marked with '+-' in the s2\n if not(s2_split_list[1].__contains__('+-')):\n try: # integer and float to float\n s1_split_list_1=float(s1_split_list[1].strip())\n s2_split_list_1=float(s2_split_list[1].strip())\n # - comparison with global tolerance (if any) \n if(s1_split_list_1!=0.): \n if(not(s1_split_list[0]==s2_split_list[0]) or \n not((s1_split_list_1==s2_split_list_1) or \n ((s1_split_list_1<s2_split_list_1+\\\n s2_split_list_1*float(self.stdout_tol)/100) and \n (s1_split_list_1>s2_split_list_1-\n s2_split_list_1*float(self.stdout_tol)/100))) \n ): fail_cond=fail_cond and False \n else: # case = 0 \n if(not(s1_split_list[0]==s2_split_list[0]) or \n not((s1_split_list_1==s2_split_list_1) or \n ((s1_split_list_1<s2_split_list_1+\\\n float(self.stdout_tol)/100) and \n (s1_split_list_1>s2_split_list_1-\n float(self.stdout_tol)/100))) \n ): fail_cond=fail_cond and False \n logger.debug('ExecTestBase2:__CompareTest1: right side of = for '+\\\n repr(i)+' are '+repr(s1_split_list_1)+' '+\\\n repr(s2_split_list_1)+' with global tol (%) '+\\\n repr(self.stdout_tol)+' '+repr(fail_cond) )\n except: # strings\n s1_split_list[1]=s1_split_list[1].strip() \n s2_split_list[1]=s2_split_list[1].strip() \n logger.debug('ExecTestBase2:__CompareTest1: right side of = for '+\\\n repr(i)+' are '+s1_split_list[1]+' '+\\\n s2_split_list[1])\n if(not(s1_split_list[0]==s2_split_list[0]) or \n 
not(s1_split_list[1]==s2_split_list[1]) ): fail_cond=fail_cond and False \n else: \n # - comparison with local tolerance \n print 'mgallas, to be done local tolerance'\n return fail_cond\n\n for j in range(len(self.causes)):\n print 'mgallas causes '+causes[j]\n \n return True", "def __ne__(self, other):\n if not isinstance(other, CaptionsTest):\n return True\n\n return self.to_dict() != other.to_dict()", "def isduplicate(self, a, b):\n db = bibtexparser.loads(a+'\\n'+b)\n e1, e2 = db.entries\n refs = Biblio(similarity=self.similarity)\n return refs.eq(e1, e2)", "def remove_duplicate_notes(self):\n res = []\n for x in self.notes:\n if x not in res:\n res.append(x)\n self.notes = res\n return res", "def _check_consistency(self) -> None:\n lbl_vals_from_metadata = set(self.infos.keys())\n lbl_vals_from_data = set(np.unique(self.data))\n # TODO: check if numerical datatype shenanigans ruin the day\n # i.e. something along the lines of 1.0 != 1\n symm_diff = lbl_vals_from_data ^ lbl_vals_from_metadata\n\n if len(symm_diff) != 0:\n msg = (f'Label mismatch between data and metadata! Expected vanishing '\n f'symmetric difference but got: {symm_diff}')\n raise ValueError(msg)", "def can_generate_ransom_note(self):\n if self.ransom_text == '' or self.ransom_text == ' ':\n return True\n ransom_text_words = self.ransom_text.split(' ')\n magazine_text_words = self.magazine_text.split(' ')\n # counting the occurrences of words in the ransom and magazine texts.\n ransom_count = self._count_words_in_string(ransom_text_words)\n magazine_count = self._count_words_in_string(magazine_text_words)\n result = False\n for i in ransom_text_words:\n # if magazine_count hashmap doesn't have word\n if magazine_count.get(i) is None:\n result = False\n break\n # if ransom_count hashmap have less word occurances than magazine count.\n if ransom_count.get(i) <= magazine_count.get(i):\n result = True\n else:\n result = False\n break\n return result", "def remove_dup(noteList):\n newList = []\n for note in noteList:\n if (note == 'R') or (note not in newList):\n newList.append(note)\n else:\n newList.append('R')\n\n return newList", "def text_compare(t1, t2):\n if not t1 and not t2:\n return True\n if t1 == '*' or t2 == '*':\n return True\n return (t1 or '').strip() == (t2 or '').strip()", "def is_same_song(a, b):\n\n for k in 'album', 'title', 'artists':\n if a[k] != b[k]:\n return False\n\n return True", "def is_correction(self):\n # OK, go looking for RESENT style tags, assume it happens within first\n # 300 chars\n if RESENT.search(self.text[:300]):\n return True\n if self.bbb is None or not self.bbb:\n return False\n if self.bbb[0] in ['A', 'C']:\n return True\n return False", "def test_journal_non_triple_syntax(self):\n spi_search = \"find j physics jcap\"\n inv_search = \"journal:physics and journal:jcap\"\n self._compare_searches(inv_search, spi_search)", "def all_notes_line_up(a_list, b_list):\n a_list = [x for x in a_list if not x.is_rest] # copy NoteList to list\n b_list = [x for x in b_list if not x.is_rest] # copy NoteList to list\n\n # remove matched notes\n for a_note in a_list[:]:\n for b_note in b_list[:]:\n if (a_note.start, a_note.end) == (b_note.start, b_note.end):\n # remove the matched pair from their respective lists\n a_list.remove(a_note)\n b_list.remove(b_note)\n break\n\n return a_list, b_list", "def _gitline_comparator(self, a, b):\n if a.startswith('!'):\n return -1\n elif b.startswith('!'):\n return 1\n else:\n return a == b", "def _gitline_comparator(self, a, b):\n if a.startswith('!'):\n 
return -1\n elif b.startswith('!'):\n return 1\n else:\n return a == b", "def do_verify(self, args):\n\n pn = \\\n self._get_choice_(\"pn\", self.promissory_notes, \"Which promissory note needs to be verified?\")\n\n try:\n verify_promissory_note(pn)\n except Exception as e:\n self._print_exception_(e)\n return\n\n print(\"Promissory note is correct.\\n\")", "def is_copy_modify_with_no_change(diff_header):\n return re.search(r\"(?m)^similarity index 100%\", diff_header)", "def check_correctness(self, expected, got):\n expected_lines = expected.strip().splitlines()\n got_lines = got.strip().splitlines()\n if len(got_lines) != len(expected_lines):\n return False\n else:\n for exp, got in zip(expected_lines, got_lines):\n if self.params['strictwhitespace']:\n if exp.rstrip() != got.rstrip():\n return False\n else:\n if exp.strip().split() != got.strip().split():\n return False\n return True", "def _print_first_difference(\n arec, brec, ignore_case=False, ignore_N=False, report_match=True\n):\n aseq, bseq = arec.seq, brec.seq\n asize, bsize = len(aseq), len(bseq)\n\n matched = True\n for i, (a, b) in enumerate(zip_longest(aseq, bseq)):\n if ignore_case and None not in (a, b):\n a, b = a.upper(), b.upper()\n\n if ignore_N and (\"N\" in (a, b) or \"X\" in (a, b)):\n continue\n\n if a != b:\n matched = False\n break\n\n if i + 1 == asize and matched:\n if report_match:\n printf(\"[green]Two sequences match\")\n match = True\n else:\n printf(\"[red]Two sequences do not match\")\n\n snippet_size = 20 # show the context of the difference\n\n printf(\"[red]Sequence start to differ at position {}:\".format(i + 1))\n\n begin = max(i - snippet_size, 0)\n aend = min(i + snippet_size, asize)\n bend = min(i + snippet_size, bsize)\n\n printf(\"[red]{}|{}\".format(aseq[begin:i], aseq[i:aend]))\n printf(\"[red]{}|{}\".format(bseq[begin:i], bseq[i:bend]))\n match = False\n\n return match", "def test_not_equal_on_type_mismatch(self):\n a = objects.OpaqueObject(self.bytes_a, enums.OpaqueDataType.NONE)\n b = \"invalid\"\n self.assertTrue(a != b)\n self.assertTrue(b != a)", "def equal_ignore_order(self, a, b):\n unmatched = list(b)\n for element in a:\n try:\n unmatched.remove(element)\n except ValueError:\n return False\n return not unmatched", "def assert_contents_equivalent(contents_a, contents_b):\n assert normalize_contents(contents_a) == normalize_contents(contents_b)", "def test_fastq_mismatch():\n cluster = clust.Clustering.from_fastq(TMP + 'mismatch.fastq',\n id_length=4,\n adapter='ACGT',\n threshold=0, prefix=1)\n uid1_expect = 'AAAACCCC'\n uid2_expect = 'CCCCAAAA'\n uid3_expect = 'AAAAAAAA'\n seq1_expect = 'ACCTCTCCCTGTGGGTCATGTGACT'\n seq2_expect = 'TTGTTTGAAAAACCTCGAAAGTAAC'\n seq3_expect = 'CATTTTTGTGTCCAATGCCTAAATTCCTTTTTGTGTCCAATGCCTAAATT'\n\n assert uid1_expect in cluster, \"%r not in %r\" % (uid1_expect, list(cluster.keys()))\n assert uid2_expect in cluster, \"%r not in %r\" % (uid2_expect, list(cluster.keys()))\n assert uid3_expect in cluster, \"%r not in %r\" % (uid3_expect, list(cluster.keys()))\n assert cluster[uid1_expect].sequence.sequence == seq1_expect, \\\n \"%r != %r\" % (cluster[uid1_expect].sequence.sequence, seq1_expect)\n assert cluster[uid2_expect].sequence.sequence == seq2_expect, \\\n \"%r != %r\" % (cluster[uid2_expect].sequence.sequence, seq2_expect)\n assert cluster[uid3_expect].sequence.sequence == seq3_expect, \\\n \"%r != %r\" % (cluster[uid3_expect].sequence.sequence, seq3_expect)", "def rubbish_notes(notes):\n regex = re.compile(RUBBISH_NOTE_REGEX)\n return 
regex.search(notes)", "def compare_broken_content(broken_content_before, broken_content_after):\n unique_ids_before = set([i[\"unique_id\"] for i in broken_content_before])\n unique_ids_after = set([i[\"unique_id\"] for i in broken_content_after])\n \n new_broken_content_ids = unique_ids_after.difference(unique_ids_before)\n new_broken_content = []\n for item in broken_content_after:\n if item[\"unique_id\"] in new_broken_content_ids:\n new_broken_content.append(item)\n return new_broken_content", "def get_similar_lines(self, Coe1, Coe2):\n line1_victor = [Coe1[1], -Coe1[0]]\n line2_victor = [Coe2[1], -Coe2[0]]\n victor = line1_victor[1] * line2_victor[0] - line2_victor[1] * line1_victor[0]\n if 0 <= round(victor, 2) <= 0.2:\n return True\n else:\n return False", "def check_call_similarity(self):\r\n \r\n if self.old and not self.new:\r\n self.similarity = \"LOSS\"\r\n elif not self.old and self.new:\r\n self.similarity = \"GAIN\"\r\n else:\r\n if not self.old.is_variant and self.new.is_variant:\r\n self.similarity = \"GAIN\" \r\n elif self.old.is_variant and not self.new.is_variant:\r\n self.similarity = \"LOSS\" \r\n\r\n else:\r\n self.similarity = \"SAME\"", "def _is_duplicate(a: str, b: str) -> bool:\n la = len(a)\n lb = len(b)\n diff = abs(la - lb)\n if diff > 50:\n return False\n denom = min(la, lb) + diff / 2\n ratio = levenshtein(a.casefold(), b.casefold()) / denom\n return ratio < 0.1", "def assert_not_equal(self, first, second, msg=\"\"):\r\n assert first != second", "def test_diff(self):\n self.assertEqual(self.RNA(\"UGCUGCUC\").diff(\"\"), 0)\n self.assertEqual(self.RNA(\"UGCUGCUC\").diff(\"U\"), 0)\n self.assertEqual(self.RNA(\"UGCUGCUC\").diff(\"UCCCCCUC\"), 3)\n # case-sensitive!\n self.assertEqual(self.RNA(\"AAAAA\").diff(\"CCCCC\"), 5)\n # raises TypeError if other not iterable\n self.assertRaises(TypeError, self.RNA(\"AAAAA\").diff, 5)", "def not_equal(self, skip):\n for word in self.two_words():\n if word.value != skip:\n return word", "def __ne__(self, line):\n \n return not self.__eq__(line)", "def test_not_equal(self):\n x = Point(\n lat=24.4,\n lng=23.1,\n author=self.u\n )\n self.assertFalse(self.a == x)\n self.assertTrue(self.a != x)", "def __ne__(self, other):\n if not isinstance(other, NotaryJournalMetaData):\n return True\n\n return self.to_dict() != other.to_dict()", "def __ne__(self, other):\n if (self.timestamp != other.timestamp) and (self.hash != other.hash):\n return True\n\n else:\n return False", "def test_match_negative(self):\n self.paste.body = \"\"\n self.assertFalse(self.analyzer.match(self.paste))\n self.paste.body = None\n self.assertFalse(self.analyzer.match(self.paste))\n self.paste.body = \"https://www.google.com\"\n self.assertFalse(self.analyzer.match(self.paste))\n self.paste.body = \"123456789\"\n self.assertFalse(self.analyzer.match(self.paste))\n self.paste.body = \"123 456 789\"\n self.assertFalse(self.analyzer.match(self.paste))\n self.paste.body = \"+42\"\n self.assertFalse(self.analyzer.match(self.paste))\n self.paste.body = \"+42 1\"\n self.assertFalse(self.analyzer.match(self.paste))\n self.paste.body = \"+123456789012345678901234567890\"\n self.assertFalse(self.analyzer.match(self.paste))", "def test_lowquoteSanity(self):\n for s in stringSubjects:\n self.assertEqual(s, irc.lowDequote(irc.lowQuote(s)))", "def __ne__(self, other):\n if not isinstance(other, AuthenticityCheckResultItem):\n return True\n\n return self.to_dict() != other.to_dict()", "def test_metrolyrics(self):\n bad_res = lw.get_lyrics('metrolyrics', 'eminem', 'los 
yourself')\n good_res = lw.get_lyrics('metrolyrics', 'eminem', 'lose yourself')\n self.assertEqual(bad_res, 404)\n self.assertTrue(good_res)", "def test_unwanted_words(self) -> None:\n pad_open: bool = False\n for word in self.report.get_words():\n if word.text in self.rules.citation_delimiters:\n pad_open = not pad_open\n continue\n if pad_open:\n continue\n for u_word in self.rules.unwanted_words:\n if word.text == u_word[\"word\"]:\n self.add_error(\n f\"Ordet {word.text} är inte tillåtet, \"\n f\"använd {u_word['alternative']} istället.\",\n word=word,\n )\n break", "def test_get_lyrics_invalid_format(bot):\n assert get_lyrics('asdf', 1) == 'Invalid format!'", "def test_not_equal_on_not_equal_value(self):\n a = objects.OpaqueObject(self.bytes_a, enums.OpaqueDataType.NONE)\n b = objects.OpaqueObject(self.bytes_b, enums.OpaqueDataType.NONE)\n self.assertTrue(a != b)\n self.assertTrue(b != a)", "def _fake_message_compare(m1, m2):\r\n m1 = m1.serialize()\r\n m2 = m2.serialize()\r\n diff = False\r\n for i in range(len(m1)):\r\n if m1[i] is None:\r\n continue\r\n if m1[i] != m2[i]:\r\n diff = True\r\n break\r\n return not diff", "def testRichnessDifferent(self):\n self.assertNotEqual(self.coal.get_species_richness(1), self.coal2.get_species_richness(1))\n self.assertEqual(2621, self.coal.get_species_richness(1))", "def test_new_log_diff():\n assert get_clip(audlist, log, 1) != get_clip(audio['NTF'], log, 1)", "def test__same_text_correlation(self):\n \n _log.info('-'*80)\n \n # arrange \n text1 = \"love is rain as long story short\"\n text2 = text1\n\n dump_file = getInputFile(\"swiki_knowledge_output.xml\")\n parsed_file = getOutputFile(\"swiki_knowledge_output.parsed.xml\")\n #wdb_file = getOutputFile(\"swiki_knowledge_output.wdb\")\n\n articles = ['Rain', 'Love', 'Tree'] \n \n # act\n wn.make_dump(dump_file, articles, compress=False)\n wn.parse_dump(dump_file, parsed_file)\n db_wrapper = wn.build_database_wrapper(parsed_file, StopWordsStemmer([]))\n \n #self.addCleanup(os.remove, self.tmp_dump_file)\n \n comparer = SemanticComparer(db_wrapper)\n correlation = comparer.compare(text1, text2)\n _log.info(test_utils.get_texts_correlation_message(text1, text2, correlation))\n self.assertAlmostEqual(correlation, 1.0, msg=\"for same text correlation should be 1\")", "def verify_not_equal(self, first, second, msg=\"\"):\r\n try:\r\n self.assert_not_equal(first, second, msg)\r\n except AssertionError, e:\r\n if msg:\r\n m = \"%s:\\n%s\" % (msg, str(e))\r\n else:\r\n m = str(e)\r\n self.verification_erorrs.append(m)", "def validate_notes_input(notes):\n if len(notes) == 0:\n notes = 'None'\n clear()\n return notes", "def assertMultiLineEqual(self, first, second, msg=None):\r\n self.assert_(isinstance(first, basestring), (\r\n 'First argument is not a string'))\r\n self.assert_(isinstance(second, basestring), (\r\n 'Second argument is not a string'))\r\n\r\n if first != second:\r\n standardMsg = '%s != %s' % (safe_repr(first, True), safe_repr(second, True))\r\n diff = '\\n' + ''.join(difflib.ndiff(first.splitlines(True),\r\n second.splitlines(True)))\r\n standardMsg = self._truncateMessage(standardMsg, diff)\r\n self.fail(self._formatMessage(msg, standardMsg))", "def violated(self) -> bool:\n ...", "def CanDoIRDiff(old_lines, new_lines):\n total_chars = (sum(len(line) for line in old_lines) +\n sum(len(line) for line in new_lines))\n return total_chars <= MAX_TOTAL_LEN", "def test_primer_exceeds_mismatches(self):\r\n primers = ['AAAA', 'TTTT']\r\n exact = 'AAAA'\r\n mismatch_ok = 'AAAT'\r\n 
mismatch_bad = 'GGGG'\r\n self.assertEqual(primer_exceeds_mismatches(exact, primers, 0), False)\r\n self.assertEqual(primer_exceeds_mismatches(mismatch_ok, primers, 1),\r\n False)\r\n self.assertEqual(primer_exceeds_mismatches(mismatch_bad, primers, 2),\r\n True)", "def __ne__(self, other: 'LTL'):\n return not (self == other)", "def __ne__(self, other):\n if not isinstance(other, AddonReview):\n return True\n\n return self.to_dict() != other.to_dict()", "def test_cclerror_not_equal():\n e = pyccl.CCLError(\"blah\")\n e2 = pyccl.CCLError(\"blahh\")\n assert e is not e2\n assert e != e2\n assert hash(e) != hash(e2)", "def __eq__(self, other):\n\n if not other.isinstance(Note):\n return False\n\n return self.nbr and other.nbr", "def test_identical(self):\n write this test!", "def test_equal_on_type_mismatch(self):\n a = objects.OpaqueObject(self.bytes_a, enums.OpaqueDataType.NONE)\n b = \"invalid\"\n self.assertFalse(a == b)\n self.assertFalse(b == a)", "def _check(self, old_lines, expected_new_lines, inline_namespace=True):\n old_text = '\\n'.join(old_lines) + '\\n'\n new_text = _rewrite_one_text(\n text=old_text, edit_include=self._edit_include.items(),\n inline_namespace=inline_namespace)\n expected_new_text = '\\n'.join(expected_new_lines) + '\\n'\n self.assertMultiLineEqual(expected_new_text, new_text)", "def strings_differ(string1, string2):\n if len(string1) != len(string2):\n return True\n invalid_bits = 0\n for a, b in zip(string1, string2):\n invalid_bits += a != b\n return invalid_bits != 0", "def isduplicate(a, b):\n db = bibtexparser.loads(a+'\\n'+b)\n e1, e2 = db.entries\n refs = Biblio()\n return refs.eq(e1, e2)", "def handle_same_length(self, a, b):\n found = False\n for i, j in zip(a, b):\n if i == j:\n continue\n elif found:\n return False # this case is the second found edit, thus return false\n else:\n found = True\n return True", "def song_has_lyrics():\n pass", "def test_unequal(self):\n\n qs = FBO(path=TEST_FILES_ROOT, glob='*.md').order_by('name')\n # There are four of these.\n for a, b in combinations(qs.all(), 2):\n self.assertNotEqual(a, b)", "def test_check_for_duplicates_with_duplicates(self):\n quotes = [api.Quote(\" This is an added quote.\", \"Another author\", \"Publication\", [\"tag1, tag2\"]),\n api.Quote(\" This is an added quote.\", \"Another author2\", \"Publication\", [\"tag1, tag2\"]),\n api.Quote(\" This is an added quote.\", \"Another author3\", \"Publication\", [\"tag1, tag2\"])]\n\n with self.assertRaisesRegexp(Exception, \"a duplicate quote was found on line 2 of 'stdin'. \"\n \"Quote: \\\"This is an added quote.\\\".\"):\n\n api._check_for_duplicates(quotes, \"stdin\")" ]
[ "0.6234901", "0.6063626", "0.6018202", "0.59463143", "0.57870716", "0.5715159", "0.57044494", "0.56992126", "0.5657637", "0.5643236", "0.5580238", "0.5577914", "0.5575537", "0.5558365", "0.5553078", "0.5549295", "0.5545337", "0.54852504", "0.54813206", "0.54713225", "0.54681325", "0.5460688", "0.54572344", "0.54562765", "0.54514766", "0.5439124", "0.5434291", "0.5431563", "0.54080796", "0.53780496", "0.5374515", "0.53565544", "0.53533053", "0.5333164", "0.53302675", "0.5323154", "0.5320862", "0.530536", "0.5305311", "0.52892166", "0.5282739", "0.52786946", "0.5278296", "0.52733445", "0.52712077", "0.527011", "0.5268209", "0.52644306", "0.5245586", "0.5245586", "0.5238989", "0.5233982", "0.5233463", "0.52307427", "0.52256703", "0.5222441", "0.5215636", "0.520224", "0.5200436", "0.5183732", "0.5178792", "0.5165076", "0.5164", "0.5159004", "0.51564324", "0.5154526", "0.5153329", "0.5152784", "0.5149201", "0.51417464", "0.5139763", "0.51395833", "0.51350397", "0.5131476", "0.5129344", "0.5128818", "0.51258934", "0.5122199", "0.51208717", "0.51183355", "0.5114807", "0.50989264", "0.50977093", "0.50963175", "0.50961715", "0.5091121", "0.50897026", "0.50890917", "0.508691", "0.50786424", "0.50768334", "0.5072391", "0.50633746", "0.5058248", "0.5057462", "0.5055055", "0.5054422", "0.5050985", "0.5049083", "0.50483745" ]
0.82412505
0
Checks if the lyrics have been approved or not
def is_lyrics_approved():
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def approve_lyrics():\n pass", "def song_has_lyrics():\n pass", "def is_approved(self) -> bool:\n return self.state == Order.OrderState.APPROVED.choice_value", "def approve_tweet(worker_responses):\n approvals = [len(get_tweet_text(response)) > 0 for response in worker_responses]\n return approvals", "def is_approved(self):\n return self.moderator_state in (Page.MODERATOR_APPROVED, Page.MODERATOR_APPROVED_WAITING_FOR_PARENTS)", "def is_complete(self):\n return all([\n len(strip_tags(score.notes)) > 0 for score in self.scores.all()\n ])", "def pops_agree(x):\n return len(x.all_open_closed) == 1", "def need_attention(self):\n msg = [\"not staged\", \"behind\", \"ahead\", \"Untracked\"]\n status_msg = self.status()\n if any([each in status_msg for each in msg]):\n return True\n return False", "def Confirm(self):\n self.PrintMetadata()\n answer = input(\"Continue [Y/n]? \").lower()\n return not answer.startswith(\"n\")", "def approves(self):\n # verify trailing stop-loss threshold has been met\n thresholdMet = self.analysis.trailing_percentage >= constants.PERCENT_TRAILING_CLOSE_THRESHOLD\n\n # verify price has reverted back to the mean\n if self.analysis.initial_order_type == \"buy\":\n meanReverted = self.analysis.current_price >= self.analysis.current_volume_weighted_average_price\n else:\n meanReverted = self.analysis.current_price <= self.analysis.current_volume_weighted_average_price\n\n # return approval\n _approval = thresholdMet or meanReverted\n if _approval:\n self.logger.log(self.analysis.__dict__)\n self.logger.log(\"%s close approved!\" % self.ticker)\n return _approval", "def ConfirmAllowedCopyrightHolder(holder):\n return holder in ALLOWED_COPYRIGHT_HOLDERS", "def _check_required(self):\n if self.data['history_file'] is None:\n return\n required = self.data.get('required_changelog_text')\n if not required:\n return\n if isinstance(required, six.string_types):\n required = [required]\n history_last_release = self.data['history_last_release']\n for text in required:\n if text in history_last_release:\n # Found it, all is fine.\n return\n pretty_required = '\"{}\"'.format('\", \"'.join(required))\n if not utils.ask(\n \"WARNING: Changelog should contain at least one of \"\n \"these required strings: {}. Are you sure you \"\n \"want to release?\".format(pretty_required),\n default=False):\n sys.exit(1)", "async def status(self, ctx):\n if has_post_permission(ctx.guild.id, ctx.channel.id):\n if adv.is_on_adventure(ctx.author.id):\n out = adv.print_adventure(ctx.author.id)\n else:\n out = 'You are not doing anything at the moment.'\n await ctx.send(out)", "def customer_wants_condiments(self):\n answer = raw_input(\"Would you like Lemon? (y/n)\").lower()\n if answer.startswith('y'):\n return True\n else:\n return False", "def approved(self) -> bool:\n return all(d.approved for d in self.affected_directories)", "def _check_for_license_acceptance(self, dep):\n if \"license\" in self.dependency_dict[dep]:\n license_name = self.dependency_dict[dep][\"license\"]\n else:\n license_name = \"restrictive\"\n if \"license_file\" in self.dependency_dict[dep]:\n license_text = Path(\n self.dependency_dict[dep][\"license_file\"]\n ).read_text()\n logger.warning(license_text)\n while \"invalid answer\":\n reply = (\n str(\n input(\n f\"Do you accept this {license_name} license? 
(y/n): \"\n )\n )\n .lower()\n .strip()\n )\n if len(reply) > 0:\n if reply[0] == \"y\":\n return True\n if reply[0] == \"n\":\n return False", "def approve(self):\n self._check_if_open()\n data = {\"approved\": True}\n return self.post(\"approve\", data)", "def is_retweet(self, strict=True):\n if self.tweet.get('retweeted_status', False):\n return True\n if not strict:\n text_lower = self.tweet['text'].lower()\n if text_lower.startswith('rt '):\n return True\n if ' rt ' in text_lower:\n if not 'please rt' in text_lower \\\n and not 'pls rt' in text_lower \\\n and not 'plz rt' in text_lower:\n return True\n return False", "def test_single_aclhook_true(self):\n self._test_hook_approval_sequence([True], True)", "def hod_approve(self):\n print \"HOD approved this form. Current state:\", self.state", "def jao_approve(self):\n print \"JAO approved this form. Current state:\", self.state", "def is_valid(self):\n return (self.time is not None\n and self.author is not None\n and self.content is not None)", "def seesSuggestions(self):\n return self.floor.owner == self.user and self.floor.permissiveness == \"permissive\"", "def is_eligible(self, myself):\n if self.author().screen_name == myself.screen_name:\n log_.debug(\"Not replying to my own tweets\")\n return False\n if self.is_retweet():\n log_.debug(\"Not processing pure retweets\")\n return False\n return True", "def check():\n\t\t# This forces user to set dirs before running the app for first time.\n\t\tif len(Config.lyrics_dir) == 0:\n\t\t\t# see which directory in not set and raise BadConfigError with that as value\n\t\t\tprint('lyrics_dir is not set.')\n\t\t\tprint('Please use the \"set\" command to set lyrics_dir.')\n\t\t\tprint('use \"lyrico --help\" to view commands.')\n\t\t\treturn False\n\n\t\tif len(Config.source_dir) == 0:\n\t\t\t# see which directory in not set and raise BadConfigError with that as value\n\t\t\tprint('source_dir is not set.')\n\t\t\tprint('Please use the \"set\" command to set source_dir or pass it as parameter.')\n\t\t\tprint('use \"lyrico --help\" to view commands.')\n\t\t\treturn False\n\n\t\t# if user disable both saving mode. Notify & force user to correct on next run.\n\t\tif not Config.save_to_file and not Config.save_to_tag:\n\t\t\tprint('Both \"save_to_file\" and \"save_to_tag\" modes are disabled. Please enable one.')\n\t\t\tprint('use \"lyrico --help\" to view commands.')\n\t\t\treturn False\n\n\t\t# if user disables all sources. Notify & force user to enable one.\n\t\tif (not Config.lyric_wikia\n\t\t and not Config.az_lyrics\n\t\t and not Config.musix_match\n\t\t and not Config.lyricsmode):\n\t\t\tprint('All lyrics sources are disabled. Please enable one.')\n\t\t\tprint('use \"lyrico --help\" to view commands.')\n\t\t\treturn False\n\t\treturn True", "def ok(self):\n return self['webok'] == 'OK'", "def need_admin_approval(self):\n return self._need_admin_approval", "def change_availability():\n artwork_sold = get_artwork_name()\n if not controls_utils.artwork_exists(artwork_sold):\n print('No record of that piece of art. ')\n else:\n artist = controls_utils.name_of_artist(artwork_sold)\n if not controls_utils.artwork_available(artwork_sold, artist):\n print('Sorry that piece has already been sold. ')\n else:\n response = input('Mark ' + artwork_sold + ' as sold? Y or N ')\n if response.upper() == 'Y':\n mark_as_sold(artwork_sold)\n while not controls_utils.response_affirmative(response):\n response = input('Are you sure you want to mark '\n + artwork_sold + ' by ' + artist + ' as sold? 
Y or N or press X to escape ')\n if response.upper() == 'X':\n break\n elif response.upper() == 'N':\n break", "def asking(self):\n return 'Sure.'", "def check(self, description: Description) -> bool:", "def approve(self):\n self.approved = True\n self.quest_node['approved'] = True\n graph.push(self.quest_node)\n self.payout()", "async def verify_agree(self, ctx: commands.Context):\n author = ctx.author\n joined_at = author.joined_at\n member_joined, since_joined = (\n author.joined_at.strftime(\"%d %b %Y %H:%M\"),\n (ctx.message.created_at - joined_at).days,\n )\n member_created, since_created = (\n author.created_at.strftime(\"%d %b %Y %H:%M\"),\n (ctx.message.created_at - author.created_at).days,\n )\n created_on = \"{}\\n({} days ago)\".format(member_created, since_created)\n joined_on = \"{}\\n({} days ago)\".format(member_joined, since_joined)\n author_avatar = author.avatar_url_as(static_format=\"png\")\n\n data = await self.config.guild(ctx.guild).all()\n log_config = data[\"logs\"]\n\n if not data[\"temprole\"] and not data[\"autoroles\"]:\n await ctx.send(\n (\n \"Sorry, there is no role configuration set. Please contact the moderation \"\n \"team of this server.\"\n ),\n delete_after=60,\n )\n self.log.warning(\"No role set. Unable to process verification.\")\n return\n\n try:\n result = await self._handle_role(author)\n await ctx.message.delete()\n except discord.Forbidden:\n await ctx.send(\n \"Error: I am unable to remove your role, please contact the moderation team.\"\n )\n return self.log.warning(\"Error: No permissions to remove roles.\")\n except discord.HTTPException as e:\n return self.log.warning(\"HTTPException: {} - {}\".format(e.status, e.code))\n if log_config is not None:\n embed = discord.Embed(color=discord.Color.green())\n embed.title = \"{}#{} - Verified\".format(author.name, author.discriminator)\n embed.set_thumbnail(url=author_avatar)\n embed.set_footer(text=\"User ID: {}\".format(author.id))\n embed.add_field(name=\"Account Creation:\", value=created_on, inline=True)\n embed.add_field(name=\"Joined Date:\", value=joined_on, inline=True)\n embed.add_field(name=\"Status:\", value=result[1], inline=True)\n try:\n await ctx.bot.get_channel(log_config).send(embed=embed)\n except discord.Forbidden:\n return self.log.warning(\n \"Error: Unable to send log message to {}\".format(\n ctx.bot.get_channel(log_config)\n )\n )\n except discord.HTTPException as e:\n return self.log.warning(\"HTTPException: {} - {}\".format(e.status, e.code))", "def ok(cls):\n return cls.autograder_format == \"ok\"", "def is_correctness_available_for_response(self, response):\n return True", "def is_valid(self):\n logger.info('MORTECH-SCENARIO: EXISTS %s, LENDERS %s',\n self.instance.rate_quote_requests.exists(),\n self.instance.rate_quote_requests.first().rate_quote_lenders.exists())\n return (\n self.instance.rate_quote_requests.exists() and\n self.instance.rate_quote_requests.first().rate_quote_lenders.exists() and\n self.instance.ownership_time\n )", "def verify(self):\r\n self.title = self.title and self.title or '' \r\n self.descr = self.descr and self.descr or '' \r\n self.link = self.link and self.link or ''\r\n self.channelURL = self.channelURL and self.channelURL or ''", "def has_talk(self):\n if self.applicant.talks.filter(Q(status=SUBMITTED) |\n Q(status=UNDER_CONSIDERATION) |\n Q(status=PROVISIONAL) |\n Q(status=ACCEPTED)):\n return True\n return False", "def succeeded(self):\n return self.current_reward == 300", "def ask_dirty(self):\n if not self.unsaved: return True\n if 
QtWidgets.QMessageBox.question(self, \"Are you sure\", \"There are unsaved changes in the file you sure\",\n\t\t\tQtWidgets.QMessageBox.No | QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No) != QtWidgets.QMessageBox.Yes:\n return False\n return True", "def check(self, roommate_instance):\n if self.status == Item.PROCESSING_CODE and self.check_who == roommate_instance:\n self.status = Item.CHECKED_CODE\n else:\n raise PermissionDenied", "def can_modify_answers(self):\n if self.status not in (Order.STATUS_PENDING, Order.STATUS_PAID, Order.STATUS_EXPIRED):\n return False\n modify_deadline = self.event.settings.get('last_order_modification_date', as_type=datetime)\n if modify_deadline is not None and now() > modify_deadline:\n return False\n ask_names = self.event.settings.get('attendee_names_asked', as_type=bool)\n for cp in self.positions.all().prefetch_related('item__questions'):\n if (cp.item.admission and ask_names) or cp.item.questions.all():\n return True\n return False # nothing there to modify", "def check_status(self):", "def is_accepting(self):\n return (self.position == 1) and (self.lhs.content == LANGUAGE)", "def test_accepted(self):\n actions = signoff_actions(appversions={\"code\": \"fx1.0\"},\n locales={\"code\": \"de\"})\n actions = list(actions)\n eq_(len(actions), 1)\n so = Signoff.objects.get(action=actions[0][0])\n eq_(so.push.tip.shortrev, \"l10n de 0002\")\n eq_(so.locale.code, \"de\")\n eq_(so.action_set.count(), 2)", "def approved(message):\n hf.query_users(message, hf.get_users(), \"approved\")", "def vote_result(self) -> bool:\n token_score = self.create_interface_score(self._token_score.get(), TokenInterface)\n yes = 0\n no = 0\n for address in self._voted:\n vote = self._vote[str(address)]\n if vote == 'yes':\n yes += token_score.balanceOf(address)\n else:\n no += token_score.balanceOf(address)\n self._yes_votes.set(yes)\n self._no_votes.set(no)\n if self._yes_votes.get() > (token_score.totalSupply() - token_score.balanceOf(self._rewards_score.get())) // 2:\n return True\n else:\n return False", "def how_eligible(essay):\n eligibility_requirements = ['?', '\"', ',', '!']\n return len(set(filter((lambda x: x in eligibility_requirements), essay)))", "def answer_available(self):\r\n if self.showanswer == '':\r\n return False\r\n elif self.showanswer == \"never\":\r\n return False\r\n elif self.runtime.user_is_staff:\r\n # This is after the 'never' check because admins can see the answer\r\n # unless the problem explicitly prevents it\r\n return True\r\n elif self.showanswer == 'attempted':\r\n return self.attempts > 0\r\n elif self.showanswer == 'answered':\r\n # NOTE: this is slightly different from 'attempted' -- resetting the problems\r\n # makes lcp.done False, but leaves attempts unchanged.\r\n return self.lcp.done\r\n elif self.showanswer == 'closed':\r\n return self.closed()\r\n elif self.showanswer == 'finished':\r\n return self.closed() or self.is_correct()\r\n\r\n elif self.showanswer == 'past_due':\r\n return self.is_past_due()\r\n elif self.showanswer == 'always':\r\n return True\r\n\r\n return False", "def get_approved(self):\n return self.filter(verified=True, blacklisted=False,\n flags__lte=ExamFlag.LIMIT)", "async def is_bear(ctx):\n return ctx.message.author.id == 353730886577160203 or ctx.message.author.id == 715048392408956950", "def verify_if_keywords_updated_message_is_displayed(self):\n self._basket.verify_if_keywords_updated_message_is_displayed()", "def ask_allow_purchase() -> bool:\n allow_purchase_str: str = ask_user_input(\"\\t\\t\\tAllow 
purchase: [Y/n] \")\n return allow_purchase_str.lower() == \"y\" or allow_purchase_str == \"\"", "def is_valid(self) -> bool:\n return all(\n (\n not self.author,\n self.unit,\n )\n )", "def check_forbidden_words(song: Song, result: Result) -> Tuple[bool, List[str]]:\n\n song_name = slugify(song.name).replace(\"-\", \"\")\n to_check = slugify(result.name).replace(\"-\", \"\")\n\n words = []\n for word in FORBIDDEN_WORDS:\n if word in to_check and word not in song_name:\n words.append(word)\n\n return len(words) > 0, words", "def validate_presence_(self):\n WebDriverWait(self.browser.driver, 60).until(EC.visibility_of_element_located((By.ID, values.reviews)))\n reviews = self.browser.driver.find_element_by_id(values.reviews)\n try:\n assert values.ratings in reviews.text\n print('\\nText is Present on The Page.\\n')\n except:\n raise AssertionError(\"Error! Please Reload Page\")", "def should_ask_if_examiner_want_to_give_another_chance(self):\n if self.assignment.is_electronic:\n return (self.delivery_status == \"corrected\" and not self.feedback.is_passing_grade) \\\n or self.delivery_status == 'closed-without-feedback'\n else:\n return False", "def mark_completed(self,status):\r\n if status == \"r\":\r\n self.status = \"c\"#It is to test the mark complete function in the test_book.py, otherwise this program works fine in the main.py\r\n return True\r\n elif status == \"c\":\r\n return False", "def can_approve(self, user, **data):\n raise Return(False)", "def isOn(self):\r\n return len(self.__agenda)>2", "def confirmed(self):", "def dr_approve(self):\n print \"DR approved this form. Current state:\", self.state", "def should_auto_approve(self):\r\n if self.group and self.group.allow_auto_approval:\r\n return True\r\n\r\n # some orders (like those duplicated by CIT) will not have owners\r\n if self.is_multilevel_approval():\r\n if self.has_all_approver_roles(self.owner, self.group):\r\n return True\r\n return False\r\n\r\n else:\r\n if self.owner and self.owner.has_permission('order.approve', self.group):\r\n return True\r\n\r\n return False", "def is_ready_for_website(self):\n return self.title != \"\" and self.poster_image_url != \"\" and self.trailer_youtube_url != \"\"", "def _is_in_stock(cls, resp_body: str) -> bool:\n raise NotImplementedError", "def cheer(self, songs):\n if self.favourite_song in songs:\n return \"Whoo!\"", "def test_is_revoked(self):\n self.assertEqual(self.project.is_revoked(), False)", "def check_video_pruning(self, artist, name, title):\n\n\t\tweeders = ['cover','live','vevo','remix']\t\t\t# words that we want to ignore in our video search\n\t\tname_contains_weed_word = any(weed_word in name.lower() for weed_word in weeders) \n\t\tartist_cointains_weed_word = any(weed_word in artist.lower() for weed_word in weeders)\n\t\tvideo_title_contains_weed_word = any(weed_word in title.lower() for weed_word in weeders)\n\n\t\t# ensure that the artist or track name does not actually include the weeders Ex. live house\n\t\tif video_title_contains_weed_word and (name_contains_weed_word is False and artist_cointains_weed_word is False):\n\t\t\tret_val = True\n\t\telse:\n\t\t\tret_val = False\n\n\n\n\t\t# check duration of song\n\n\t\treturn ret_val", "def begin_present_are_quiz():\n\n print(\"Get ready for a quiz.\\nInstructions: You'll be shown a verb and a pronoun. 
Conjugate it in the present tense\"\n \".\\n\")\n verb_good = False\n verb = \"\"\n pronoun = \"\"\n go_again = True\n while go_again:\n while not verb_good:\n verb = random.choice(are_verb_options)\n checker = functions.verb_ending_good(verb)\n if checker is True:\n verb_good = True\n pronoun_good = False\n while not pronoun_good:\n pronoun = random.choice(pronouns)\n if pronoun in pronouns:\n pronoun_good = True\n ending = functions.verb_ending(verb)\n if ending == \"are\":\n answer = are_present_quiz(verb, pronoun)\n checker = input(f'Tense: Presente \\nVerb: {verb} \\nPronoun: {pronoun}...\\n')\n if answer == checker:\n print(\"Correct!\")\n else:\n print(f'Incorrect.\\nCorrect answer is {answer}')\n another = input(\"Go again? y/n\\n\").lower()\n if another != \"y\":\n go_again = False\n print(\"Quiz over.\")\n else:\n verb_good = False", "def check_status_book_students() -> None:\r\n print(f\"Returning back requests : {global_req['back']}\")\r\n print(f\"Applying for new book's requests: {global_req['new_req']}\")\r\n print(\"Format [(Student NAME, Book NAME), (Student NAME, Book NAME)]\")", "def is_accepting(self):\n for item_id, lookahead in self.id_to_lookahead.items():\n if lookahead.includesEndOfText():\n item = self.id_to_item[item_id]\n if item.is_accepting():\n return True\n return False", "def ready(self):\n return self.snippets is not None", "def status_check(self):\n from coordinator.tasks import cancel_release\n # Check if we hit the time limit\n last_update = self.events.order_by('-created_at')\\\n .first().created_at\n diff = datetime.datetime.utcnow() - last_update.replace(tzinfo=None)\n\n if diff.total_seconds() > settings.RELEASE_TIMEOUT:\n if self.state == 'canceling':\n return\n logger.error(f'canceling release {self.kf_id} for time out.')\n self.cancel()\n self.save()\n django_rq.enqueue(cancel_release, self.kf_id)\n return\n\n # Check if any contained tasks have failed/canceled\n for task in self.tasks.all():\n if task.state in ['failed', 'canceled', 'rejected']:\n if self.state == 'canceling':\n return\n logger.error(f'canceling release: {self.kf_id} task is ' +\n f'{task.state}')\n self.cancel()\n self.save()\n django_rq.enqueue(cancel_release, self.kf_id)\n return", "def check_kb_status():\n result = minisat(agent.kb.clauses)\n if result:\n print \"Agent KB is satisfiable\"\n else:\n print \"Agent KB is NOT satisfiable!! 
There is contradiction that needs fixing!\"", "def action_approve(self):\n if not self.date_approve:\n self.date_approve = fields.Datetime.now()\n\n config = self.env['ka_hr_payroll.config'].default_config()\n if check_rapel_status(self, config):\n self.action_rapel()\n else:\n self.action_done()", "def is_proved(self):\n return len(self.proofs) > 0", "def is_auto_approval_allowed(self):\n\n if not self.upload:\n raise ImproperlyConfigured(\n 'Need an upload to call is_auto_approval_allowed()'\n )\n\n return self._is_action_allowed('auto_approval')", "def not_complete(request):\n print(\"not_complete method in tutor_helper.py\")\n if user_auth(request):\n user = User.objects.get(email=request.user.email)\n print(\"\\t\", user)\n current_user = UserInformation.objects.get(user=user)\n if current_user.current_main_set is None:\n return False\n if current_user.completed_sets is not None:\n if current_user.current_main_set not in current_user.completed_sets.all():\n print(\"not complete\")\n print(current_user.current_main_set)\n return True\n else:\n if current_user.completed_sets is None:\n return True\n return False", "def are_ere_future_quiz(verb, pronoun):\n return functions.conjugate_future_are_ere_verb(verb, pronoun, \"futuro\")", "def change_learned_status(self, instance):\n self.song = self.songs.get_song_by_title(instance.text)\n # Marks song as learned and shows according status text\n if self.song.required:\n self.song.mark_learned()\n status_text = \"You have learned {}\".format(self.song.title)\n # Marks song as required and shows according status text\n else:\n self.song.mark_required()\n status_text = \"You need to learn {}\".format(self.song.title)\n # Shows status text, sorts songs by current s\n self.root.ids.status_text.text = status_text\n self.sort_songs(self.root.ids.sort_options.text)", "def test_approve_agreement(self):\n pass", "def are_present_quiz(verb, pronoun):\n\n return functions.conjugate_present_are_verb(verb, pronoun, \"presente\")", "def ready(self):\n if self.status == self.STATUS_NEED_FORCED:\n return True\n elif self.airdate and self.status in (self.STATUS_NEED, self.STATUS_NONE):\n return self.aired and not self.obsolete and self.season.number != 0\n else:\n return False", "async def appcheck(self, ctx: commands.Context, user_id: discord.Member):\n return await ctx.send(\n \"This command is currently being reworked, follow updates in The Kompound\"\n )", "def is_wcw(status):\n test_text = ' '.join(status['text'].lower().split()) # Remove capital letters and excessive whitespace/linebreaks\n usernames = ['just_to_say_bot', 'thisisjustbot', 'Dcd200S', 'willslostplum', 'sosweetbot', 'JustToSayBot', 'thatisjustplums', \\\n\t\t 'EatenBot', 'the_niche_bot', 'KristenCostel10', 'litabottal', 'pythonnina', 'alatest5', 'LisaRob96585017','Stilson28400122', \\\n\t\t 'JohnDun40217560','Cordelia28', 'Rick63556459', 'botsnthings', 'timbot301', 'Rachel53001595', 'NicholasMillma6', 'ThisIsJustTo1'\\\n\t\t'MayISay4', 'breakfast_plum', 'BotBot53368932'] # Block screen_names of known parody accounts\n if status['user']['screen_name'] not in usernames and all(u not in status['text'] for u in usernames) and 'Cheap Bots, Done Quick!' 
not in status['source']:\n if 'which you were probably' in test_text: # Capture parodies of the form\n return True\n elif 'plums' in test_text and 'icebox' in test_text: # Capture parodies of the content\n return True\n elif 'plum' in test_text and 'icebox' in test_text: # Capture singular 'plum'\n return True\n elif 'plums' in test_text and 'ice box' in test_text: #Capture 'ice box' with a space\n return True\n elif 'plum' in test_text and 'ice box' in test_text: \n return True\n elif 'William Carlos Williams'.lower() in test_text and 'plums' in test_text: #Capture mentions of WCW\n return True\n elif 'William Carlos Williams'.lower() in test_text and 'plum' in test_text:\n return True\n elif 'this is just to say' in test_text and 'that were in' in test_text: # Get only relevant instances of \"this is just to say\"\n return True\n elif 'this is just to say' in test_text and 'forgive me' in test_text:\n return True\n elif 'this is just to say' in test_text and 'and so' in test_text:\n return True\n elif 'so sweet and so cold' in test_text and 'the arms of the ocean' not in test_text: # Get 'so sweet and so cold' tweets that aren't quoting Florence and the Machine\n return True\n else:\n return False\n else:\n return False", "def ParseYesNo(src):\n return src.strip().lower() == u'yes'", "def testValidDescriptions(self):\n self.assertTrue(self.app._ignore_jobs(\"\"))\n self.assertTrue(self.app._ignore_jobs(\"This is valid\"))\n self.assertTrue(self.app._ignore_jobs(\"you can telecommute\"))", "def yncheck(msg, *args):\r\n if msg.content.lower() in 'y':\r\n return (True)\r\n elif msg.content.lower() in 'n':\r\n if args:\r\n return (False)\r\n return (True)", "def is_candidate(line):\n line = line.lower()\n line = prepare_text_line(line)\n return (has_content(line) and any(s in line for s in copyrights_hint.statement_markers))", "def approve_latest(cls):\n cls.latest_vintage.validate()", "def refundable(self):\r\n course_mode = CourseMode.mode_for_course(self.course_id, 'verified')\r\n if course_mode is None:\r\n return False\r\n else:\r\n return True", "def unverifiable(self, resp):\n return any(a in str(resp).lower() for a in UNVERIFIABLE_KEYWORDS)", "def test_apply_corporate_approval(self):\n p = self.make('Prescription')\n self.set_cbas_attributes(p)\n p.planning_status = p.PLANNING_SUBMITTED\n p.save()\n\n url = reverse('admin:prescription_prescription_corporate_approve',\n args=(str(p.id),))\n self.client.login(username='fmsb', password='test')\n response = self.client.post(url, {}, follow=True)\n self.assertEqual(response.status_code, 200)\n\n p = Prescription.objects.get(name='test')\n self.assertTrue(p.planning_status == p.PLANNING_APPROVED)\n self.assertTrue(p.planning_status_modified is not None)", "def game_on(self):\n doc = self.documentation\n return (self.draw.accepted or doc[len(doc)-1].accepted) and (self.board.stones_set < self.board.max_nr_stones) and (self.board.score[opponent(self.draw.player)] > 0)", "def ready_check(self,prompt=True):\n\t\tif len([e for e in self.elements if e.status=='pending' and e.pool != 'unknown']) == 0:\n\t\t\tmessage = \"Nothing to archive.\"\n\t\t\t#print \" \u001b[42mNotice:\u001b[m %s\" % message\n\t\t\traise DLANothingToArchive,message\n\t\t# prompt before beginning what could potentially\n\t\t# be a very long archive\n\t\tif prompt:\n\t\t\tri = raw_input(\"\\n Press [enter] to start the archive.\")\n\t\t\tif ri != \"\":\n\t\t\t\tmessage = \"Aborting archive.\"\n\t\t\t\traise Exception,message\n\t\treturn True", "def check_items(self, 
items):\n for item in items:\n if item['user'] in self.required_list and item['user'] not in self.approved_required_list:\n if item['result'] is True or item['result'] == 'approved':\n logger.info(\"%s approved\", item['user'])\n self.approved_required_list.append(item['user'])\n else:\n logger.info(\"%s declined\", item['user'])\n error_msg = \"Since {} is a required member for approval, this approval step fails. \".format(item['user'])\n logger.error(error_msg)\n self.exit(rc=2, detail=error_msg)\n if item['user'] in self.optional_list and item['user'] not in self.approved_optional_list:\n if item['result'] is True or item['result'] == 'approved':\n logger.info(\"%s approved\", item['user'])\n if item['user'] in self.declined_optional_list:\n self.declined_optional_list.remove(item['user'])\n self.approved_optional_list.append(item['user'])\n else:\n logger.info(\"%s declined\", item['user'])\n self.declined_optional_list.append(item['user'])\n if len(self.declined_optional_list) >= (len(self.optional_list) - self.number_optional):\n error_msg = \"Not be able to fulfill requirement that {} optional approvals, since {} declined request.\".format(\n self.number_optional, self.declined_optional_list)\n logger.error(error_msg)\n self.exit(rc=2, detail=error_msg)\n\n if len(self.approved_required_list) >= len(self.required_list) \\\n and len(self.approved_optional_list) >= self.number_optional:\n logger.info(\"Approval requirements are fully met. Exit gracefully.\")\n self.exit(0)", "def can_vote(self):\n now = timezone.now()\n return self.pub_date <= now <= self.end_date", "def is_revoked(self, token: str) -> bool:\n return token in self.revoked_tokens", "async def auto(self, ctx):\n if ctx.message.author.top_role.name.lower() == 'officer':\n await ctx.message.channel.send(\n 'Still working on integration with the election results. Maybe have a command to link to an elections '\n 'database?')\n else:\n await ctx.message.channel.send('Hey! You do not have permission to do that.')", "def action_confirm(self):\n self.check_txt_ids()\n self.write({'state': 'confirmed'})\n return True", "def ok(self):\n return self.res.ok and not self.rejected" ]
[ "0.7582891", "0.6132226", "0.5762543", "0.5721694", "0.56735605", "0.5672874", "0.5540766", "0.5498608", "0.54710966", "0.5407162", "0.5385652", "0.53676486", "0.53605825", "0.5347708", "0.53281146", "0.5326186", "0.53110224", "0.5303602", "0.5287871", "0.5286905", "0.5272448", "0.52441275", "0.52437484", "0.52387387", "0.523041", "0.52121", "0.52111715", "0.5198329", "0.5184507", "0.51838124", "0.5183557", "0.51830727", "0.51801217", "0.5169447", "0.5166324", "0.5145822", "0.51411116", "0.5135239", "0.51272744", "0.5123851", "0.5123244", "0.5121973", "0.5119023", "0.5118825", "0.5110145", "0.510035", "0.5100262", "0.5098946", "0.5093329", "0.50791854", "0.5078226", "0.5061596", "0.5060321", "0.50596315", "0.5059239", "0.5042149", "0.5040095", "0.5039464", "0.5038272", "0.50313467", "0.5031153", "0.5030253", "0.50299245", "0.5028348", "0.5020321", "0.5013591", "0.50014985", "0.5000413", "0.49998882", "0.49980277", "0.49971282", "0.4994479", "0.49854073", "0.49823454", "0.49781194", "0.4973124", "0.49685782", "0.49673924", "0.4963074", "0.4962219", "0.4959721", "0.49547", "0.49517247", "0.4951041", "0.49509898", "0.4950246", "0.4949994", "0.49478886", "0.4942883", "0.49403995", "0.4940294", "0.49378854", "0.49352664", "0.49335492", "0.4931606", "0.4928505", "0.49284774", "0.49274734", "0.49272686", "0.49229392" ]
0.8734696
0
r"""Calculate the cold plasma dispersion surfaces according to equation 2.64 in Plasma Waves by Swanson (2nd ed.)
def disp_surf_calc(kc_x_max, kc_z_max, m_i, wp_e):
    # Make vectors of the wave numbers
    kc_z = np.linspace(1e-6, kc_z_max, 35)
    kc_x = np.linspace(1e-6, kc_x_max, 35)

    # Turn those vectors into matrices
    kc_x_mat, kc_z_mat = np.meshgrid(kc_x, kc_z)

    # Find some of the numbers that appear later in the calculations
    kc_ = np.sqrt(kc_x_mat ** 2 + kc_z_mat ** 2)  # Absolute value of k
    theta_ = np.arctan2(kc_x_mat, kc_z_mat)  # The angle between k and B
    wc_i = 1 / m_i  # The ion gyro frequency
    wp_i = wp_e / np.sqrt(m_i)  # The ion plasma frequency
    wp_ = np.sqrt(wp_e ** 2 + wp_i ** 2)  # The total plasma frequency

    # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    # For every k_perp and k_par, turn the dispersion relation into a
    # polynomial equation and solve it.
    # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    # The polynomial coefficients are calculated
    pol_koeff_8 = -2 * kc_ ** 2
    pol_koeff_8 -= (1 + wc_i ** 2 + 3 * wp_ ** 2) * np.ones(kc_.shape)
    pol_koeff_6 = (2 * kc_ ** 2 + wp_ ** 2) * (1 + wc_i ** 2 + 2 * wp_ ** 2)
    pol_koeff_6 += kc_ ** 4 + (wp_ ** 2 + wc_i) ** 2
    pol_koeff_4 = -kc_ ** 4 * (1 + wc_i ** 2 + wp_ ** 2)
    pol_koeff_4 -= 2 * kc_ ** 2 * (wp_ ** 2 + wc_i) ** 2
    pol_koeff_4 -= (kc_ * wp_) ** 2 * (1 + wc_i ** 2 - wc_i) * (
        1 + np.cos(theta_) ** 2)
    pol_koeff_4 -= wp_ ** 2 * (wp_ ** 2 + wc_i) ** 2
    pol_koeff_2 = kc_ ** 4 * (wp_ ** 2 * (1 + wc_i ** 2 - wc_i) * np.cos(
        theta_) ** 2 + wc_i * (wp_ ** 2 + wc_i))
    pol_koeff_2 += kc_ ** 2 * wp_ ** 2 * wc_i * (wp_ ** 2 + wc_i) * (
        1 + np.cos(theta_) ** 2)
    pol_koeff_0 = -kc_ ** 4 * wc_i ** 2 * wp_ ** 2 * np.cos(theta_) ** 2

    w_final = np.zeros((10, len(kc_z), len(kc_x)))

    # For each k, solve the equation
    for k_z, k_x in itertools.product(range(len(kc_z)), range(len(kc_x))):
        disp_polynomial = [1, 0, pol_koeff_8[k_z, k_x], 0,
                           pol_koeff_6[k_z, k_x], 0, pol_koeff_4[k_z, k_x],
                           0, pol_koeff_2[k_z, k_x], 0, pol_koeff_0[k_z, k_x]]
        # theoretically should be real (A. Tjulin)
        w_temp = np.real(np.roots(disp_polynomial))
        # We need to sort the answers to get nice surfaces.
        w_final[:, k_z, k_x] = np.sort(w_temp)

    n2_ = kc_ ** 2 / w_final ** 2
    v_ph_c = np.sqrt(1. / n2_)
    va_c = 1 / (wp_e * np.sqrt(m_i))
    v_ph_va = v_ph_c / va_c

    diel_tensor = _calc_diel(kc_, w_final, theta_, wp_e, wp_i, wc_i)

    e_x, e_y, e_z, e_per, e_tot, e_pol = _calc_e(diel_tensor)
    e_par = (kc_x_mat * e_x + kc_z_mat * e_z) / kc_

    b_x, b_y, b_z, b_par, b_per, b_pol, b_tot = _calc_b(kc_x_mat, kc_z_mat,
                                                        w_final, e_x, e_y, e_z)

    dk_x, dk_z = [kc_x_mat[1], kc_z_mat[1]]
    dw_x, dw_z = [np.zeros(w_final.shape) for _ in range(2)]
    dw_x[:, :, 1:] = np.diff(w_final, axis=2)
    dw_z[:, 1:, :] = np.diff(w_final, axis=1)
    v_x, v_z = [dw_ / dk for dw_, dk in zip([dw_x, dw_z], [dk_x, dk_z])]

    s_par, s_tot = _calc_s(e_x, e_y, e_z, b_x, b_y, b_z)

    # Compute ion and electron velocities
    v_ex, v_ey, v_ez, v_ix, v_iy, v_iz = _calc_vei(m_i, wc_i, w_final,
                                                   e_x, e_y, e_z)

    # Ratio of parallel and perpendicular to B speed
    vepar_perp = v_ez * np.conj(v_ez)
    vepar_perp /= (v_ex * np.conj(v_ex) + v_ey * np.conj(v_ey))
    vipar_perp = v_iz * np.conj(v_iz)
    vipar_perp /= (v_ix * np.conj(v_ix) + v_iy * np.conj(v_iy))

    # Total particle speeds
    v_e2 = v_ex * np.conj(v_ex) + v_ey * np.conj(v_ey) + v_ez * np.conj(v_ez)
    v_i2 = v_ix * np.conj(v_ix) + v_iy * np.conj(v_iy) + v_iz * np.conj(v_iz)

    # Ion and electron energies
    m_e = -1
    en_e = 0.5 * m_e * v_e2
    en_i = 0.5 * m_i * v_i2

    # Ratio of particle and field energy densities
    ratio_part_field = _calc_part2fields(wp_e, en_e, en_i, e_tot, b_tot)

    # Continuity equation
    dn_e_n, dn_i_n, dne_dni = _calc_continuity(kc_x_mat, kc_z_mat, w_final,
                                               v_ex, v_ez, v_ix, v_iz)

    dn_e_n_db_b = dn_e_n / b_tot
    dn_i_n_db_b = dn_i_n / b_tot

    dn_e_n_dbpar_b = dn_e_n / b_par
    dn_i_n_dbpar_b = dn_i_n / b_par

    dn_e = dn_e_n * wp_e ** 2
    k_dot_e = e_x * kc_x_mat + e_z * kc_z_mat
    k_dot_e = np.sqrt(k_dot_e * np.conj(k_dot_e))

    # Build output dict
    extra_param = {"Degree of electromagnetism": np.log10(b_tot / e_tot),
                   "Degree of longitudinality": np.abs(e_par) / e_tot,
                   "Degree of parallelity E": e_z / e_tot,
                   "Degree of parallelity B": np.sqrt(
                       b_z * np.conj(b_z)) / b_tot,
                   "Ellipticity E": e_pol, "Ellipticity B": b_pol,
                   "E_part/E_field": np.log10(ratio_part_field),
                   "v_g": np.sqrt(v_x ** 2 + v_z ** 2),
                   "v_ph/v_a": np.log10(v_ph_va),
                   "E_e/E_i": np.log10(en_e / en_i),
                   "v_e/v_i": np.log10(np.sqrt(v_e2 / v_i2)),
                   "v_epara/v_eperp": np.log10(vepar_perp),
                   "v_ipara/v_iperp": np.log10(vipar_perp),
                   "dn_e/dn_i": np.log10(dne_dni),
                   "(dn_e/n)/ (dB/B)": np.log10(dn_e_n_db_b),
                   "(dn_i/n)/(dB/B)": np.log10(dn_i_n_db_b),
                   "(dn_i/n)/(dBpar/B)": np.log10(dn_i_n_dbpar_b),
                   "(dn_e/n)/(dB/B)": np.log10(dn_e / k_dot_e),
                   "(dn_e/n)/(dBpar /B)": np.log10(dn_e_n_dbpar_b),
                   " Spar/Stot": s_par / s_tot}

    for k, v in zip(extra_param.keys(), extra_param.values()):
        extra_param[k] = np.transpose(np.real(v), [0, 2, 1])

    kx_ = np.transpose(kc_x_mat)
    kz_ = np.transpose(kc_z_mat)
    wf_ = np.transpose(w_final, [0, 2, 1])

    return kx_, kz_, wf_, extra_param
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_mixing_coefficients_surf(self):\n [Ly,N] = self.b.shape\n z_u_w = self.grid_dict['z_u_w']\n\n # SET UP NEW MIXING COEFFICIENT ARRAYS\n self.Kv_surf = np.zeros([Ly,N+1])\n self.Kt_surf = np.zeros([Ly,N+1])\n \n self.ghat = np.zeros([Ly,N+1])\n \n\n #################################\n # \tSURFACE KPP\n ################################\n #---> j-loop\n \n self.wm2 = []\n self.ws2 = []\n self.sigma_y = []\n for j in range(Ly):\n #--> k-loop (top to kbl[j])\n # in fortran k=N-1,kbl(j),-1\n for k in range(N-1,self.kbl[j]-1,-1):\n k_w = k\n k_r = k-1\n\n Bfsfc = self.Bfsfc_bl[j]\n zscale = z_u_w[j,N] - z_u_w[j,k_w]\n \n # CALCULATE TURBULENT VELOCITY SCALES\n wm,ws = self.lmd_wscale_wm_and_ws(Bfsfc,zscale,self.ustar[j],self.hbls[j])\n self.wm2.append(wm)\n self.ws2.append(ws)\n # COMPUTE VERTICAL MIXING COEFFICIENTS\n sigma = (z_u_w[j,N] - z_u_w[j,k_w]) / np.max([self.hbls[j],self.eps])\n self.sigma1 = sigma #for debugging\n if j == 25: \n self.sigma_y.append(sigma)\n a1 = sigma - 2.\n a2 = 3.-2.*sigma\n a3 = sigma - 1.\n\n if sigma < 0.07:\n cff = 0.5 * (sigma-0.07)**2/0.07\n else:\n cff = 0\n \n \n if k == N-1: \n self.wm_debug = wm\n self.hbls_debug = self.hbls[j]\n self.cff_debug = cff\n self.sigma_debug = sigma\n self.a1_debug = a1\n self.a2_debug = a2\n self.a3_debug = a3\n\n self.Kv_surf[j,k_w] = wm * self.hbls[j] * ( cff + sigma * (1. + sigma * (\\\n a1 + a2*self.Gm1[j]+a3*self.dGm1_dS[j])))\n\n if k == N-1:\n self.ws_debug = ws\n self.hbls_debug = self.hbls[j]\n self.cff_debug = cff\n self.sigma_debug = sigma\n self.a1_debug = a1\n self.a2_debug = a2\n self.a3_debug = a3\n \n self.Kt_surf[j,k_w] = ws * self.hbls[j] * ( cff + sigma * (1. + sigma * (\\\n a1 + a2*self.Gt1[j]+a3*self.dGt1_dS[j])))\n #---> end k-loop \n if self.LMD_NONLOCAL:\n if Bfsfc < 0:\n self.ghat[j,k_w] = 0\n self.ghat[j,k_w] = self.Cg * sigma * (1.-sigma)**2\n else:\n self.ghat[j,k_w] = 0.\n\n # ADD CONVECTIVE ADJUSTMENT IN SURFACE MIXED LAYER \n if self.LMD_CONVEC and self.MLCONVEC: \n for k in range(N-1,int(self.kbl[j]-1),-1):\n k_w = k\n k_r = k -1\n\n if self.bvf[j,k_w] < 0:\n self.Kt_surf[j,k_w] = self.Kt_surf[j,k_w] + self.ffac*self.nu0c\n\n # ADD CONVECTIVE ADJUSTMENT BELOW SURFACE MIXED LAYER\n # IF BKPP IS SWITCHED OFF!!\n for k in range(int(self.kbl[j]-1),-1,-1):\n k_w = k\n k_r = k -1\n if self.LMD_NONLOCAL:\n self.ghat[j,k_w] = 0\n if self.LMD_CONVEC and self.LMD_BKPP == False:\n if self.bvf[j,k_w] < 0:\n self.Kv_surf[j,k_w] = self.Kv_surf[j,k_w] + self.nu0c\n self.Kt_surf[j,k_w] = self.Kt_surf[j,k_w] + self.nu0c\n \n\n #---> end j-loop", "def get_bforce_wm_ws_Gx_surf(self):\n\n [Ly,N] = self.b.shape\n z_u_w = self.grid_dict['z_u_w'] \n \n self.Gm1 = np.zeros([Ly])\n self.dGm1_dS = np.zeros([Ly]) \n self.Gt1 = np.zeros([Ly])\n self.dGt1_dS = np.zeros([Ly]) \n self.Bfsfc_bl = np.zeros([Ly])\n self.Av_bl = np.zeros([Ly])\n self.dAv_bl = np.zeros([Ly])\n \n #debugging\n self.wm_surf = np.zeros([Ly])\n self.ws_surf = np.zeros([Ly]) \n\n #---> j-loop\n for j in range(Ly): \n k_w = self.kbl[j] # KBL is \"new bl index after calling find_new_kbl()\n z_bl = z_u_w[j,N] - self.hbls[j]\n zscale = self.hbls[j] \n \n if self.swr_frac[j,k_w-1] > 0:\n Bfsfc = self.Bo[j] + self.Bosol[j] * ( 1. 
- self.swr_frac[j,k_w-1]\\\n * self.swr_frac[j,k_w] * ( z_u_w[j,k_w] - z_u_w[j,k_w-1] )\\\n / (self.swr_frac[j,k_w] * (z_u_w[j,k_w] - z_bl)\\\n + self.swr_frac[j,k_w-1] * (z_bl - z_u_w[j,k_w-1]) ))\n \n else:\n Bfsfc = self.Bo[j] + self.Bosol[j]\n \n # CALCUALTE TURBULENT VELOCITY SCALES\n wm,ws = self.lmd_wscale_wm_and_ws(Bfsfc,zscale,self.ustar[j],self.hbls[j])\n self.wm_surf[j] = wm\n self.ws_surf[j] = ws \n\n if self.LIMIT_UNSTABLE_ONLY:\n f1 = 5. * np.max([0,Bfsfc]) * self.vonKar / (self.ustar[j]**4+self.eps)\n else:\n f1 = 0\n\n \n cff = 1. / (z_u_w[j,k_w] - z_u_w[j,k_w-1])\n cff_up = cff * (z_bl - z_u_w[j,k_w])\n cff_dn = cff * (z_u_w[j,k_w] - z_bl)\n\n #MOMENTUM \n Av_bl = cff_up * self.Kv_old[j,k_w] + cff_dn * self.Kv_old[j,k_w-1]\n dAv_bl = cff * (self.Kv_old[j,k_w] - self.Kv_old[j,k_w-1])\n self.Av_bl[j] = Av_bl\n self.dAv_bl[j] = dAv_bl\n self.Gm1[j] = Av_bl / (self.hbls[j] * wm + self.eps)\n self.dGm1_dS[j] = np.min([0.,Av_bl*f1-dAv_bl/(wm+self.eps)]) \n\n #TEMPERATURE(BUOYANCY)\n At_bl = cff_up * self.Kt_old[j,k_w] + cff_dn * self.Kt_old[j,k_w-1]\n dAt_bl = cff * (self.Kt_old[j,k_w] - self.Kt_old[j,k_w-1])\n self.Gt1[j] = At_bl / (self.hbls[j] * ws + self.eps)\n self.dGt1_dS[j] = np.min([0.,At_bl*f1-dAt_bl/(ws+self.eps)]) \n\n self.Bfsfc_bl[j] = Bfsfc", "def bndy_plasma(self):\n self.ne[0], self.ne[-1] = 1e11, 1e11\n self.ni[0], self.ni[-1] = 1e11, 1e11\n self.nn[0], self.nn[-1] = 1e11, 1e11\n self.Te[0], self.Te[-1] = 0.1, 0.1\n self.Ti[0], self.Ti[-1] = 0.01, 0.01\n # self.coll_em[0], self.coll_em[-1] = 1e5, 1e5\n # self.coll_im[0], self.coll_im[-1] = 1e5, 1e5", "def dry_snow_density(self):\n return (self.rho - self.h2o_vol * RHO_W0) / \\\n (1 - self.h2o_vol * RHO_W0 / RHO_ICE)", "def snow_depth(lon, lat, month):\n\n im = month - 1\n\n h0 = np.array( [28.01, 30.28, 33.89, 36.80, 36.93, 36.59,\n 11.02, 4.64, 15.81, 22.66, 25.57, 26.67] )\n a = np.array( [ 0.1270, 0.1056, 0.5486, 0.4046, 0.0214, 0.7021,\n 0.3008, 0.3100, 0.2119, 0.3594, 0.1496, -0.1876] )\n b = np.array( [-1.1833, -0.5908, -0.1996, -0.4005, -1.1795, -1.4819,\n -1.2591, -0.6350, -1.0292, -1.3483, -1.4643, -1.4229] )\n c = np.array( [-0.1164, -0.0263, 0.0280, 0.0256, -0.1076, -0.1195,\n -0.0811, -0.0655, -0.0868, -0.1063, -0.1409, -0.1413] )\n d = np.array( [-0.0051, -0.0049, 0.0216, 0.0024, -0.0244, -0.0009,\n -0.0043, 0.0059, -0.0177, 0.0051, -0.0079, -0.0316] )\n e = np.array( [ 0.0243, 0.0044, -0.0176, -0.0641, -0.0142, -0.0603,\n -0.0959, -0.0005, -0.0723, -0.0577, -0.0258, -0.0029] )\n\n x = (90. - lat) * np.cos( np.radians(lon) )\n y = (90. - lat) * np.sin( np.radians(lon) )\n\n h = ( h0[im] + ( a[im] * x ) + ( b[im] * y ) + ( c[im] * x * y ) +\n ( d[im] * x * x ) + ( e[im] * y * y ) )\n\n return h", "def pwlFly(site_residuals, azSpacing=0.5,zenSpacing=0.5):\n tdata = res.reject_absVal(site_residuals,100.)\n del site_residuals \n data = res.reject_outliers_elevation(tdata,5,0.5)\n del tdata\n\n numd = np.shape(data)[0]\n numZD = int(90.0/zenSpacing) + 1\n numAZ = int(360./zenSpacing)\n pwl_All = np.zeros((numAZ,numZD))\n pwlSig_All = np.zeros((numAZ,numZD))\n Bvec_complete = []\n Sol_complete = []\n meas_complete = []\n model_complete = []\n postchis = []\n prechis = []\n aics = []\n bics = []\n #w = 1;\n\n for j in range(0,numAZ):\n # Find only those value within this azimuth bin:\n if(j - azSpacing/2. < 0) :\n criterion = (data[:,1] < (j + azSpacing/2.)) | (data[:,1] > (360. - azSpacing/2.) )\n else:\n criterion = (data[:,1] < (j + azSpacing/2.)) & (data[:,1] > (j - azSpacing/2.) 
)\n ind = np.array(np.where(criterion))[0]\n azData =data[ind,:]\n numd = np.shape(azData)[0]\n #print(\"NUMD:\",numd)\n if numd < 2:\n continue\n #\n # Neq is acting like a constrain on the model a small value 0.001\n # let the model vary by 1000 mm\n # will let it vary more. a large value -> 1 will force the model to be closer to 0\n # This gets too large for lots of observations, s best to doit on the fly..\n #\n Neq = np.eye(numZD,dtype=float)# * 0.001\n Apart = np.zeros((numd,numZD))\n\n for i in range(0,numd):\n iz = int(np.floor(azData[i,2]/zenSpacing))\n Apart[i,iz] = (1.-(azData[i,2]-iz*zenSpacing)/zenSpacing)\n Apart[i,iz+1] = (azData[i,2]-iz*zenSpacing)/zenSpacing\n w = np.sin(data[i,2]/180.*np.pi)\n for k in range(iz,iz+2):\n for l in range(iz,iz+2):\n Neq[k,l] = Neq[k,l] + (Apart[i,l]*Apart[i,k]) * 1./w**2\n\n prechi = np.dot(azData[:,3].T,azData[:,3])\n\n Bvec = np.dot(Apart.T,azData[:,3])\n for val in Bvec:\n Bvec_complete.append(val)\n\n Cov = np.linalg.pinv(Neq)\n Sol = np.dot(Cov,Bvec)\n for val in Sol:\n Sol_complete.append(val)\n\n #Qxx = np.dot(Apart.T,Apart)\n #Qvv = np.subtract( np.eye(numd) , np.dot(np.dot(Apart,Qxx),Apart.T))\n #sd = np.squeeze(np.diag(Qvv))\n #dx = np.dot(np.linalg.pinv(Qxx),Bvec)\n #dl = np.dot(Apart,dx)\n\n postchi = prechi - np.dot(Bvec.T,Sol)\n postchis.append(np.sqrt(postchi/numd))\n prechis.append(np.sqrt(prechi/numd))\n pwlsig = np.sqrt(np.diag(Cov) *postchi/numd)\n\n # calculate the model values for each obs\n model = np.dot(Apart,Sol) #np.zeros(numd)\n for d in range(0,numd):\n model_complete.append(model[d])\n meas_complete.append(azData[d,3])\n # zen = azData[d,2]\n # iz = int(np.floor(azData[d,2]/zenSpacing))\n # #model[d] = Sol[iz]\n\n #print(\"STATS:\",numd,np.sqrt(prechi/numd),np.sqrt(postchi/numd),np.sqrt((prechi-postchi)/numd),gls_results.rsquared,gls_results.aic,gls_results.bic)\n \n # loglikelihood(meas,model,sd)\n #sd = np.squeeze(np.diag(Qvv))\n #print(\"meas, model, sd:\",np.shape(azData),np.shape(model),np.shape(sd))\n f = loglikelihood(azData[:,3],model)\n dof = numd - np.shape(Sol)[0]\n aic = calcAIC(f,dof)\n bic = calcBIC(f,dof,numd)\n aics.append(aic) \n bics.append(bic) \n #print(\"=========================\")\n pwl_All[j,:] = Sol \n pwlSig_All[j,:] = pwlsig\n\n del Sol,pwlsig,Cov,Bvec,Neq,Apart,azData,ind\n\n #A_complete = np.squeeze(np.asarray(A_complete.todense()))\n #print(\"A shape\",np.shape(A_complete))\n\n print(\"Doing a fit to the data\")\n f = loglikelihood(np.array(meas_complete),np.array(model_complete))\n numd = np.size(meas_complete)\n dof = numd - np.shape(Sol_complete)[0]\n aic = calcAIC(f,dof)\n bic = calcBIC(f,dof,numd)\n #prechi = np.dot(data[:,3].T,data[:,3])\n prechi = np.dot(np.array(meas_complete).T,np.array(meas_complete))\n postchi = prechi - np.dot(np.array(Bvec_complete).T,np.array(Sol_complete))\n #print(\"My loglikelihood:\",f,aic,bic,dof,numd)\n print(\"STATS:\",numd,np.sqrt(prechi/numd),np.sqrt(postchi/numd),np.sqrt((prechi-postchi)/numd),aic,bic)\n\n return pwl_All, pwlSig_All", "def solid_surface_density_RC2014_given_observed_catalog(sss_per_sys, max_core_mass=10.):\n mult_obs = sss_per_sys['Mtot_obs']\n mult_obs_2p = []\n a_obs_2p = []\n core_mass_obs_2p = []\n sigma_obs_2p = []\n for i in np.arange(len(mult_obs))[mult_obs > 1]: # only consider multi-planet systems\n a_sys = gen.a_from_P(sss_per_sys['P_obs'][i], sss_per_sys['Mstar_obs'][i])\n core_mass_sys = generate_planet_mass_from_radius_Ning2018_table_above_lognormal_mass_earthlike_rocky_below_vec(sss_per_sys['radii_obs'][i][a_sys 
> 0])\n core_mass_sys[core_mass_sys > max_core_mass] = max_core_mass\n a_sys = a_sys[a_sys > 0]\n\n mult_obs_2p += [len(a_sys)]*len(a_sys)\n a_obs_2p += list(a_sys)\n core_mass_obs_2p += list(core_mass_sys)\n sigma_obs_2p += list(solid_surface_density_system_RC2014(core_mass_sys, a_sys))\n mult_obs_2p = np.array(mult_obs_2p)\n a_obs_2p = np.array(a_obs_2p)\n core_mass_obs_2p = np.array(core_mass_obs_2p)\n sigma_obs_2p = np.array(sigma_obs_2p)\n return sigma_obs_2p, core_mass_obs_2p, a_obs_2p, mult_obs_2p", "def main():\n\n varList = {'beta': 6., 'convSpeed': 1.2, 'Mark': 0., 'axi': 1, 'acModes': 4, 'Nr': 801, 'Tf': 600., 'xf': 0.51}\n\n # Solve steady flame.\n # BC1: I have the attachment BC at r = 1, always\n # BC2: I need to set dF/dr = 0 at r = 0 iff Mark != 0\n [qMean, r, FMean] = steady_flame_area_FD3(varList['Mark'], varList['beta'], varList['axi'], varList['Nr'])\n r = r * varList['beta']\n\n # Calculate mean flame derivatives\n dFMeanDr = derivsnew.FD1_CT2_D(FMean, r[1] - r[0])\n d2FMeanDr2 = derivsnew.FD2_CT2_D(FMean, r[1] - r[0])\n\n #Apply BC smooth tip:\n if(varList['Mark']!=0.0):\n dFMeanDr[-1] = 0.0\n\n # Use correct number of points. Remember that the extrems need to be set depending on the BC!\n # The attach BC (first point) is always assumed to be true and removed from the vector list\n if(varList['Mark']==0):\n Nr = varList['Nr'] / 2\n dFMeanDr = dFMeanDr[1:]\n d2FMeanDr2 = d2FMeanDr2[1:]\n r = r[1:]\n # The smooth BC holds only if Mark!=0 (second derivatives appear): remove also the last point\n else:\n Nr = varList['Nr'] / 2 - 1\n dFMeanDr = dFMeanDr[1:-1]\n d2FMeanDr2 = d2FMeanDr2[1:-1]\n r = r[1:-1]\n\n # Calculate geometric values\n den = 1 + varList['beta'] * varList['beta'] * dFMeanDr * dFMeanDr\n dR = r[1] - r[0]\n # Set Nx equal to Nr for now.\n # The implementation is more complicated if they differ, and need to interpolate between values.\n Nx = Nr\n\n # Nonuniform grid spacing along x!\n # Nx = length(dx) has to hold.\n dx = np.empty(len(FMean) - 1)\n for ii in range(1, len(FMean)):\n dx[ii - 1] = FMean[ii] - FMean[ii - 1]\n\n [A, B, C, tau] = loadAcoustics(varList['xf'], varList['Tf'], varList['acModes'], varList['beta'])\n\n Matrix = buildMatrix(Nr, dR, varList['beta'], den, r, FMean, dFMeanDr, d2FMeanDr2, varList['Mark'], varList['acModes'], A,\n B, C, Nx, dx, tau, qMean, varList['convSpeed'])\n\n [d, W, V] = eigProblem.solveEigProb(Matrix)\n [dnew, Wnew, Vnew] = eigProblem.selectUnstable(d, W, V)\n\n print dnew / (2. 
* np.pi)", "def calculate_surface_heatflux(self, weather, spaces_dict, surface, temp_record, Coeff, space, h_surface, ShadowsFlag, ns, shadow_record, shade_surf_list, surfaces_dict, Aflag, terrain, areaDict, areaWinDict, shadowRatios, shadowRatioIndex):\r\n #print \"Reaching Surface function...\"\r\n\r\n # First get the As\r\n A_total = self.get_A(surface, areaDict, areaWinDict)\r\n if Aflag == 0:\r\n # If it is the first surface of the space, label the space ID in the log file:\r\n la = str(surface.obj_id)\r\n lb = str(surface.obj_type)\r\n #TM_user.info(\"%s,surface area,%s,%s\" % (la, A_total, lb))\r\n A_noWin = self.get_A_noWin(surface, areaDict, areaWinDict)\r\n A_noOp = self.get_A_noOp(surface, areaDict, areaWinDict)\r\n T_space = spaces_dict[space.obj_id][1]\r\n T1 = weather[\"t_outside\"]\r\n hc_external = float(self.get_hc_external(weather, surface, h_surface, terrain))\r\n transmitted_win = 0\r\n Q_flux = 0\r\n\r\n # need the surface related information, T_space, U, R3\r\n U = self.get_U_surface_e(A_total, A_noOp, surface, areaWinDict) # U = Infor_surface{11,i_surface}; Defined Below\r\n #print U\r\n R3 = 1/U\r\n # Using calculations from: self.surface.constr.layer.C # Infor_surface{10, i_surface} ; from gbXML\r\n C = self.get_C_surface(A_total, A_noOp, surface, Coeff, areaWinDict) # need to pass surface and opening ids\r\n #print C\r\n\r\n temperature = Temperature()\r\n\r\n #Sub-routines for each wall type based on the returned hc_external\r\n # This hc is different for each surface type so moved under this sub-routine area\r\n #hc = 3.076 sent this to the Temperature Object\r\n if surface.obj_type == \"ExteriorWall\":\r\n transmitted_win, Q_flux = temperature.exterior_wall(surface, hc_external, T1, A_total, A_noWin, weather, R3, C, T_space, temp_record, ShadowsFlag, ns, shadow_record, shade_surf_list, surfaces_dict, areaWinDict, shadowRatios, areaDict, shadowRatioIndex)\r\n #print Q_flux\r\n if surface.obj_type == \"Roof\":\r\n transmitted_win, Q_flux = temperature.roof(surface, hc_external, T1, A_total, A_noWin, weather, R3, C, A_noOp, T_space, temp_record, ShadowsFlag, ns, shadow_record, shade_surf_list, surfaces_dict, areaWinDict, shadowRatios, areaDict, shadowRatioIndex)\r\n #print Q_flux # Matches for Four Room\r\n if surface.obj_type == \"InteriorWall\":\r\n transmitted_win, Q_flux = temperature.interior_wall(surface, A_total, R3, C, spaces_dict, T_space, temp_record)\r\n #print Q_flux # Matches for Four Room\r\n if surface.obj_type == \"UndergroundWall\":\r\n transmitted_win, Q_flux = temperature.underground_wall(surface, A_total, R3, C, T_space, temp_record) # No instance of yet to test\r\n if surface.obj_type == \"RaisedFloor\":\r\n # This will eventually need some values when we start using raised floors\r\n transmitted_win, Q_flux = temperature.raised_floor(surface, hc_external, T1, A_total, A_noWin, weather, R3, C, A_noOp, T_space, temp_record) # Not instance of yet to test\r\n\r\n return transmitted_win, Q_flux", "def dynamic(self):\n # FrostIndexChangeRate=-(1-Afrost)*FrostIndex - Tavg*exp(-0.04*Kfrost*SnowCover/SnowWaterEquivalent);\n\n FrostIndexChangeRate = -(1 - self.var.Afrost) * self.var.FrostIndex - self.var.Tavg * \\\n np.exp(-0.04 * self.var.Kfrost * self.var.SnowCover / self.var.SnowWaterEquivalent)\n # FrostIndexChangeRate=self.var.AfrostIndex - self.var.Tavg* pcraster.exp(self.var.Kfrost*self.var.SnowCover*self.var.InvSnowWaterEquivalent)\n # Rate of change of frost index (expressed as rate, [degree days/day])\n # CHANGED 9 September 2004:\n # - first 
term should be negative\n # - second term should be subtracted, not added!!\n\n self.var.FrostIndex = np.maximum(self.var.FrostIndex + FrostIndexChangeRate * self.var.DtDay, 0)\n # frost index in soil [degree days]\n # based on Molnau and Bissel (1983, A Continuous Frozen Ground Index for Flood\n # Forecasting. In: Maidment, Handbook of Hydrology, p. 7.28, 7.55)\n # if Tavg is above zero, FrostIndex will stay 0\n # if Tavg is negative, FrostIndex will increase with 1 per degree C per day\n # Exponent of 0.04 (instead of 0.4 in HoH): conversion [cm] to [mm]!\n # Division by SnowDensity because SnowDepth is expressed as equivalent water\n # depth(always less than depth of snow pack)\n # SnowWaterEquivalent taken as 0.100 (based on density of 100 kg/m3) (Handbook of Hydrology, p. 7.5)\n # Afrost, (daily decay coefficient) is taken as 0.97 (Handbook of Hydrology,\n # p. 7.28)\n # Kfrost, (snow depth reduction coefficient) is taken as 0.57 [1/cm],\n # (HH, p. 7.28)", "def solid_surface_density_nHill_given_observed_catalog(sss_per_sys, max_core_mass=10., n=10.):\n Mstar_obs = np.repeat(sss_per_sys['Mstar_obs'][:,None], np.shape(sss_per_sys['P_obs'])[1], axis=1)[sss_per_sys['P_obs'] > 0] # flattened array of stellar masses repeated for each planet\n a_obs_per_sys = gen.a_from_P(sss_per_sys['P_obs'], sss_per_sys['Mstar_obs'][:,None])\n a_obs = a_obs_per_sys[sss_per_sys['P_obs'] > 0]\n radii_obs = sss_per_sys['radii_obs'][sss_per_sys['P_obs'] > 0]\n core_mass_obs = generate_planet_mass_from_radius_Ning2018_table_above_lognormal_mass_earthlike_rocky_below_vec(radii_obs)\n core_mass_obs[core_mass_obs > max_core_mass] = max_core_mass\n sigma_obs = solid_surface_density_nHill(core_mass_obs, a_obs, Mstar=Mstar_obs, n=n)\n return sigma_obs, core_mass_obs, a_obs", "def oceansim(sun_az,sun_zen,cam_head,cam_elev=0,m2=1.33,npart=1.08,mu=3.483, debug=True):\n\n #Water surface norm\n n = np.array([0,0,1])\n m1 = 1.0\n #vector from sun:\n ki = -np.asarray([np.sin(sun_az)*np.sin(sun_zen),\n np.cos(sun_az)*np.sin(sun_zen),\n np.cos(sun_zen)])\n xi = norm_cross(n,ki)\n #transmitted sunlight\n #tx, ty are the transmission amplitude coefficients in the xt, yt directions\n kt,tx,ty = Fresnel.transmission(ki,n,m1,m2)\n xt = xi\n #vector to camera\n kc = -np.asarray([np.sin(cam_head)*np.cos(cam_elev),\n np.cos(cam_head)*np.cos(cam_elev),\n np.sin(cam_elev)])*np.linalg.norm(kt)\n xc = norm_cross(n, kc) #right\n yc = norm_cross(kc, xc) #up\n #vectors for scattering\n ys = norm_cross(kt, kc) # y-axis of scattering event\n xst = norm_cross(ys, kt) # x-axis of scattering event relative to transmitted sunlight\n xsc = norm_cross(ys, kc) # x-axis of scattering event relative to camera\n #Mueller matrices\n # transmission through water surface:\n mm1 = Mueller.polarizer(tx,ty)\n # rotate to scattering plane\n mm2 = Mrotv(kt,xt,xst)\n # scatter\n th_s = vector_angle(kt,kc)\n #mm3 = Mocean(rad2deg(th_s)) #using Empirical ocean scattering\n mm3 = Mueller.rayleigh_norm(th_s) #normalized Rayleigh scattering matrix\n #b = Scattering.bsf_fournier(npart,mu)\n b = Scattering.vspf_fournier(th_s,npart,mu)\n # transform to camera's horizontal and up vectors\n mm4 = Mxform(xsc,ys, xc,yc)\n #Combined: mm4 . (b*mm3) . mm2 . 
mm1\n m = mm4.dot(b*mm3.dot(mm2.dot(mm1)))\n #stokes vector\n s = m.dot([1,0,0,0])\n if debug:\n return s,m,(ki,xi),(kt,xt,xst),(kc,xc,xsc),(mm1,mm2,mm3,b,mm4)\n else:\n return s,m", "def calc_ked_WFI(self):\n\n #Initialize kinetic energy density\n self.ked_WFI = np.zeros( (self.grid.Nelem, 1))\n\n #Figure out the number of occupied orbitals\n if self.m == 0:\n if self.pol == 1:\n Nocc = np.floor(self.N/2)\n nu = self.N / 2 - Nocc\n else:\n Nocc = np.floor(self.N)\n nu = self.N - Nocc\n\n else:\n #m>0 orbitals hold twice as many electrons due to +-m symmetry\n if self.pol == 1:\n Nocc = np.floor(self.N / 4)\n nu = self.N / 4 - Nocc\n else:\n Nocc = np.floor(self.N/2)\n nu = self.N / 2 - Nocc\n\n #Construct density\n for i in range(int(Nocc)):\n # print(\"phi from pssolver\", self.phi)\n # print(\"phi subset\", self.phi[:,i])\n # print(\"integrate returns\", self.grid.integrate( self.phi[:,i]**2 )**0.5)\n\n #Normalized orbital\n phi_norm = self.phi[:,i] / self.grid.integrate( self.phi[:,i]**2 )**0.5\n phi_norm = phi_norm[:, None]\n self.ked_WFI += (phi_norm * (self.H0 @ phi_norm)) / self.grid.w[:, None]\n\n #If we are doing fractional robitals and are non-integer\n if self.FRACTIONAL is True and nu != 0:\n #Normalized orbital\n phi_norm = self.phi[:,i] / self.grid.integrate( self.phi[:, Nocc+1]**2)**0.5\n phi_norm = phi_norm[:, None]\n self.ked_WFI += nu * ( phi_norm * (self.H0 @ phi_norm) ) / self.grid.w[:, None]\n\n #Scale densities appropriately\n if self.m == 0:\n if self.pol == 1: #Unpolarized electrons\n self.ked_WFI = 2 * self.ked_WFI\n\n else: # m>0 orbitals hold twice as many electrons due to +-m symmetry\n if self.pol == 1:\n self.ked_WFI = 4 * self.ked_WFI\n else:\n self.ked_WFI = 2 * self.ked_WFI", "def gz(xp, yp, zp, prisms):\n if xp.shape != yp.shape != zp.shape:\n raise ValueError(\"Input arrays xp, yp, and zp must have same shape!\")\n dummy = 1e-10\n res = 0\n for prism in prisms:\n if prism is None or 'density' not in prism.props:\n continue\n x, y = prism.x, prism.y\n z1, z2 = prism.z1, prism.z2\n density = prism.props['density']\n nverts = prism.nverts\n # Calculate the effect of the prism\n Z1 = z1 - zp\n Z2 = z2 - zp\n Z1_sqr = Z1**2\n Z2_sqr = Z2**2\n kernel = 0\n for k in range(nverts):\n Xk1 = x[k] - xp\n Yk1 = y[k] - yp\n Xk2 = x[(k + 1) % nverts] - xp\n Yk2 = y[(k + 1) % nverts] - yp\n p = Xk1*Yk2 - Xk2*Yk1\n p_sqr = p**2\n Qk1 = (Yk2 - Yk1)*Yk1 + (Xk2 - Xk1)*Xk1\n Qk2 = (Yk2 - Yk1)*Yk2 + (Xk2 - Xk1)*Xk2\n Ak1 = Xk1**2 + Yk1**2\n Ak2 = Xk2**2 + Yk2**2\n R1k1 = np.sqrt(Ak1 + Z1_sqr)\n R1k2 = np.sqrt(Ak2 + Z1_sqr)\n R2k1 = np.sqrt(Ak1 + Z2_sqr)\n R2k2 = np.sqrt(Ak2 + Z2_sqr)\n Ak1 = np.sqrt(Ak1)\n Ak2 = np.sqrt(Ak2)\n Bk1 = np.sqrt(Qk1**2 + p_sqr)\n Bk2 = np.sqrt(Qk2**2 + p_sqr)\n E1k1 = R1k1*Bk1\n E1k2 = R1k2*Bk2\n E2k1 = R2k1*Bk1\n E2k2 = R2k2*Bk2\n # Simplifying these arctans with, e.g., (Z2 - Z1)*arctan2(Qk2*p -\n # Qk1*p, p*p + Qk2*Qk1) doesn't work because of the restrictions\n # regarding the angles for that identity. 
The regression tests\n # fail for some points by a large amount.\n kernel += (Z2 - Z1)*(np.arctan2(Qk2, p) - np.arctan2(Qk1, p))\n kernel += Z2*(np.arctan2(Z2*Qk1, R2k1*p) -\n np.arctan2(Z2*Qk2, R2k2*p))\n kernel += Z1*(np.arctan2(Z1*Qk2, R1k2*p) -\n np.arctan2(Z1*Qk1, R1k1*p))\n Ck1 = Qk1*Ak1\n Ck2 = Qk2*Ak2\n # dummy helps prevent zero division and log(0) errors (that's why I\n # need to add it twice)\n # Simplifying these two logs with a single one is not worth it\n # because it would introduce two pow operations.\n kernel += 0.5*p*Ak1/(Bk1 + dummy)*np.log(\n (E1k1 - Ck1)*(E2k1 + Ck1)/((E1k1 + Ck1)*(E2k1 - Ck1) + dummy) +\n dummy)\n kernel += 0.5*p*(Ak2/(Bk2 + dummy))*np.log(\n (E2k2 - Ck2)*(E1k2 + Ck2)/((E2k2 + Ck2)*(E1k2 - Ck2) + dummy) +\n dummy)\n res += kernel*density\n res *= G*SI2MGAL\n return res", "def create_flux_vector_pms_gr(self):\n soma_prod = 0\n soma_inj = 0\n lim4 = 1e-4\n store_velocity = {}\n store_flux = {}\n for primal in self.primals:\n #1\n primal_id = self.mb.tag_get_data(self.primal_id_tag, primal, flat=True)[0]\n primal_id = self.ident_primal[primal_id]\n fine_elems_in_primal = self.mb.get_entities_by_handle(primal)\n for volume in fine_elems_in_primal:\n #2\n flux = {}\n velocity = {}\n kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])\n centroid_volume = self.mesh_topo_util.get_average_position([volume])\n z_vol = self.tz - centroid_volume[2]\n adjs_vol = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)\n gid_vol = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]\n for adj in adjs_vol:\n #3\n gid_adj = self.mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]\n if adj not in fine_elems_in_primal:\n #4\n pvol = self.mb.tag_get_data(self.pms_tag, volume, flat=True)[0]\n padj = self.mb.tag_get_data(self.pms_tag, adj, flat=True)[0]\n #3\n else:\n #4\n pvol = self.mb.tag_get_data(self.pcorr_tag, volume, flat=True)[0]\n padj = self.mb.tag_get_data(self.pcorr_tag, adj, flat=True)[0]\n #3\n kadj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])\n centroid_adj = self.mesh_topo_util.get_average_position([adj])\n z_adj = self.tz - centroid_adj[2]\n direction = centroid_adj - centroid_volume\n unit = direction/np.linalg.norm(direction)\n #unit = vetor unitario na direcao de direction\n uni = self.unitary(direction)\n # uni = valor positivo do vetor unitario\n kvol = np.dot(np.dot(kvol,uni),uni)\n kadj = np.dot(np.dot(kadj,uni),uni)\n keq = self.kequiv(kvol, kadj)/(self.mi)\n keq2 = keq\n keq = keq*(np.dot(self.A, uni))\n pvol2 = self.mb.tag_get_data(self.pms_tag, volume, flat=True)[0]\n padj2 = self.mb.tag_get_data(self.pms_tag, adj, flat=True)[0]\n grad_p = (padj - pvol)/float(abs(np.dot(direction, uni)))\n grad_z = (z_adj - z_vol)/float(abs(np.dot(direction, uni)))\n grad_p2 = (padj2 - pvol2)/float(abs(np.dot(direction, uni)))\n q = (grad_p)*keq - grad_z*keq*self.gama\n print((grad_p)*keq)\n print(- grad_z*keq*self.gama)\n print(q)\n print(self.store_flux_pf_gr[volume][tuple(unit)])\n print('\\n')\n import pdb; pdb.set_trace()\n\n if gid_adj > gid_vol:\n v = -((grad_p2)*keq2 - grad_z*self.gama*keq2)\n else:\n v = -((grad_p2)*keq2 - grad_z*self.gama*keq2)\n\n flux[tuple(unit)] = q\n velocity[tuple(unit)] = v\n kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])\n\n #2\n # print(gid_vol)\n # print(velocity)\n # print('\\n')\n # import pdb; pdb.set_trace()\n store_flux[volume] = flux\n self.mb.tag_set_data(self.flux_fine_pms_tag, volume, sum(flux.values()))\n # flt = sum(flux.values())\n # if volume not in 
self.wells_inj and volume not in self.wells_prod:\n # lim4 = 1e-7\n # if abs(flt) > lim4:\n # print(gid_vol)\n # print(flt)\n # import pdb; pdb.set_trace()\n # flt = sum(flux.values())\n store_velocity[volume] = velocity\n\n for volume in set(self.all_fine_vols) - set(self.wells):\n gid = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]\n values = store_flux[volume].values()\n if sum(values) > lim4:\n print('fluxo multiescala nao esta dando conservativo')\n print('gid:{0}'.format(gid))\n print(sum(values))\n import pdb; pdb.set_trace()\n\n with open('fluxo_multiescala_gr.txt', 'w') as arq:\n for volume in self.wells:\n gid = self.mb.tag_get_data(self.global_id_tag, volume, flat= True)[0]\n values = store_flux[volume].values()\n if volume in self.wells_inj:\n soma_inj += sum(values)\n else:\n soma_prod += sum(values)\n arq.write('gid:{0} , fluxo:{1}\\n'.format(gid, sum(values)))\n arq.write('\\n')\n arq.write('soma_inj:{0}\\n'.format(soma_inj))\n arq.write('soma_prod:{0}\\n'.format(soma_prod))\n\n return store_flux", "def dispersion(self, k):\n return 2*self.material.gamma*np.sqrt(self.material.z)", "def Schechter_M_z_M200c(M, redshift, M200c):\n\treturn 0.4 * n.log(10.) * 10**logPhi_evol(redshift, mass_2_richness(M200c, redshift)) * 10**(0.4 * (M_s_evol(redshift, mass_2_richness(M200c, redshift)) - M) * (alpha_evol(redshift, mass_2_richness(M200c, redshift)) + 1)) * n.e**( -10** ( 0.4 * (M_s_evol(redshift,mass_2_richness(M200c, redshift)) - M)))", "def get_hbls_hbbl(self):\n [Ly,N] = self.b.shape\n z_u_w = self.grid_dict['z_u_w']\n z_u_r = self.grid_dict['z_u_r']\n u = self.u\n v = self.v\n \n v_upts = TTTW_func.v2u(v)\n Hz = z_u_w[:,1:] - z_u_w[:,:-1]\n\n\n\n # CALCULATE swr_frac\n self.swr_frac = TTTW_func.lmd_swr_frac(self.grid_dict)\n\n\n # WHOLE THING HAPPENS IN j loop through y-indices\n \n # INITIALIZE ARRAYS\n self.kmo = np.zeros([Ly])\n self.Cr = np.zeros([Ly])\n self.kbl = np.empty([Ly],dtype='int')\n self.C_h_MO = np.zeros([Ly])\n self.Cr = np.zeros([Ly,N+1]) # sum term\n self.FC = np.zeros([Ly,N+1])\n self.swdk_r = np.zeros([Ly,N+1])\n \n self.zscale = np.zeros([Ly,N])\n self.Kern = np.zeros([Ly,N])\n\n \n # --> LOOP THROUGH Y-INDICES\n for j in range(Ly):\n if self.LIMIT_MO_DEPTH:\n self.kmo[j] = 0\n self.C_h_MO[j] = self.C_MO *self.ustar[j]**3/self.vonKar\n \n self.kbl[j] = 0\n self.Cr[j,-1] = 0 # set top Cr\n self.Cr[j,0] = 0 # set bottom Cr\n \n # SEARCH FOR MIXED LAYER DEPTH\n self.FC[j,-1] = 0.\n\n\n # ---> LOOP TOP TO BOTTOM (FORTRAN ==> k=N-1,1,-1)\n for k in range(N-1,0,-1):\n # INDEX MAP\n k_r = k-1\n k_w = k\n\n \n zscale = z_u_w[j,N] - z_u_r[j,k_r]\n self.zscale[j,k_w] = zscale\n if self.LMD_KPP:\n if self.LMD_BKPP:\n zscaleb = z_u_r[j,k_r] - z_u_w[j,0]\n Kern = zscale * zscaleb**2 / ( (zscale + self.epssfcs*self.hbls_old[j]) * (zscaleb**2+(self.epssfcb**2*self.hbbl_old[j]**2)))\n else:\n Kern = zscale / (zscale + (self.epssfcs*self.hbls_old[j]))\n else:\n Kern = 1.\n \n\n\n self.Kern[j,k_w] = Kern\n self.FC[j,k_w] = self.FC[j,k_w+1] + Kern * (\\\n ( ( u[j,k_r+1] - u[j,k_r] )**2 + ( v_upts[j,k_r+1] - v_upts[j,k_r])**2 ) \\\n / (Hz[j,k_r] + Hz[j,k_r+1]) \\\n - 0.5 * ( Hz[j,k_r] + Hz[j,k_r+1]) * (self.Ri_inv * self.bvf[j,k_w] + self.C_Ek*self.f[j]*self.f[j]))\n\n\n #\t\tLOOP THAT FINDS BL DEPTH ##\n #----> LOOP TOP TO BOTTOM (start at free surface, w-level surface) \n \n if self.LMD_KPP:\n #swdk_r only used in this function so don't need to be class attribute\n # but for testing make it an attribute to see what it is\n \n # fortran equivlanet ===> 
k=N,1,-1 \n for k in range(N,0,-1):\n # INDEX MAP\n k_r = k-1\n k_w = k\n\n ###################################################################### \n self.swdk_r[j,k_w] = np.sqrt( self.swr_frac[j,k_w] * self.swr_frac[j,k_w-1])\n zscale = z_u_w[j,N] - z_u_r[j,k_r]\n Bfsfc = self.Bo[j] + self.Bosol[j] * (1-self.swdk_r[j,k_w])\n \n self.bvf_max = np.sqrt(np.max([0,self.bvf[j,k_w-1]]))\n \n # CALCULATE TURBULENT VELOCITY SCALE FOR TRACERS\n \t\t\t self.ws = self.lmd_wscale_ws_only(Bfsfc, zscale,self.hbls_old[j],self.ustar[j])\n \n self.Vtsq = self.Vtc * self.ws* self.bvf_max + self.V0\n \n\n self.Cr[j,k_w] = self.FC[j,k_w] + self.Vtsq\n \n\n #######################################################################\n \n # SEARCH FOR hbls vertical level #\n '''\n kbl is specified at vertical w-level (via Cr which is at\n vertical w-levels)\n '''\n if self.kbl[j] == 0 and self.Cr[j,k_w] < 0:\n self.kbl[j] = k_w\n if self.LIMIT_MO_DEPTH:\n if self.kmo[j] == 0 and Bfsfc*(z_u_w[j,N] - z_u_r[j,k_r]) > self.C_h_MO[j]:\n self.kmo[j] = k_w\n\n \n #--> still in j-loop\n #######################################################\n \n # \t\tGET SURFACE BOUNDARY LAYER DEPTH # \n self.hbls[j] = z_u_w[j,N] - z_u_w[j,0] + self.eps # set hbls as depth of entire water column\n if self.kbl[j] > 0:\n k_w = self.kbl[j]\n k_r = k_w - 1 \n if k_w == N: # set hbls at the surface btwn w- and rho-levels at surface\n self.hbls[j] = z_u_w[j,N] - z_u_r[j,N-1]\n \n else:\n self.hbls[j] = z_u_w[j,N] - ( z_u_r[j,k_r] * self.Cr[j,k_w+1] - z_u_r[j,k_r+1] * self.Cr[j,k_w]) / \\\n (self.Cr[j,k_w+1] - self.Cr[j,k_w])\n \n if self.LIMIT_MO_DEPTH:\n if self.kmo[j] > 0:\n k_w = self.kmo[j]\n k_r = k_w-1\n if k_w == N:\n z_up = z_u_w[j,N]\n cff_up = np.max([0,Bo[j]])\n else:\n z_up = z_r[j,k_w+1]\n cff_up = np.max([0, Bo[j] + self.Bosol[j]*(1-self.swdk_r[j,(k_w-1)+1])])\n \n cff_dn = np.max([0,Bo[j] + self.Bosol[j] * (1-self.swdk_r[j,k_w])]) \n h_MO = z_u_w[j,N] + self.C_h_MO[j] * ( cff_up*z_up - cff_dn * z_u_r[j,k_r] ) \\\n / ( cff_up * cff_dn * (z_up - z_u_r[j,k_r]) ) \\\n + self.C_h_MO[j] * (cff_dn - cff_up)\n\n self.hbls[j] = np.min([self.hbls[j],np.max([h_MO,0])])\n\n\n\n #### GET BOTTOM BOUNDARY LAYER DEPTH #######\n if self.LMD_BKPP:\n self.kbl[j] = 0 # reset Cr at bottom and kbl for BKPP\n self.Cr[j,0] = 0.\n self.FC[j,0] = 1.5 * self.FC[j,1] - 0.5 * self.FC[j,2] # linear extrapolation\n \n #---> LOOP BOTTOM TO TOP\n # FIND kbl for BBL\n for k in range(1,N+1):\n k_r = k-1\n k_w = k \n self.Cr[j,k_w] = self.FC[j,k_w] - self.FC[j,0]\n \n # LOOK FOR FIRST ZERO CROSSING FROM BOTTOM UP\n if self.kbl[j] == 0 and self.Cr[j,k_w] > 0:\n self.kbl[j] = k_w \n \n\n self.hbbl[j] = z_u_w[j,N] - z_u_w[j,0] # total depth\n if self.kbl[j] > 0 :\n k_w = self.kbl[j] \n k_r = k_w -1\n if k_w == 1: # NO BBL CASE\n self.hbbl[j] = z_u_r[j,0] - z_u_w[j,0] #in between bottom rho and w-level\n else:\n self.hbbl[j] = ( z_u_r[j,k_r-1] * self.Cr[j,k_w] - z_u_r[j,k_r] * self.Cr[j,k_w-1]) / \\\n (self.Cr[j,k_w] - self.Cr[j,k_w-1]) - z_u_w[j,0]", "def dispersion(self, k):\n return np.sqrt(\n 1 + (\n (k*self.material.hbar_m)**2 / (2*self.material.m_star_m) -\n self.material.z\n )**2\n )", "def sigmai_dep(ptem, psal, pref):\n zr4 = 4.8313e-4\n zd =-2.042967e-2\n zrau0 = 1000.e0\n \n sigmai_dep_out = zeros(psal.shape)\n \n # ?? for whatever reason sqrt(abs(psal)) seems to kick up a fuss when arrays\n # exceed a certain size...??? 
otherwise this could be vectorised\n # TODO: if pref is a number, broadcast it into a 2d field\n \n for jj in range(psal.shape[0]): # python indexing\n for ji in range(psal.shape[1]):\n \n ztem = ptem[jj, ji]\n zsal = psal[jj, ji]\n zws = sqrt( abs(psal[jj, ji]) )\n \n # Compute the volumic mass of pure water at atmospheric pressure.\n zr1 = ( ( ( ( (6.536332e-9 * ztem - 1.120083e-6) * ztem + 1.001685e-4 )\n * ztem - 9.095290e-3 ) * ztem + 6.793952e-2 ) * ztem + 999.842594e0\n )\n\n # Compute the seawater volumic mass at atmospheric pressure.\n zr2 = ( ( ( ( 5.3875e-9 * ztem - 8.2467e-7) * ztem + 7.6438e-5)\n * ztem - 4.0899e-3) * ztem + 0.824493e0\n )\n\n zr3 = (-1.6546e-6 * ztem + 1.0227e-4) * ztem - 5.72466e-3\n\n # Compute the potential volumic mass (referenced to the surface).\n zrhop = (zr4 * zsal + zr3 * zws + zr2) * zsal + zr1\n\n # Compute the compression terms.\n ze = (-3.508914e-8 * ztem - 1.248266e-8) * ztem - 2.595994e-6\n\n zbw = (1.296821e-6 * ztem - 5.782165e-9) * ztem + 1.045941e-4\n\n zb = zbw + ze * zsal\n\n zc = (-7.267926e-5 * ztem + 2.598241e-3) * ztem + 0.1571896e0\n\n zaw = ( ( (5.939910e-6 * ztem + 2.512549e-3) * ztem - 0.1028859e0 ) \n * ztem - 4.721788e0\n )\n\n za = (zd * zws + zc) * zsal + zaw\n\n zb1 = (-0.1909078e0 * ztem + 7.390729e0) * ztem - 55.87545e0\n\n za1 = ( ( (2.326469e-3 * ztem + 1.553190e0) * ztem - 65.00517e0)\n * ztem + 1044.077e0\n )\n\n zkw = ( ( ( (-1.361629e-4 * ztem - 1.852732e-2) * ztem - 30.41638e0)\n * ztem + 2098.925e0) * ztem + 190925.60\n )\n\n zk0 = (zb1 * zws + za1) * zsal + zkw\n\n # Compute the potential density anomaly.\n sigmai_dep_out[jj, ji] = ( zrhop / (1.0e0 - pref / \n ( zk0 - pref * (za - pref * zb) ) )\n - zrau0\n )\n \n return sigmai_dep_out", "def sat_vap_dens(nz, T, SWVD, plot=False):\r\n rho_v = np.zeros(nz)\r\n rho_v_dT = np.zeros(nz)\r\n if SWVD == \"Libbrecht\":\r\n rho_v = (\r\n np.exp(-T_ref_L / T) / (f * T) * (a0 + a1 * (T - 273) + a2 * (T - 273) ** 2)\r\n ) # [kg/m^3] Water vapor density\r\n rho_v_dT = (\r\n np.exp(-T_ref_L / T)\r\n / (f * T ** 2)\r\n * (\r\n (a0 - a1 * 273 + a2 * 273 ** 2) * (T_ref_L / T - 1)\r\n + (a1 - a2 * 2 * 273) * T_ref_L\r\n + a2 * T ** 2 * (T_ref_L / T + 1)\r\n )\r\n ) # [kg/m^3/K]\r\n elif SWVD == \"Calonne\":\r\n x = (L_Cal * mH2O) / (rho_i * kB)\r\n rho_v = rho_ref * np.exp(x * ((1 / T_ref_C) - (1 / T)))\r\n\r\n rho_v_dT = x / T ** 2 * rho_ref * np.exp(x * ((1 / T_ref_C) - (1 / T)))\r\n\r\n elif SWVD == \"Hansen\":\r\n\r\n rho_v = (\r\n (10.0 ** (c1 / T + c2 * np.log(T) / np.log(10) + c3 * T + c4 * T ** 2 + c5))\r\n * c6\r\n / R_v\r\n / T\r\n )\r\n rho_v_dT = (\r\n rho_v\r\n * np.log(10)\r\n * (-c1 / T ** 2 + c2 / (T * np.log(10)) + c3 + 2 * c4 * T)\r\n - rho_v / T\r\n )\r\n else:\r\n raise ValueError(\"Saturation water vapor density not available\")\r\n if plot:\r\n fig1 = plt.plot(T, rho_v)\r\n plt.title(\"Water vapor density with respect to temperature\")\r\n plt.show(fig1)\r\n fig2 = plt.plot(T, rho_v_dT)\r\n plt.title(\"Derivative of water vapor density with respect to temperature\")\r\n plt.show(fig2)\r\n return rho_v, rho_v_dT", "def solid_surface_density_CL2013(M, a):\n return solid_surface_density(M, a, a)", "def findzpd(self):\n dc=0.5*self.rms*self.ndstep\n #fixed at 0.1 of the dispersion\n dd=0.1*self.ws.coef[1]\n\n #set upt he docef values\n dcoef=self.ws.coef*0.0\n dcoef[0]=dc\n dcoef[1]=dd\n self.ws=st.findxcor(self.xarr, self.farr, self.swarr, self.sfarr, self.ws, \n dcoef=dcoef, ndstep=self.ndstep, best=False, inttype='interp')\n self.plotArt()\n 
self.redraw_canvas()", "def main_gamma_ray_loop(\n num_decays,\n model,\n plasma,\n time_steps=10,\n time_end=80.0,\n grey_opacity=-1,\n spectrum_bins=500,\n time_space=\"log\",\n photoabsorption_opacity=\"tardis\",\n pair_creation_opacity=\"tardis\",\n seed=1,\n path_to_decay_data=\"~/Downloads/tardisnuclear/decay_radiation.h5\",\n positronium_fraction=0.0,\n):\n # Note: not best numpy practice, but works better in numba than the alternatives\n np.random.seed(seed)\n\n # Enforce cgs\n outer_velocities = model.v_outer.to(\"cm/s\").value\n inner_velocities = model.v_inner.to(\"cm/s\").value\n ejecta_density = model.density.to(\"g/cm^3\").value\n ejecta_volume = model.volume.to(\"cm^3\").value\n ejecta_velocity_volume = (\n 4 * np.pi / 3 * (outer_velocities**3.0 - inner_velocities**3.0)\n )\n time_explosion = model.time_explosion.to(\"s\").value\n number_of_shells = model.no_of_shells\n raw_isotope_abundance = model.raw_isotope_abundance.sort_values(\n by=[\"atomic_number\", \"mass_number\"], ascending=False\n )\n\n shell_masses = ejecta_volume * ejecta_density\n\n time_start = time_explosion\n time_end *= u.d.to(u.s)\n\n assert (\n time_start < time_end\n ), \"Error, simulation start time greater than end time!\"\n\n if time_space == \"log\":\n times = np.zeros(time_steps + 1)\n\n # log time steps\n for i in range(time_steps + 1):\n times[i] = (\n np.log(time_start)\n + (np.log(time_end) - np.log(time_start)) / time_steps * i\n )\n times[i] = np.exp(times[i])\n else:\n times = np.linspace(time_start, time_end, time_steps + 1)\n\n dt_array = np.diff(times)\n effective_time_array = np.array(\n [np.sqrt(times[i] * times[i + 1]) for i in range(time_steps)]\n )\n\n # Use isotopic number density\n for atom_number in plasma.isotope_number_density.index.get_level_values(0):\n values = plasma.isotope_number_density.loc[atom_number].values\n if values.shape[1] > 1:\n plasma.number_density.loc[atom_number] = np.sum(values, axis=0)\n else:\n plasma.number_density.loc[atom_number] = values\n\n # Calculate electron number density\n electron_number_density = (\n plasma.number_density.mul(plasma.number_density.index, axis=0)\n ).sum()\n\n electron_number_density_time = np.zeros(\n (len(ejecta_velocity_volume), len(effective_time_array))\n )\n\n mass_density_time = np.zeros(\n (len(ejecta_velocity_volume), len(effective_time_array))\n )\n\n electron_number = (electron_number_density * ejecta_volume).to_numpy()\n\n inv_volume_time = np.zeros(\n (len(ejecta_velocity_volume), len(effective_time_array))\n )\n\n # Pre-calculate quantities as they change with time\n for i, t in enumerate(effective_time_array):\n inv_volume_time[:, i] = (1.0 / ejecta_velocity_volume) / (t**3.0)\n mass_density_time[:, i] = shell_masses * inv_volume_time[:, i]\n electron_number_density_time[:, i] = (\n electron_number * inv_volume_time[:, i]\n )\n\n energy_df_rows = np.zeros((number_of_shells, time_steps))\n\n # Calculate number of packets per shell based on the mass of isotopes\n number_of_isotopes = plasma.isotope_number_density * ejecta_volume\n total_number_isotopes = number_of_isotopes.sum(axis=1)\n\n inventories = raw_isotope_abundance.to_inventories()\n all_isotope_names = get_all_isotopes(raw_isotope_abundance)\n all_isotope_names.sort()\n\n gamma_ray_lines = get_nuclear_lines_database(path_to_decay_data)\n\n taus = {}\n parents = {}\n gamma_ray_line_array_list = []\n average_energies_list = []\n average_positron_energies_list = []\n\n for i, isotope in enumerate(all_isotope_names):\n nuclide = rd.Nuclide(isotope)\n 
taus[isotope] = nuclide.half_life() / np.log(2)\n child = nuclide.progeny()\n if child is not None:\n for c in child:\n if rd.Nuclide(c).half_life(\"readable\") != \"stable\":\n parents[c] = isotope\n\n energy, intensity = setup_input_energy(\n gamma_ray_lines[\n gamma_ray_lines.Isotope == isotope.replace(\"-\", \"\")\n ],\n \"g\",\n )\n gamma_ray_line_array_list.append(np.stack([energy, intensity]))\n average_energies_list.append(np.sum(energy * intensity))\n positron_energy, positron_intensity = setup_input_energy(\n gamma_ray_lines[\n gamma_ray_lines.Isotope == isotope.replace(\"-\", \"\")\n ],\n \"bp\",\n )\n average_positron_energies_list.append(\n np.sum(positron_energy * positron_intensity)\n )\n\n # Construct Numba typed dicts\n gamma_ray_line_arrays = {}\n average_energies = {}\n average_positron_energies = {}\n\n for iso, lines in zip(all_isotope_names, gamma_ray_line_array_list):\n gamma_ray_line_arrays[iso] = lines\n\n for iso, energy, positron_energy in zip(\n all_isotope_names, average_energies_list, average_positron_energies_list\n ):\n average_energies[iso] = energy\n average_positron_energies[iso] = positron_energy\n\n # urilight chooses to have 0 as the baseline for this calculation\n # but time_start may also be valid in which case decay time is time_end - time_start\n total_energy_list = []\n\n for shell, inv in enumerate(inventories):\n decayed_energy = {}\n total_decays = inv.cumulative_decays(time_end)\n for nuclide in total_decays:\n if nuclide in parents and nuclide != \"Co-56\" and nuclide != \"Co-57\":\n parent = parents[nuclide]\n if parent in parents:\n parent = parents[parent]\n decayed_energy[parent] += (\n total_decays[nuclide]\n * average_energies[nuclide]\n * shell_masses[shell]\n )\n else:\n decayed_energy[nuclide] = (\n total_decays[nuclide]\n * average_energies[nuclide]\n * shell_masses[shell]\n )\n\n total_energy_list.append(decayed_energy)\n\n total_energy = pd.DataFrame(total_energy_list)\n\n total_energy_columns = total_energy.columns.to_list()\n\n total_energy = total_energy[\n sorted(\n total_energy_columns, key=get_nuclide_atomic_number, reverse=True\n )\n ]\n\n energy_per_mass = total_energy.divide(\n (raw_isotope_abundance * shell_masses).T.to_numpy(),\n axis=0,\n )\n\n # Time averaged energy per mass for constant packet count\n average_power_per_mass = energy_per_mass / (time_end - time_start)\n\n energy_per_mass_norm = energy_per_mass.divide(\n energy_per_mass.sum(axis=1), axis=0\n ) # .cumsum(axis=1)\n\n decayed_packet_count = num_decays * number_of_isotopes.divide(\n total_number_isotopes, axis=0\n )\n\n packets_per_isotope = (\n (energy_per_mass_norm * decayed_packet_count.T.values)\n .round()\n .fillna(0)\n .astype(int)\n )\n\n print(\"Total gamma-ray energy\")\n print(total_energy.sum().sum() * u.keV.to(\"erg\"))\n\n print(\"Total positron energy\")\n print(total_energy[\"Co-56\"].sum(axis=0) * 0.0337 * u.keV.to(\"erg\"))\n\n # Taking iron group to be elements 21-30\n # Used as part of the approximations for photoabsorption and pair creation\n # Dependent on atomic data\n iron_group_fraction_per_shell = model.abundance.loc[(21):(30)].sum(axis=0)\n\n number_of_packets = packets_per_isotope.sum().sum()\n print(\"Total packets:\", number_of_packets)\n\n packet_energy = total_energy.sum().sum() / number_of_packets\n\n print(\"Energy per packet\", packet_energy)\n\n # Need to update volume for positron deposition to be time-dependent\n print(\"Initializing packets\")\n (\n packets,\n energy_df_rows,\n energy_plot_df_rows,\n 
energy_plot_positron_rows,\n ) = initialize_packets(\n packets_per_isotope,\n packet_energy,\n gamma_ray_line_arrays,\n positronium_fraction,\n inner_velocities,\n outer_velocities,\n inv_volume_time,\n times,\n energy_df_rows,\n effective_time_array,\n taus,\n parents,\n average_positron_energies,\n inventories,\n average_power_per_mass,\n )\n\n print(\"Total positron energy from packets\")\n print((energy_df_rows).sum().sum() * u.eV.to(\"erg\"))\n\n total_cmf_energy = 0\n total_rf_energy = 0\n\n for p in packets:\n total_cmf_energy += p.energy_cmf\n total_rf_energy += p.energy_rf\n\n print(\"Total CMF energy\")\n print(total_cmf_energy)\n\n # Below is the Artis compensation for their method of packet rejection\n \"\"\"\n energy_ratio = total_energy.sum().sum() / total_cmf_energy\n\n print(\"Energy ratio\")\n print(energy_ratio)\n \n for p in packets:\n p.energy_cmf *= energy_ratio\n p.energy_rf *= energy_ratio\n\n for e in energy_df_rows:\n e *= energy_ratio\n \n for row in energy_plot_df_rows:\n row[1] *= energy_ratio\n \"\"\"\n print(\"Total RF energy\")\n print(total_rf_energy)\n\n energy_bins = np.logspace(2, 3.8, spectrum_bins)\n energy_out = np.zeros((len(energy_bins - 1), time_steps))\n\n # Process packets\n (\n energy_df_rows,\n energy_plot_df_rows,\n energy_out,\n deposition_estimator,\n ) = gamma_packet_loop(\n packets,\n grey_opacity,\n photoabsorption_opacity,\n pair_creation_opacity,\n electron_number_density_time,\n mass_density_time,\n inv_volume_time,\n iron_group_fraction_per_shell.to_numpy(),\n inner_velocities,\n outer_velocities,\n times,\n dt_array,\n effective_time_array,\n energy_bins,\n energy_df_rows,\n energy_plot_df_rows,\n energy_out,\n )\n\n # DataFrame of energy information\n energy_plot_df = pd.DataFrame(\n data=energy_plot_df_rows,\n columns=[\n \"packet_index\",\n \"energy_input\",\n \"energy_input_r\",\n \"energy_input_time\",\n \"energy_input_type\",\n \"compton_opacity\",\n \"photoabsorption_opacity\",\n \"total_opacity\",\n ],\n )\n\n # DataFrame of positron energies\n energy_plot_positrons = pd.DataFrame(\n data=energy_plot_positron_rows,\n columns=[\n \"packet_index\",\n \"energy_input\",\n \"energy_input_r\",\n \"energy_input_time\",\n ],\n )\n\n # DataFrame of estimated deposition\n # Multiply dataframes by inv_volume_time array\n # if per unit volume is needed\n energy_estimated_deposition = (\n pd.DataFrame(data=deposition_estimator, columns=times[:-1])\n ) / dt_array\n\n # Energy is eV/s\n energy_df = pd.DataFrame(data=energy_df_rows, columns=times[:-1]) / dt_array\n\n final_energy = 0\n for p in packets:\n final_energy += p.energy_rf\n\n print(\"Final energy to test for conservation\")\n print(final_energy)\n\n escape_energy = pd.DataFrame(\n data=energy_out, columns=times[:-1], index=energy_bins\n )\n\n return (\n energy_df,\n energy_plot_df,\n escape_energy,\n decayed_packet_count,\n energy_plot_positrons,\n energy_estimated_deposition,\n )", "def force_12(alpha, r1, r2, R_particle, eps_particle, k, eps_out, eps_in,\n fiber_radius, nmin, nmax, kzimax, E0_mod, nmin_sc, nmax_sc, case):\n\n dr = 1 / k * 1e-5\n dz = dr\n dtheta = 1e-5\n\n p1 = dipole_moment(1, r1, r2, R_particle, eps_particle, k, eps_out, eps_in,\n fiber_radius, nmin, nmax, kzimax,\n E0_mod, nmin_sc, nmax_sc, case)\n p1c = p1.conjugate()\n\n # Fr\n if alpha == 0:\n r1plusdr = r1 + np.array([dr, 0, 0])\n r1minusdr = r1 - np.array([dr, 0, 0])\n Eplusr = total_loc_efield(1, r1plusdr, r2, k, case, nmin, nmax, kzimax,\n fiber_radius, eps_out, eps_in, E0_mod,\n nmin_sc, nmax_sc, 
R_particle, eps_particle)\n Eminusr = total_loc_efield(1, r1minusdr, r2, k, case, nmin, nmax, kzimax,\n fiber_radius, eps_out, eps_in, E0_mod,\n nmin_sc, nmax_sc, R_particle, eps_particle)\n grad_r = (Eplusr - Eminusr) / (2 * dr)\n\n return(0.5 * np.dot(p1c, grad_r).real)\n # Ftheta\n elif alpha == 1:\n r1plusdtheta = r1 + np.array([0, dtheta, 0])\n r1minusdtheta = r1 - np.array([0, dtheta, 0])\n\n Eplustheta = total_loc_efield(1, r1plusdtheta, r2, k, case, nmin, nmax, kzimax,\n fiber_radius, eps_out, eps_in, E0_mod,\n nmin_sc, nmax_sc, R_particle, eps_particle)\n Eminustheta = total_loc_efield(1, r1minusdtheta, r2, k, case, nmin, nmax, kzimax,\n fiber_radius, eps_out, eps_in, E0_mod,\n nmin_sc, nmax_sc, R_particle, eps_particle)\n grad_theta = (Eplustheta - Eminustheta) / (r1[0] * 2 * dtheta)\n\n return(0.5 * np.dot(p1c, grad_theta).real)\n # Fz\n elif alpha == 2:\n r1plusdz = r1 + np.array([0, 0, dz])\n r1minusdz = r1 - np.array([0, 0, dz])\n\n Eplusz = total_loc_efield(1, r1plusdz, r2, k, case, nmin, nmax, kzimax,\n fiber_radius, eps_out, eps_in, E0_mod,\n nmin_sc, nmax_sc, R_particle, eps_particle)\n Eminusz = total_loc_efield(1, r1minusdz, r2, k, case, nmin, nmax, kzimax,\n fiber_radius, eps_out, eps_in, E0_mod,\n nmin_sc, nmax_sc, R_particle, eps_particle)\n grad_z = (Eplusz - Eminusz) / (2 * dz)\n\n return(0.5 * np.dot(p1c, grad_z).real)\n else:\n print('alpha is out of range!')\n return(0)", "def compute_desired_velocity(self):\n mask_red = (self.image_red == 255) \\\n *(self.image_green == 0) \\\n *(self.image_blue == 0)\n ind_red = sp.where( mask_red )\n phi = sp.ones(self.image_red.shape)\n phi[ind_red] = 0\n phi = sp.ma.MaskedArray(phi, mask=self.mask)\n numpy.set_printoptions(threshold=sys.maxsize)\n self.door_distance = skfmm.distance(phi, dx=self.pixel_size)\n tmp_dist = self.door_distance.filled(9999)\n grad = sp.gradient(tmp_dist,edge_order=2)\n grad_X = -grad[1]/self.pixel_size\n grad_Y = -grad[0]/self.pixel_size\n norm = sp.sqrt(grad_X**2+grad_Y**2)\n norm = (norm>0)*norm+(norm==0)*0.001\n self.desired_velocity_X = self.vmax * (grad_X/norm)\n self.desired_velocity_Y = self.vmax * (grad_Y/norm)\n '''plt.subplot(1,2,1)\n plt.imshow(self.desired_velocity_X, cmap='hot', interpolation='nearest')\n plt.gca().invert_yaxis()\n plt.colorbar()\n plt.subplot(1,2,2)\n plt.imshow(self.desired_velocity_X, cmap='hot', interpolation='nearest')\n plt.gca().invert_yaxis()\n plt.colorbar()\n plt.show()'''\n return self.door_distance, self.desired_velocity_X, self.desired_velocity_Y", "def calculate_pressure_layers(P_surface = 100000,P_Cutoff = 0.00001):\n layers = np.ceil(-np.log(P_Cutoff/P_surface)) \n return [float(\"%.3g\"%x) for x in np.exp(-np.arange(layers))*P_surface]", "def calculate_muscl_fluxes(densities, pressures, velocities, gamma,\n mass_ratios, specific_heats, molar_masses, dt_over_dx):\n # Get half step densities\n limiter = UltraBeeLimiter()\n half_step_densities_L = np.zeros(len(densities) - 2)\n half_step_velocities_L = np.zeros(half_step_densities_L.shape)\n half_step_pressures_L = np.zeros(half_step_densities_L.shape)\n half_step_mass_ratios_L = np.zeros((len(densities) - 2, len(specific_heats)))\n half_step_densities_R = np.zeros(half_step_densities_L.shape)\n half_step_velocities_R = np.zeros(half_step_densities_L.shape)\n half_step_pressures_R = np.zeros(half_step_densities_L.shape)\n half_step_mass_ratios_R = np.zeros(half_step_mass_ratios_L.shape)\n for i, dens in enumerate(half_step_densities_L):\n idx = i + 1\n\n # Calculate slopes\n left_slopes = dict()\n 
left_slopes[\"rho\"] = (densities[idx] - densities[idx - 1]) / 2\n left_slopes[\"mom\"] = (densities[idx] * velocities[idx] - densities[idx - 1] * velocities[idx - 1]) / 2\n cell_energy = 0.5 * densities[idx] * velocities[idx] * velocities[idx] + pressures[idx] / (gamma[idx] - 1)\n behind_energy = 0.5 * densities[idx - 1] * velocities[idx - 1] * velocities[idx - 1] + pressures[idx - 1] / (gamma[idx - 1] - 1)\n left_slopes[\"energy\"] = (cell_energy - behind_energy) / 2\n\n right_slopes = dict()\n right_slopes[\"rho\"] = (densities[idx + 1] - densities[idx]) / 2\n right_slopes[\"mom\"] = (densities[idx + 1] * velocities[idx + 1] - densities[idx] * velocities[idx]) / 2\n forward_energy = 0.5 * densities[idx + 1] * velocities[idx + 1] * velocities[idx + 1] + pressures[idx + 1] / (gamma[idx + 1] - 1)\n right_slopes[\"energy\"] = (forward_energy - cell_energy) / 2\n\n average_density_slope, average_momentum_slope, average_energy_slope = limiter.calculate_limited_slopes(left_slopes, right_slopes)\n\n # Interpolate left and right densities\n left_density = densities[idx] - average_density_slope\n left_momentum = densities[idx] * velocities[idx] - average_momentum_slope\n left_energy = cell_energy - average_energy_slope\n left_mass_ratios = mass_ratios[idx, :]\n assert left_density > 0, left_density\n assert left_energy > 0, left_energy\n assert np.isclose(1.0, left_mass_ratios.sum(), 1e-14)\n\n right_density = densities[idx] + average_density_slope\n right_momentum = densities[idx] * velocities[idx] + average_momentum_slope\n right_energy = cell_energy + average_energy_slope\n right_mass_ratios = mass_ratios[idx, :]\n assert right_density > 0, right_density\n assert right_energy > 0, right_energy\n assert np.isclose(1.0, right_mass_ratios.sum(), 1e-14)\n\n # Perform half step flux\n left_velocity = left_momentum / left_density\n left_density_flux = left_momentum\n left_internal_energy = left_energy - 0.5 * left_momentum * left_velocity\n left_pressure = left_internal_energy * (gamma[idx] - 1)\n left_momentum_flux = left_momentum * left_velocity + left_pressure\n left_energy_flux = (left_energy + left_pressure) * left_velocity\n\n right_velocity = right_momentum / right_density\n right_density_flux = right_momentum\n right_internal_energy = right_energy - 0.5 * right_momentum * right_velocity\n right_pressure = right_internal_energy * (gamma[idx] - 1)\n right_momentum_flux = right_momentum * right_velocity + right_pressure\n right_energy_flux = (right_energy + right_pressure) * right_velocity\n\n half_step_density_flux = (left_density_flux - right_density_flux) * dt_over_dx * 0.5\n half_step_momentum_flux = (left_momentum_flux - right_momentum_flux) * dt_over_dx * 0.5\n half_step_energy_flux = (left_energy_flux - right_energy_flux) * dt_over_dx * 0.5\n\n state = ThermodynamicState1D(left_pressure, left_density, left_velocity, gamma[idx], left_mass_ratios)\n state.update_states(half_step_density_flux,\n half_step_momentum_flux,\n half_step_energy_flux,\n specific_heats, molar_masses)\n half_step_densities_L[i] = state.rho\n half_step_velocities_L[i] = state.u\n half_step_pressures_L[i] = state.p\n half_step_mass_ratios_L[i, :] = state.mass_ratios\n\n state = ThermodynamicState1D(right_pressure, right_density, right_velocity, gamma[idx], right_mass_ratios)\n state.update_states(half_step_density_flux,\n half_step_momentum_flux,\n half_step_energy_flux,\n specific_heats, molar_masses)\n half_step_densities_R[i] = state.rho\n half_step_velocities_R[i] = state.u\n half_step_pressures_R[i] = state.p\n 
half_step_mass_ratios_R[i, :] = state.mass_ratios\n\n # Calculate final fluxes\n density_fluxes = np.zeros(len(half_step_densities_R) - 1)\n momentum_fluxes = np.zeros(len(half_step_densities_R) - 1)\n total_energy_fluxes = np.zeros(len(half_step_densities_R) - 1)\n mass_ratio_fluxes = np.zeros((len(half_step_densities_R) - 1, mass_ratios.shape[1]))\n\n for i, dens_flux in enumerate(density_fluxes):\n solver = IterativeRiemannSolver()\n\n # Generate left and right states from cell averaged values\n left_state = ThermodynamicState1D(half_step_pressures_R[i],\n half_step_densities_R[i],\n half_step_velocities_R[i],\n gamma[i],\n half_step_mass_ratios_L[i, :])\n right_state = ThermodynamicState1D(half_step_pressures_L[i + 1],\n half_step_densities_L[i + 1],\n half_step_velocities_L[i + 1],\n gamma[i + 1],\n half_step_mass_ratios_R[i + 1, :])\n\n # Solve Riemann problem for star states\n p_star, u_star = solver.get_star_states(left_state, right_state)\n\n # Calculate fluxes using solver sample function\n p_flux, u_flux, rho_flux, is_left = solver.sample(0.0, left_state, right_state, p_star, u_star)\n\n # Store fluxes in array\n mass_ratio_fluxes[i, :] = left_state.mass_ratios if is_left else right_state.mass_ratios\n flux_gamma = left_state.gamma if is_left else right_state.gamma\n density_fluxes[i] = rho_flux * u_flux\n momentum_fluxes[i] = rho_flux * u_flux * u_flux + p_flux\n e_tot = p_flux / (flux_gamma - 1) + 0.5 * rho_flux * u_flux * u_flux\n total_energy_fluxes[i] = (p_flux + e_tot) * u_flux\n\n return density_fluxes, momentum_fluxes, total_energy_fluxes, mass_ratio_fluxes", "def compare_averages_shell_pspec_dft():\n\n select_radius = 5. #degrees\n\n Nside=256\n Npix = 12 * Nside**2\n Omega = 4*np.pi/float(Npix)\n\n Nfreq = 100\n freqs = np.linspace(167.0, 177.0, Nfreq)\n dnu = np.diff(freqs)[0]\n Z = 1420/freqs - 1.\n\n sig = 2.0\n mu = 0.0\n shell = np.random.normal(mu, sig, (Npix, Nfreq))\n\n dV = comoving_voxel_volume(Z[Nfreq/2], dnu, Omega)\n variances = []\n means = []\n pks = []\n\n gs = gridspec.GridSpec(2, 3)\n fig = pl.figure()\n\n ax0 = pl.subplot(gs[0, 0:2])\n ax1 = pl.subplot(gs[1, 0])\n ax3 = pl.subplot(gs[1, 1])\n ax2 = pl.subplot(gs[:, 2])\n\n steps = range(10,110,10)\n vmin,vmax = min(steps),max(steps)\n normalize = mcolors.Normalize(vmin=vmin, vmax=vmax)\n colormap = cm.viridis\n\n for n in steps:\n Nkbins = 100\n kbins, pk = pspec_funcs.shell_project_pspec(shell, Nside, select_radius, freqs=freqs, Nkbins=Nkbins, N_sections=n, cosmo=True, method='dft', error=False)\n variances.append(np.var(pk[0:Nkbins-5]))\n means.append(np.mean(pk[0:Nkbins-5]))\n pks.append(pk)\n ax0.plot(kbins, pk, label=str(n), color=colormap(normalize(n)))\n\n ax0.axhline(y=dV*sig**2, color='k', lw=2.0)\n# ax0.legend()\n scalarmappable = cm.ScalarMappable(norm=normalize, cmap=colormap)\n scalarmappable.set_array(steps)\n fig.colorbar(scalarmappable,label=r'Number of snapshots', ax=ax0)\n ax0.set_ylabel(r\"P(k) [mK$^2$ Mpc$^{3}]$\")\n ax0.set_xlabel(r\"k [Mpc$^{-1}]$\")\n ax1.plot(steps, np.array(variances), label=\"Variance\")\n ax1.set_ylabel(r\"Variance(P(k)) [mK$^4$ Mpc$^{6}]$\")\n ax1.set_xlabel(u\"Number of 5° snapshots\")\n ax3.plot(steps, means, label=\"Mean\")\n ax3.set_ylabel(r\"Mean(P(k)) [mK$^2$ Mpc$^{3}]$\")\n ax3.set_xlabel(u\"Number of 5° snapshots\")\n ax1.legend()\n ax3.legend()\n im = ax2.imshow(np.array(pks)[:,0:Nkbins-5], aspect='auto')#, norm=mcolors.LogNorm())\n fig.colorbar(im, ax=ax2)\n print('Fractional deviation: ', np.mean(np.abs(pk - dV*sig**2)))\n pl.show()", "def 
get_variables(self, z0, u_inf):\n # Get the ambient data from the CTD profile\n Ta, Sa, P = self.profile.get_values(z0, ['temperature', 'salinity',\n 'pressure'])\n rho = seawater.density(Ta, Sa, P)\n \n # Compute the properties of each dispersed-phase particle\n us = np.zeros(len(self.particles))\n rho_p = np.zeros(len(self.particles))\n m_p = np.zeros(len(self.particles))\n B_p = np.zeros(len(self.particles))\n for i in range(len(self.particles)):\n m0 = self.particles[i].m0\n T0 = self.particles[i].T0\n m_p[i] = np.sum(m0) * self.particles[i].nb0\n if m_p[i] > 0.:\n # Particles exist, get properties. Make sure the algorithm \n # uses the dirty bubble properties since this is supposed\n # to be the rise velocity averaged over the whole plume.\n us[i], rho_p[i]= self.particles[i].properties(m0, T0, P, Sa, \n Ta, np.inf)[0:2]\n B_p[i] = (rho - rho_p[i]) / rho * 9.81 * (m_p[i] / rho_p[i])\n else:\n # Particles dissolved, set to ambient conditions\n us[i] = 0.\n rho_p[i] = rho\n B_p[i] = 0.\n \n # Select the correct slip velocity\n u_slip = us[0]\n for i in range(len(self.particles) - 1):\n if B_p[i+1] > B_p[i]:\n u_slip = us[i+1]\n \n # Compute the total buoyancy flux\n B = np.sum(B_p)\n \n # Get the ambient buoyancy frequency\n N = self.profile.buoyancy_frequency(z0)\n \n # Return the governing parameters\n return (B, N, u_slip, u_inf)", "def get_effective_mass():\n\n H_BAR = 6.582119514e-16 # eV*s\n M_0 = 9.10938356e-31 # kg\n N_KPTS = 6 # Number of k-points included in the parabola.\n\n spin_up = Spin(1)\n\n band_structure = Vasprun('vasprun.xml').get_band_structure()\n\n # Locations of CBM and VBM in band_structure.bands\n cbm_band_index = band_structure.get_cbm()['band_index'][spin_up][0]\n cbm_kpoint_index = band_structure.get_cbm()['kpoint_index'][0]\n\n vbm_band_index = band_structure.get_vbm()['band_index'][spin_up][0]\n vbm_kpoint_index = band_structure.get_vbm()['kpoint_index'][0]\n\n k = {'electron': {'left': [], 'right': []},\n 'hole': {'left': [], 'right': []}}\n E = {'electron': {'left': [], 'right': []},\n 'hole': {'left': [], 'right': []}}\n\n e_ref_coords = band_structure.kpoints[cbm_kpoint_index]._ccoords\n h_ref_coords = band_structure.kpoints[vbm_kpoint_index]._ccoords\n\n for n in range(-N_KPTS, 1):\n e_coords = band_structure.kpoints[cbm_kpoint_index + n]._ccoords\n h_coords = band_structure.kpoints[vbm_kpoint_index + n]._ccoords\n\n k['electron']['left'].append(\n ((e_coords[0] - e_ref_coords[0])**2 +\n (e_coords[1] - e_ref_coords[1])**2 +\n (e_coords[2] - e_ref_coords[2])**2)**0.5\n )\n k['hole']['left'].append(\n ((h_coords[0] - h_ref_coords[0])**2 +\n (h_coords[1] - h_ref_coords[1])**2 +\n (h_coords[2] - h_ref_coords[2])**2)**0.5\n )\n\n e_energy = band_structure.bands[\n spin_up][cbm_band_index][cbm_kpoint_index + n]\n h_energy = band_structure.bands[\n spin_up][vbm_band_index][vbm_kpoint_index + n]\n\n E['electron']['left'].append(e_energy)\n E['hole']['left'].append(h_energy)\n\n for n in range(1, 1 + N_KPTS):\n e_coords = band_structure.kpoints[cbm_kpoint_index + n]._ccoords\n h_coords = band_structure.kpoints[vbm_kpoint_index + n]._ccoords\n\n k['electron']['right'].append(\n ((e_coords[0] - e_ref_coords[0])**2 +\n (e_coords[1] - e_ref_coords[1])**2 +\n (e_coords[2] - e_ref_coords[2])**2)**0.5\n )\n k['hole']['right'].append(\n ((h_coords[0] - h_ref_coords[0])**2 +\n (h_coords[1] - h_ref_coords[1])**2 +\n (h_coords[2] - h_ref_coords[2])**2)**0.5\n )\n\n e_energy = band_structure.bands[\n spin_up][cbm_band_index][cbm_kpoint_index + n]\n h_energy = 
band_structure.bands[\n spin_up][vbm_band_index][vbm_kpoint_index + n]\n\n E['electron']['right'].append(e_energy)\n E['hole']['right'].append(h_energy)\n\n # 2nd order fits\n e_l_fit = np.poly1d(\n np.polyfit(k['electron']['left'], E['electron']['left'], 2))\n e_r_fit = np.poly1d(\n np.polyfit(k['electron']['right'], E['electron']['right'], 2))\n h_l_fit = np.poly1d(\n np.polyfit(k['hole']['left'], E['hole']['left'], 2))\n h_r_fit = np.poly1d(\n np.polyfit(k['hole']['right'], E['hole']['right'], 2))\n\n # Curvatures\n e_l_curvature = e_l_fit.deriv().deriv()[0]\n e_r_curvature = e_r_fit.deriv().deriv()[0]\n h_l_curvature = h_l_fit.deriv().deriv()[0]\n h_r_curvature = h_r_fit.deriv().deriv()[0]\n\n # Unit conversion\n e_m_eff_l = 10 * ((H_BAR ** 2) / e_l_curvature) / M_0\n e_m_eff_r = 10 * ((H_BAR ** 2) / e_r_curvature) / M_0\n h_m_eff_l = -10 * ((H_BAR ** 2) / h_l_curvature) / M_0\n h_m_eff_r = -10 * ((H_BAR ** 2) / h_r_curvature) / M_0\n\n return {'electron': {'left': e_m_eff_l, 'right': e_m_eff_r},\n 'hole': {'left': h_m_eff_l, 'right': h_m_eff_r}}", "def evolve(self):\n\n rho = self.cc_data.get_var(\"density\")\n u = self.cc_data.get_var(\"x-velocity\")\n v = self.cc_data.get_var(\"y-velocity\")\n\n gradp_x = self.cc_data.get_var(\"gradp_x\")\n gradp_y = self.cc_data.get_var(\"gradp_y\")\n\n # note: the base state quantities do not have valid ghost cells\n beta0 = self.base[\"beta0\"]\n beta0_edges = self.base[\"beta0-edges\"]\n\n rho0 = self.base[\"rho0\"]\n\n phi = self.cc_data.get_var(\"phi\")\n\n myg = self.cc_data.grid\n\n # ---------------------------------------------------------------------\n # create the limited slopes of rho, u and v (in both directions)\n # ---------------------------------------------------------------------\n limiter = self.rp.get_param(\"lm-atmosphere.limiter\")\n\n ldelta_rx = reconstruction.limit(rho, myg, 1, limiter)\n ldelta_ux = reconstruction.limit(u, myg, 1, limiter)\n ldelta_vx = reconstruction.limit(v, myg, 1, limiter)\n\n ldelta_ry = reconstruction.limit(rho, myg, 2, limiter)\n ldelta_uy = reconstruction.limit(u, myg, 2, limiter)\n ldelta_vy = reconstruction.limit(v, myg, 2, limiter)\n\n # ---------------------------------------------------------------------\n # get the advective velocities\n # ---------------------------------------------------------------------\n\n \"\"\"\n the advective velocities are the normal velocity through each cell\n interface, and are defined on the cell edges, in a MAC type\n staggered form\n\n n+1/2\n v\n i,j+1/2\n +------+------+\n | |\n n+1/2 | | n+1/2\n u + U + u\n i-1/2,j | i,j | i+1/2,j\n | |\n +------+------+\n n+1/2\n v\n i,j-1/2\n\n \"\"\"\n\n # this returns u on x-interfaces and v on y-interfaces. 
These\n # constitute the MAC grid\n if self.verbose > 0:\n print(\" making MAC velocities\")\n\n # create the coefficient to the grad (pi/beta) term\n coeff = self.aux_data.get_var(\"coeff\")\n coeff.v()[:, :] = 1.0/rho.v()\n coeff.v()[:, :] = coeff.v()*beta0.v2d()\n self.aux_data.fill_BC(\"coeff\")\n\n # create the source term\n source = self.aux_data.get_var(\"source_y\")\n\n g = self.rp.get_param(\"lm-atmosphere.grav\")\n rhoprime = self.make_prime(rho, rho0)\n source.v()[:, :] = rhoprime.v()*g/rho.v()\n self.aux_data.fill_BC(\"source_y\")\n\n _um, _vm = lm_interface.mac_vels(myg.ng, myg.dx, myg.dy, self.dt,\n u, v,\n ldelta_ux, ldelta_vx,\n ldelta_uy, ldelta_vy,\n coeff*gradp_x, coeff*gradp_y,\n source)\n\n u_MAC = ai.ArrayIndexer(d=_um, grid=myg)\n v_MAC = ai.ArrayIndexer(d=_vm, grid=myg)\n\n # ---------------------------------------------------------------------\n # do a MAC projection to make the advective velocities divergence\n # free\n # ---------------------------------------------------------------------\n\n # we will solve D (beta_0^2/rho) G phi = D (beta_0 U^MAC), where\n # phi is cell centered, and U^MAC is the MAC-type staggered\n # grid of the advective velocities.\n\n if self.verbose > 0:\n print(\" MAC projection\")\n\n # create the coefficient array: beta0**2/rho\n # MZ!!!! probably don't need the buf here\n coeff.v(buf=1)[:, :] = 1.0/rho.v(buf=1)\n coeff.v(buf=1)[:, :] = coeff.v(buf=1)*beta0.v2d(buf=1)**2\n\n # create the multigrid object\n mg = vcMG.VarCoeffCCMG2d(myg.nx, myg.ny,\n xl_BC_type=self.cc_data.BCs[\"phi-MAC\"].xlb,\n xr_BC_type=self.cc_data.BCs[\"phi-MAC\"].xrb,\n yl_BC_type=self.cc_data.BCs[\"phi-MAC\"].ylb,\n yr_BC_type=self.cc_data.BCs[\"phi-MAC\"].yrb,\n xmin=myg.xmin, xmax=myg.xmax,\n ymin=myg.ymin, ymax=myg.ymax,\n coeffs=coeff,\n coeffs_bc=self.cc_data.BCs[\"density\"],\n verbose=0)\n\n # first compute div{beta_0 U}\n div_beta_U = mg.soln_grid.scratch_array()\n\n # MAC velocities are edge-centered. div{beta_0 U} is cell-centered.\n div_beta_U.v()[:, :] = \\\n beta0.v2d()*(u_MAC.ip(1) - u_MAC.v())/myg.dx + \\\n (beta0_edges.v2dp(1)*v_MAC.jp(1) -\n beta0_edges.v2d()*v_MAC.v())/myg.dy\n\n # solve the Poisson problem\n mg.init_RHS(div_beta_U)\n mg.solve(rtol=1.e-12)\n\n # update the normal velocities with the pressure gradient -- these\n # constitute our advective velocities. 
Note that what we actually\n # solved for here is phi/beta_0\n phi_MAC = self.cc_data.get_var(\"phi-MAC\")\n phi_MAC[:, :] = mg.get_solution(grid=myg)\n\n coeff = self.aux_data.get_var(\"coeff\")\n coeff.v()[:, :] = 1.0/rho.v()\n coeff.v()[:, :] = coeff.v()*beta0.v2d()\n self.aux_data.fill_BC(\"coeff\")\n\n coeff_x = myg.scratch_array()\n b = (3, 1, 0, 0) # this seems more than we need\n coeff_x.v(buf=b)[:, :] = 0.5*(coeff.ip(-1, buf=b) + coeff.v(buf=b))\n\n coeff_y = myg.scratch_array()\n b = (0, 0, 3, 1)\n coeff_y.v(buf=b)[:, :] = 0.5*(coeff.jp(-1, buf=b) + coeff.v(buf=b))\n\n # we need the MAC velocities on all edges of the computational domain\n # here we do U = U - (beta_0/rho) grad (phi/beta_0)\n b = (0, 1, 0, 0)\n u_MAC.v(buf=b)[:, :] -= \\\n coeff_x.v(buf=b)*(phi_MAC.v(buf=b) - phi_MAC.ip(-1, buf=b))/myg.dx\n\n b = (0, 0, 0, 1)\n v_MAC.v(buf=b)[:, :] -= \\\n coeff_y.v(buf=b)*(phi_MAC.v(buf=b) - phi_MAC.jp(-1, buf=b))/myg.dy\n\n # ---------------------------------------------------------------------\n # predict rho to the edges and do its conservative update\n # ---------------------------------------------------------------------\n _rx, _ry = lm_interface.rho_states(myg.ng, myg.dx, myg.dy, self.dt,\n rho, u_MAC, v_MAC,\n ldelta_rx, ldelta_ry)\n\n rho_xint = ai.ArrayIndexer(d=_rx, grid=myg)\n rho_yint = ai.ArrayIndexer(d=_ry, grid=myg)\n\n rho_old = rho.copy()\n\n rho.v()[:, :] -= self.dt*(\n # (rho u)_x\n (rho_xint.ip(1)*u_MAC.ip(1) - rho_xint.v()*u_MAC.v())/myg.dx +\n # (rho v)_y\n (rho_yint.jp(1)*v_MAC.jp(1) - rho_yint.v()*v_MAC.v())/myg.dy)\n\n self.cc_data.fill_BC(\"density\")\n\n # update eint as a diagnostic\n eint = self.cc_data.get_var(\"eint\")\n gamma = self.rp.get_param(\"eos.gamma\")\n eint.v()[:, :] = self.base[\"p0\"].v2d()/(gamma - 1.0)/rho.v()\n\n # ---------------------------------------------------------------------\n # recompute the interface states, using the advective velocity\n # from above\n # ---------------------------------------------------------------------\n if self.verbose > 0:\n print(\" making u, v edge states\")\n\n coeff = self.aux_data.get_var(\"coeff\")\n coeff.v()[:, :] = 2.0/(rho.v() + rho_old.v())\n coeff.v()[:, :] = coeff.v()*beta0.v2d()\n self.aux_data.fill_BC(\"coeff\")\n\n _ux, _vx, _uy, _vy = \\\n lm_interface.states(myg.ng, myg.dx, myg.dy, self.dt,\n u, v,\n ldelta_ux, ldelta_vx,\n ldelta_uy, ldelta_vy,\n coeff*gradp_x, coeff*gradp_y,\n source,\n u_MAC, v_MAC)\n\n u_xint = ai.ArrayIndexer(d=_ux, grid=myg)\n v_xint = ai.ArrayIndexer(d=_vx, grid=myg)\n u_yint = ai.ArrayIndexer(d=_uy, grid=myg)\n v_yint = ai.ArrayIndexer(d=_vy, grid=myg)\n\n # ---------------------------------------------------------------------\n # update U to get the provisional velocity field\n # ---------------------------------------------------------------------\n if self.verbose > 0:\n print(\" doing provisional update of u, v\")\n\n # compute (U.grad)U\n\n # we want u_MAC U_x + v_MAC U_y\n advect_x = myg.scratch_array()\n advect_y = myg.scratch_array()\n\n advect_x.v()[:, :] = \\\n 0.5*(u_MAC.v() + u_MAC.ip(1))*(u_xint.ip(1) - u_xint.v())/myg.dx +\\\n 0.5*(v_MAC.v() + v_MAC.jp(1))*(u_yint.jp(1) - u_yint.v())/myg.dy\n\n advect_y.v()[:, :] = \\\n 0.5*(u_MAC.v() + u_MAC.ip(1))*(v_xint.ip(1) - v_xint.v())/myg.dx +\\\n 0.5*(v_MAC.v() + v_MAC.jp(1))*(v_yint.jp(1) - v_yint.v())/myg.dy\n\n proj_type = self.rp.get_param(\"lm-atmosphere.proj_type\")\n\n if proj_type == 1:\n u.v()[:, :] -= (self.dt*advect_x.v() + self.dt*gradp_x.v())\n v.v()[:, :] -= (self.dt*advect_y.v() + 
self.dt*gradp_y.v())\n\n elif proj_type == 2:\n u.v()[:, :] -= self.dt*advect_x.v()\n v.v()[:, :] -= self.dt*advect_y.v()\n\n # add the gravitational source\n rho_half = 0.5*(rho + rho_old)\n rhoprime = self.make_prime(rho_half, rho0)\n source[:, :] = rhoprime*g/rho_half\n self.aux_data.fill_BC(\"source_y\")\n\n v[:, :] += self.dt*source\n\n self.cc_data.fill_BC(\"x-velocity\")\n self.cc_data.fill_BC(\"y-velocity\")\n\n if self.verbose > 0:\n print(\"min/max rho = {}, {}\".format(self.cc_data.min(\"density\"), self.cc_data.max(\"density\")))\n print(\"min/max u = {}, {}\".format(self.cc_data.min(\"x-velocity\"), self.cc_data.max(\"x-velocity\")))\n print(\"min/max v = {}, {}\".format(self.cc_data.min(\"y-velocity\"), self.cc_data.max(\"y-velocity\")))\n\n # ---------------------------------------------------------------------\n # project the final velocity\n # ---------------------------------------------------------------------\n\n # now we solve L phi = D (U* /dt)\n if self.verbose > 0:\n print(\" final projection\")\n\n # create the coefficient array: beta0**2/rho\n coeff = 1.0/rho\n coeff.v()[:, :] = coeff.v()*beta0.v2d()**2\n\n # create the multigrid object\n mg = vcMG.VarCoeffCCMG2d(myg.nx, myg.ny,\n xl_BC_type=self.cc_data.BCs[\"phi\"].xlb,\n xr_BC_type=self.cc_data.BCs[\"phi\"].xrb,\n yl_BC_type=self.cc_data.BCs[\"phi\"].ylb,\n yr_BC_type=self.cc_data.BCs[\"phi\"].yrb,\n xmin=myg.xmin, xmax=myg.xmax,\n ymin=myg.ymin, ymax=myg.ymax,\n coeffs=coeff,\n coeffs_bc=self.cc_data.BCs[\"density\"],\n verbose=0)\n\n # first compute div{beta_0 U}\n\n # u/v are cell-centered, divU is cell-centered\n div_beta_U.v()[:, :] = \\\n 0.5*beta0.v2d()*(u.ip(1) - u.ip(-1))/myg.dx + \\\n 0.5*(beta0.v2dp(1)*v.jp(1) - beta0.v2dp(-1)*v.jp(-1))/myg.dy\n\n mg.init_RHS(div_beta_U/self.dt)\n\n # use the old phi as our initial guess\n phiGuess = mg.soln_grid.scratch_array()\n phiGuess.v(buf=1)[:, :] = phi.v(buf=1)\n mg.init_solution(phiGuess)\n\n # solve\n mg.solve(rtol=1.e-12)\n\n # store the solution in our self.cc_data object -- include a single\n # ghostcell\n phi[:, :] = mg.get_solution(grid=myg)\n\n # get the cell-centered gradient of p and update the velocities\n # this differs depending on what we projected.\n gradphi_x, gradphi_y = mg.get_solution_gradient(grid=myg)\n\n # U = U - (beta_0/rho) grad (phi/beta_0)\n coeff = 1.0/rho\n coeff.v()[:, :] = coeff.v()*beta0.v2d()\n\n u.v()[:, :] -= self.dt*coeff.v()*gradphi_x.v()\n v.v()[:, :] -= self.dt*coeff.v()*gradphi_y.v()\n\n # store gradp for the next step\n\n if proj_type == 1:\n gradp_x.v()[:, :] += gradphi_x.v()\n gradp_y.v()[:, :] += gradphi_y.v()\n\n elif proj_type == 2:\n gradp_x.v()[:, :] = gradphi_x.v()\n gradp_y.v()[:, :] = gradphi_y.v()\n\n self.cc_data.fill_BC(\"x-velocity\")\n self.cc_data.fill_BC(\"y-velocity\")\n\n self.cc_data.fill_BC(\"gradp_x\")\n self.cc_data.fill_BC(\"gradp_y\")\n\n # increment the time\n if not self.in_preevolve:\n self.cc_data.t += self.dt\n self.n += 1", "def wfc3Dispersion(xc, yc, subarray=256):\n coord0 = (1014 - subarray) // 2\n xc = xc + coord0\n yc = yc + coord0\n DLDP0 = [8949.40742544, 0.08044032819916265]\n DLDP1 = [44.97227893276267,\n 0.0004927891511929662,\n 0.0035782416625653765,\n -9.175233345083485e-7,\n 2.2355060371418054e-7, -9.258690000316504e-7]\n # calculate field dependent dispersion coefficient\n p0 = DLDP0[0] + DLDP0[1] * xc\n p1 = DLDP1[0] + DLDP1[1] * xc + DLDP1[2] * yc + \\\n DLDP1[3] * xc**2 + DLDP1[4] * xc * yc + DLDP1[5] * yc**2\n dx = np.arange(1014) - xc\n wavelength = (p0 + dx * p1)\n if 
subarray < 1014:\n i0 = (1014 - subarray) // 2\n wavelength = wavelength[i0: i0 + subarray]\n return wavelength", "def z_offline(ctx, w, k=0):\n s = ctx.mpf('0.5')+ctx.j*w\n s1 = s\n s2 = ctx.conj(1-s1)\n wpinitial = ctx.prec\n ctx.prec = 35\n # X see II Section 3.21 (109) and (110)\n # M1 see II Section 3.21 (111) and (112)\n if (ctx._re(s1) >= 0):\n M1 = 2*ctx.sqrt(ctx._im(s1)/(2 * ctx.pi))\n X = ctx.sqrt(abs(s1))\n else:\n X = (2*ctx.pi)**(ctx._re(s1)-1) * abs(1-s1)**(0.5-ctx._re(s1))\n M1 = 4 * ctx._im(s1)*X\n # M2 see II Section 3.21 (111) and (112)\n if (ctx._re(s2) >= 0):\n M2 = 2*ctx.sqrt(ctx._im(s2)/(2 * ctx.pi))\n else:\n M2 = 4 * ctx._im(s2)*(2*ctx.pi)**(ctx._re(s2)-1)*abs(1-s2)**(0.5-ctx._re(s2))\n # T see II Section 3.21 Prop. 27\n T = 2*abs(ctx.siegeltheta(w))\n # defining some precisions\n # see II Section 3.22 (115), (116), (117)\n aux1 = ctx.sqrt(X)\n aux2 = aux1*(M1+M2)\n aux3 = 3 +wpinitial\n wpbasic = max(6, 3+ctx.mag(T), ctx.mag(aux2*(26+2*T))+aux3)\n wptheta = max(4,ctx.mag(2.04*aux2)+aux3)\n wpR = ctx.mag(4*aux1)+aux3\n # now the computations\n ctx.prec = wptheta\n theta = ctx.siegeltheta(w)\n ctx.prec = wpR\n xrz, yrz = Rzeta_simul(ctx,s,k)\n pta = 0.25 + 0.5j*w\n ptb = 0.25 - 0.5j*w\n if k > 0: ps1 = 0.25*(ctx.psi(0,pta)+ctx.psi(0,ptb)) - ctx.ln(ctx.pi)/2\n if k > 1: ps2 = (1j/8)*(ctx.psi(1,pta)-ctx.psi(1,ptb))\n if k > 2: ps3 = (-1./16)*(ctx.psi(2,pta)+ctx.psi(2,ptb))\n if k > 3: ps4 = (-1j/32)*(ctx.psi(3,pta)-ctx.psi(3,ptb))\n ctx.prec = wpbasic\n exptheta = ctx.expj(theta)\n if k == 0:\n zv = exptheta*xrz[0]+yrz[0]/exptheta\n j = ctx.j\n if k == 1:\n zv = j*exptheta*(xrz[1]+xrz[0]*ps1)-j*(yrz[1]+yrz[0]*ps1)/exptheta\n if k == 2:\n zv = exptheta*(-2*xrz[1]*ps1-xrz[0]*ps1**2-xrz[2]+j*xrz[0]*ps2)\n zv =zv + (-2*yrz[1]*ps1-yrz[0]*ps1**2-yrz[2]-j*yrz[0]*ps2)/exptheta\n if k == 3:\n zv1 = -3*xrz[1]*ps1**2-xrz[0]*ps1**3-3*xrz[2]*ps1+j*3*xrz[1]*ps2\n zv1 = (zv1+ 3j*xrz[0]*ps1*ps2-xrz[3]+xrz[0]*ps3)*j*exptheta\n zv2 = 3*yrz[1]*ps1**2+yrz[0]*ps1**3+3*yrz[2]*ps1+j*3*yrz[1]*ps2\n zv2 = j*(zv2 + 3j*yrz[0]*ps1*ps2+ yrz[3]-yrz[0]*ps3)/exptheta\n zv = zv1+zv2\n if k == 4:\n zv1 = 4*xrz[1]*ps1**3+xrz[0]*ps1**4 + 6*xrz[2]*ps1**2\n zv1 = zv1-12j*xrz[1]*ps1*ps2-6j*xrz[0]*ps1**2*ps2-6j*xrz[2]*ps2\n zv1 = zv1-3*xrz[0]*ps2*ps2+4*xrz[3]*ps1-4*xrz[1]*ps3-4*xrz[0]*ps1*ps3\n zv1 = zv1+xrz[4]+j*xrz[0]*ps4\n zv2 = 4*yrz[1]*ps1**3+yrz[0]*ps1**4 + 6*yrz[2]*ps1**2\n zv2 = zv2+12j*yrz[1]*ps1*ps2+6j*yrz[0]*ps1**2*ps2+6j*yrz[2]*ps2\n zv2 = zv2-3*yrz[0]*ps2*ps2+4*yrz[3]*ps1-4*yrz[1]*ps3-4*yrz[0]*ps1*ps3\n zv2 = zv2+yrz[4]-j*yrz[0]*ps4\n zv = exptheta*zv1+zv2/exptheta\n ctx.prec = wpinitial\n return zv", "def _surface_runoff(self, SWi, saturation, field_capacity, whc, rf_coeff, geo_dict=None):\n\n saturation[saturation < 0] = np.nan\n field_capacity[field_capacity < 0] = np.nan\n whc[whc < 0] = np.nan\n\n # total runoff based on water left in soil after SAT-FC\n sat_fc = saturation - field_capacity\n Rf1 = SWi - whc\n # if runoff is < 0, make it 0\n Rf = np.zeros(SWi.shape)\n rf_boolean = (Rf1 >= 0)\n Rf[rf_boolean] = Rf1[rf_boolean]\n\n # Surface runoff\n SRf = np.zeros(SWi.shape)\n # SRf = if rf <= sat_fc, make it (rf * rf_coeff)(35% of the runoff value), else (rf - sat_fc) + (rf_coeff * sat_fc)\n SRf_boolean = (Rf <= sat_fc)\n SRf[SRf_boolean] = Rf[SRf_boolean] * rf_coeff\n SRf[~SRf_boolean] = (Rf[~SRf_boolean] - sat_fc[~SRf_boolean]) + rf_coeff * sat_fc[~SRf_boolean]\n # Deep Drainage\n # DDrain occurs if SWi > WHC, amount of DDrain is SAT <> WHC with a maximum DDrain of SAT - WHC\n DDrain = Rf 
- SRf\n\n return DDrain, SRf", "def solid_surface_density_S2014_given_observed_catalog(sss_per_sys, max_core_mass=10.):\n Mstar_obs = np.repeat(sss_per_sys['Mstar_obs'][:,None], np.shape(sss_per_sys['P_obs'])[1], axis=1)[sss_per_sys['P_obs'] > 0] # flattened array of stellar masses repeated for each planet\n a_obs_per_sys = gen.a_from_P(sss_per_sys['P_obs'], sss_per_sys['Mstar_obs'][:,None])\n a_obs = a_obs_per_sys[sss_per_sys['P_obs'] > 0]\n radii_obs = sss_per_sys['radii_obs'][sss_per_sys['P_obs'] > 0]\n core_mass_obs = generate_planet_mass_from_radius_Ning2018_table_above_lognormal_mass_earthlike_rocky_below_vec(radii_obs)\n core_mass_obs[core_mass_obs > max_core_mass] = max_core_mass\n sigma_obs = solid_surface_density_S2014(core_mass_obs, radii_obs, a_obs, Mstar=Mstar_obs)\n return sigma_obs, core_mass_obs, a_obs", "def contrast_curve_main(data, fwhm, instrument, position=None):\n # assign plate scale\n plate_scale_dict = {\"PHARO\": 0.025, \"ShARCS\": 0.0333}\n\n plate_scale = plate_scale_dict[instrument]\n\n #set radius_size so that radius is no larger than 1\"\n radius_size = np.min([1./plate_scale, fwhm])\n\n#DO NOT TAKE ABSOLUTE VALUE!\n contrast_result = contrast_curve_core(\n data, plate_scale, fwhm=fwhm, radius_size=radius_size, center=position\n )\n separation = contrast_result[0]\n means = contrast_result[1]\n stds = contrast_result[2]\n\n center_flux = run_ap_phot(data, fwhm, position=position)\n\n # intiialize the \"fake im fluxes\" with the central aperture flux.\n all_seps = [0]\n fake_im_fluxes = [center_flux[0]]\n fake_im_stds = [center_flux[1]]\n\n fake_ims = []\n\n for i, (all_mean, all_std) in enumerate(zip(means, stds)):\n # initialize fake fluxes for a given annulus\n fake_im_fluxes_an = []\n n_annuli = 12\n for j in range(n_annuli):\n mean = all_mean[j]\n std = all_std[j]\n x, y = np.meshgrid(np.arange(-1000, 1000), np.arange(-1000, 1000)) #was 100x100; CDD made larger for poor FWHMs\n dst = np.sqrt(x * x + y * y)\n\n # Initializing sigma and muu: size of fake injected source\n sigma = fwhm\n muu = 0.0\n\n bg_std = std\n\n noise_image = make_noise_image(\n (2000, 2000), distribution=\"gaussian\", mean=mean, stddev=bg_std\n ) #Was 200x200, but that's too small for some images because the sky annulus falls outside the fake image for high FWHM.\n # Calculating Gaussian array. tuned to a total STD=5\n fake = (\n 7 * std * np.exp(-((dst - muu) ** 2 / (2.0 * sigma**2)))\n + noise_image\n + 3\n )\n\n flux, err = run_ap_phot(fake, fwhm)\n\n # rescale to a full std of 5\n fixscale = (flux / err) / 5\n\n flux = flux / fixscale\n fake_im_fluxes_an += [flux]\n fake_im_fluxes += [np.nanmedian(fake_im_fluxes_an)]\n fake_im_stds += [np.nanstd(fake_im_fluxes_an)]\n all_seps += [separation[i]]\n\n fake_im_fluxes = np.array(fake_im_fluxes)\n\n err = 2.5 * np.log10(1.0 + (fake_im_stds / fake_im_fluxes))\n\n#DELETE THIS\n# indices = np.arange(len(fake_im_fluxes))\n# separation = fwhm * plate_scale * indices\n\n contrast = -2.5 * np.log10(fake_im_fluxes / center_flux[0])\n\n #Save contrast curve as a pandas DataFrame\n df = pd.DataFrame({'arcsec': all_seps, 'dmag': contrast, 'dmrms': err})\n\n return df #separation, contrast, err", "def calc_enginprops(self):\n # Let's assemble the ABD matrix even if it is not required\n ABD = np.bmat([[self.A, self.B], [self.B, self.D]])\n ABD_inv = np.linalg.inv(ABD)\n # We would use the whole matrix. 
This gives results similar to elamX and considers poisson effects\n A_inv = ABD_inv[0:3, 0:3]\n self.Ex = 1 / (self.total_t * A_inv[0, 0]) # It is 2 * t because we need total thickness\n self.Ey = 1 / (self.total_t * A_inv[1, 1])\n self.Gxy = 1 / (self.total_t * A_inv[2, 2])\n self.poissonxy = - A_inv[0,1] / A_inv[0, 0]\n # Flexural stiffness properties\n self.zx = 0.0\n self.zy = 0.0\n zx_dem = 0.0\n zy_dem = 0.0\n self.EIx = 0.0\n self.EIy = 0.0\n z = 0.0\n # Calculate neutral axis in direction x and y\n for S_bar, t in zip(self.S_bars, self.ts):\n Ex = 1 / S_bar[0, 0]\n Ey = 1 / S_bar[1, 1]\n z += t / 2.0\n self.zx += Ex * t * z\n zx_dem += Ex * t\n self.zy += Ey * t * z\n zy_dem += Ey * t\n z += t / 2.0\n self.zx = self.zx / zx_dem\n self.zy = self.zy / zy_dem\n # Calculate EI in direction x and y\n z = 0.0\n for S_bar, t in zip(self.S_bars, self.ts):\n Ex = 1 / S_bar[0, 0]\n Ey = 1 / S_bar[1, 1]\n Gxy = 1 / S_bar[2, 2]\n z += t / 2.0\n self.EIx += Ex * (t**3 / 12 + t * (z - self.zx)**2)\n self.EIy += Ey * (t**3 / 12 + t * (z - self.zy)**2)\n self.GA += Gxy * t\n z += t / 2.0\n return self.Ex, self.Ey, self.Gxy, self.poissonxy", "def create_flux_vector_pf_gr(self):\n t0 = time.time()\n\n verif_local = 1\n lim4 = 1e-4\n soma = 0\n soma2 = 0\n soma3 = 0\n store_flux_pf = {}\n\n for volume in self.all_fine_vols:\n #1\n flux = {}\n kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])\n centroid_volume = self.mesh_topo_util.get_average_position([volume])\n z_vol = self.tz - centroid_volume[2]\n adjs_vol = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)\n gid_vol = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]\n pvol = self.mb.tag_get_data(self.pf_tag, volume, flat=True)[0]\n for adj in adjs_vol:\n #2\n gid_adj = self.mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]\n padj = self.mb.tag_get_data(self.pf_tag, adj, flat=True)[0]\n kadj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])\n centroid_adj = self.mesh_topo_util.get_average_position([adj])\n z_adj = self.tz - centroid_adj[2]\n direction = centroid_adj - centroid_volume\n altura = centroid_adj[2]\n unit = direction/np.linalg.norm(direction)\n #unit = vetor unitario na direcao de direction\n uni = self.unitary(direction)\n z = uni[2]\n # uni = valor positivo do vetor unitario\n kvol = np.dot(np.dot(kvol,uni),uni)\n kadj = np.dot(np.dot(kadj,uni),uni)\n keq = self.kequiv(kvol, kadj)\n keq = keq*(np.dot(self.A, uni))/(self.mi)\n grad_p = (padj - pvol)/float(abs(np.dot(direction, uni)))\n grad_z = (z_adj - z_vol)/float(abs(np.dot(direction, uni)))\n\n q = (grad_p)*keq - grad_z*keq*self.gama\n flux[tuple(unit)] = q\n kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])\n #1\n store_flux_pf[volume] = flux\n flt = sum(flux.values())\n # print(gid_vol)\n # print(flt)\n # print(store_flux_pf)\n # print('\\n')\n # import pdb; pdb.set_trace()\n self.mb.tag_set_data(self.flux_fine_pf_tag, volume, flt)\n soma += flt\n if abs(flt) > lim4 and volume not in self.wells:\n verif_local = 0\n print('nao esta dando conservativo na malha fina')\n print(gid_vol)\n print(flt)\n import pdb; pdb.set_trace()\n soma_prod = []\n soma_inj = []\n with open('fluxo_malha_fina_gr.txt', 'w') as arq:\n for volume in self.wells:\n gid = self.mb.tag_get_data(self.global_id_tag, volume, flat = True)[0]\n values = store_flux_pf[volume].values()\n arq.write('gid:{0} , fluxo:{1}\\n'.format(gid, sum(values)))\n\n # print('gid:{0}'.format(gid))\n # print('valor:{0}'.format(sum(values)))\n if volume in 
self.wells_inj:\n soma_inj.append(sum(values))\n else:\n soma_prod.append(sum(values))\n # print('\\n')\n soma2 += sum(values)\n arq.write('\\n')\n arq.write('soma_inj:{0}\\n'.format(sum(soma_inj)))\n arq.write('soma_prod:{0}'.format(sum(soma_prod)))\n\n print('soma_inj:{0}'.format(sum(soma_inj)))\n print('soma_prod:{0}'.format(sum(soma_prod)))\n\n print('soma2 : {0}'.format(soma2))\n if abs(soma2) > lim4:\n print('nao esta dando conservativo globalmente')\n import pdb; pdb.set_trace()\n\n # print('saiu de def create_flux_vector_pf')\n print('\\n')\n\n tf = time.time()\n # import pdb; pdb.set_trace()\n return store_flux_pf", "def sh( values ):\n # ECMWF normalizes the spherical harmonic coeffs differently than NCEP.\n # (m=0,n=0 is global mean, instead of sqrt(2)/2 times global mean)\n fld = 2.*values/np.sqrt(2.)\n \n #------SPLITTING IT UP IN AN IMAGARY AND REAL PART--------\n fldr = fld[ 0::2 ] #annenhver verdi fra 0\n fldi = fld[ 1::2 ] #annenhver verdi fra 1\n fldn = np.zeros( fldr.shape, 'F' ) #blir halvparten så stor som orginale fld\n fldn.real = fldr #legges da til i fldn vectoren\n fldn.imag = fldi\n #----------------------------------------------------------\n \n nlons = 360 #Have a feeling it probably is number of values like grid val\n nlats = 1280 #web sais it shourld be 180.. wellwell, seems to work\n s = spharm.Spharmt( nlons, nlats ) \n \n data = s.spectogrd( fldn ) #Hvis nlats = 180, så feiler denne delen pga hvordan formelen fungerer..\n \n lons = ( 360./nlons ) * np.arange( nlons )\n lats = 90.-( 180./( nlats - 1 ) ) * np.arange( nlats )\n lons, lats = np.meshgrid( lons, lats )\n \n #stack grids side-by-side (in longitiudinal direction), so\n # any range of longitudes (between -360 and 360) may be plotted on a world map.\n lons = np.concatenate(( lons - 360, lons ), 1 )\n lats = np.concatenate(( lats, lats ), 1 )\n data = np.concatenate(( data, data ), 1 )\n \n return lats, lons, data", "def fglidingHST_PL(xy, v, NL, KL, BM, Mm, params):\n I1 = params['I1']\n I3 = params['I3']\n l = params['l']\n g = params['g']\n k = params['k']\n\n try:\n NP, NN = np.shape(NL)\n except:\n '''There is only one particle'''\n NP = 1\n NN = 0\n\n X = xy[:, 0].ravel() # .reshape(NP,1);\n Y = xy[:, 1].ravel() # .reshape(NP,1);\n dX = xy[:, 2].ravel() # .reshape(NP,1);\n dY = xy[:, 3].ravel() # .reshape(NP,1);\n vX = v[:, 0].ravel() # .reshape(NP,1);\n vX = v[:, 1].ravel() # .reshape(NP,1);\n vdX = v[:, 2].ravel() # .reshape(NP,1);\n vdY = v[:, 3].ravel() # .reshape(NP,1);\n\n phi = np.arctan2(dY, dX)\n # print 'xy = ', xy\n # print 'v = ', v\n\n # Note: w3 = vpsi + vphi*np.cos(theta)\n w3 = params['w3']\n\n # SPRING FORCE\n vecx = np.array([[KL[i, j] * (xy[i, 0] - xy[NL[i, j], 0]) for j in range(NN)] for i in range(NP)])\n vecy = np.array([[KL[i, j] * (xy[i, 1] - xy[NL[i, j], 1]) for j in range(NN)] for i in range(NP)])\n mag = np.sqrt(vecx ** 2 + vecy ** 2)\n # KLnoz = KL.copy() #no zeros\n # KLnoz[KLnoz ==0] = 1. #same value as mag[mag==0], so that stretch=0 for those\n stretch = mag - BM\n mag[mag == 0.] = 1. 
# avoid divide by zero error\n # print(stretch)\n springx = k * np.sum(stretch * vecx / mag, axis=-1)\n springy = k * np.sum(stretch * vecy / mag, axis=-1)\n # print 'stretch = ', stretch\n\n # add them up\n FX = - springx.ravel() # .reshape(NP,1)\n FY = - springy.ravel() # .reshape(NP,1)\n\n # Set force on fixed particles to zero\n if 'BIND' in params:\n if len(params['BIND']) > 0:\n FX[params['BIND']] = 0.\n FY[params['BIND']] = 0.\n\n # Transform into A frame\n Fx = FX * np.cos(phi) + FY * np.sin(phi)\n Fy = -FX * np.sin(phi) + FY * np.cos(phi)\n\n # print '\\n Fx =', Fx\n\n # POLAR COORDINATES (delta, phi)\n delta = np.sqrt(dX ** 2 + dY ** 2)\n v_delta = vdX * np.cos(phi) + vdY * np.sin(phi)\n v_phi = -vdX * np.sin(phi) + vdY * np.cos(phi)\n\n # VERTICAL REACTION FORCE\n gn = Mm * (g * l * I1 + I1 * (vdX ** 2 + vdY ** 2) \\\n + I3 * w3 * v_phi * delta \\\n - l ** 2 * delta * Fx) / (l * I1 + Mm * l * delta ** 2)\n\n # print 'gn = ', gn\n\n # EULER EQUATIONS\n dv_phi = (1. / I1) * (-l ** 2 * Fy - I3 * w3 * v_delta)\n dv_delta = (1. / I1) * (-l * gn * delta - l ** 2 * Fx + I3 * w3 * v_phi)\n\n d_vdX = dv_delta * np.cos(phi) - dv_phi * np.sin(phi)\n d_vdY = dv_delta * np.sin(phi) + dv_phi * np.cos(phi)\n\n # SPRING EQUATIONS\n # print 'dvtheta =', dvtheta\n qx = dv_delta - v_delta ** 2 * delta / l ** 2\n qy = dv_phi\n qX = qx * np.cos(phi) - qy * np.sin(phi)\n qY = qx * np.sin(phi) + qy * np.cos(phi)\n d_vX = (FX / Mm) - qX\n d_vY = (FY / Mm) - qY\n\n # print 'check d_vX = ', d_vX\n\n if params['BCtype'] == 'excite':\n if params['excite_continue']:\n # print 'exciting'\n d = params['amplitude']\n freq = params['frequency']\n x0_BIND = params['x0_BIND']\n y0_BIND = params['y0_BIND']\n BIND = params['BIND']\n w3 = params['w3'][BIND]\n\n nu = freq\n phi_BIND = (np.arctan2(dY[BIND], dX[BIND]) + nu * params['h'])[0]\n # print 'phi_BIND =', phi_BIND\n\n d_vX[BIND] = d * nu ** 2 * np.cos(phi_BIND)\n d_vY[BIND] = d * nu ** 2 * np.sin(phi_BIND)\n d_vdX[BIND] = -d * nu ** 2 * np.cos(phi_BIND)\n d_vdY[BIND] = -d * nu ** 2 * np.sin(phi_BIND)\n\n elif 'BIND' in params:\n if len(params['BIND']) > 0:\n # ftx[params['BIND'],0:2] = [0.,0.]\n d_vX[params['BIND']] = 0.\n d_vY[params['BIND']] = 0.\n\n # print 'shapes = ', np.shape(dvX), np.shape(dvY),np.shape(dvtheta),np.shape(dvphi),np.shape(dvpsi)\n ftx = np.dstack((d_vX, d_vY, d_vdX, d_vdY))[0]\n # print 'Resulting second derivative: ', ftx[1,:]\n # ftx_exact = fglidingHST_exact(xy, v, NL, KL, BM, Mm, params)\n # print 'gn = ', gn\n # print 'ftx = ', ftx\n # print 'v_delta = ', v_delta\n # print 'v_phi = ', v_phi\n # print 'dv_delta = ', dv_delta\n # print 'dv_phi = ', dv_phi\n # print 'qx = ', qx\n # print 'qy = ', qy\n # print 'ftx_exact = ', ftx_exact\n\n return ftx", "def rb_nfw(m200,c,z):\n\n #Setting up cosmology\n rho0=1.4876862e+11;\n omegam=0.238000;\n msun=1.98892e+33;\n delta_vir=200.;\n G=6.6730003e-08;\n kmpsToCmps = 1.0*10.**(5.);\n Rvir=200.;\n kpc2cm=3.086*10.**(21);\n \n deltac = (delta_vir/3.)*( (c**3.)/( np.log(1.+c) - (c / (1.+c))));\n rho_crit =rho0*omegam*(1.+z)**3.;\n r200 =(m200/delta_vir / rho_crit / (4.*np.pi/3.) )**0.33333 * 1000. 
;\n v200 = ((6.67e-8 * m200 * msun / (r200* 3.086*10.**(21.)) )**0.5)/1e5 ;\n \n r =np.linspace(1.,3.*r200,500); # kpc\n rs = r200 / c; \n ss=(((r/rs)*(1.+(r/rs))**2.)*1000.**3);\n rho = (rho_crit * deltac)/(ss); \n M_r = 4.*np.pi* integrate.cumtrapz((r**2)*rho, r,initial=0.)\n \n x = r/r200 ;\n tab=1./x*(np.log(1.+c*x)-c*x/(1.+c*x))/(np.log(1.+c)-c/(1.+c));\n vcirc = v200*(tab)**0.5 ;\n maxvcirc = np.max(vcirc) ;\n q=np.where((vcirc == np.max(vcirc)));\n maxvcircr = r[q];\n \n \n # Now compute V_Esc as per nfw.pro Binney & Tremaine equation 2.31\n Phi_new = r * 0.0;\n vesc = r * 0.0 ;\n for ir in range(2,len(r)-4):\n term1 = (np.trapz(rho[0:ir]*(r[0:ir]**2.),x=r[0:ir])/(r[ir]))* msun; \n term2 = np.trapz(rho[ir:len(r)]*r[ir:len(r)],x=r[ir:len(r)])*msun; \n Phi_new[ir] = -4. *np.pi*6.67e-8*(term1 + term2)/3.086e21 ;\n vesc[ir] = ((2. * np.abs(Phi_new[ir]))**0.5) / 1e5 ; # See Binney & Tremaine (2-22) \n \n\n # Chage Units to do velocity dispersion calculations\n rcm=r*kpc2cm;\n\n #M_r in gram\n M_r_gram=M_r*msun;\n\n Phi=G*integrate.cumtrapz((M_r_gram/rcm**(2)),rcm,initial=0);\n \n Phi=Phi*(1./((1e5)**2.));#%km^2/s^2\n Phi_out=np.max(Phi);\n\n k=0.41;\n a=0.29;\n\n sig = np.sqrt(a *(( Phi/Phi_out)**(k))*(Phi_out -Phi));\n \n nfw={}\n qqqt=np.where((vesc==0.))\n vesc[qqqt]=1e-99\n\n nfw[\"m200\"]=m200;\n nfw[\"c\"]=c;\n nfw[\"r200\"]=r200;\n nfw[\"v200\"]=v200;\n nfw[\"maxvcirc\"]=maxvcirc;\n nfw[\"maxvcircr\"]=maxvcircr;\n nfw[\"r\"]=r;\n nfw[\"rho\"]=rho;\n nfw[\"vcirc\"]=vcirc;\n nfw[\"M_r\"]=M_r;\n nfw[\"sig_v\"]=sig;\n nfw[\"vesc\"]=vesc;\n \n return nfw", "def compute_tsky_hot( xv, yv, hv, thot, tcold):\n\n nData = len(yv) \n epsilons = np.full( nData, EPSILON)\n tsys = np.zeros(nData) # initialize arrays\n\n Z = np.zeros(nData)\n oneMZ = np.zeros(nData)\n # For full Temp calibration, a spectrum taken at high elevation away from \n # The galactic plan is used. For this program the cold spectrum must be\n # the spectrum being calibrated. See the M command for comparision\n epsilons = np.full( nData, EPSILON)\n yv = np.maximum( yv, epsilons)\n hv = np.maximum( hv, epsilons)\n # comput the cold/hot ratio\n Z = yv/hv\n oneMZ = np.full( nData, 1.) - Z\n oneMZ = np.maximum( oneMZ, epsilons)\n\n # the cold, receiver, temperature is this function\n tsys = ((Z*thot) - tcold)/oneMZ\n \n n6 = int(nData/6)\n n56 = 5*n6\n\n tsysmedian = np.median( tsys[n6:n56])\n\n tsky = np.zeros(nData) # initialize arrays\n S = np.zeros(nData) # initialize arrays\n\n # The system gain S is computed assuming a tsys is the cold load\n S = np.full( nData, tsysmedian+thot)/hv\n # scale the observed instensity in counts to Kelvins.\n tsky = S*yv\n\n return tsky", "def dewT_2_q_magnus(ds, var):\n A1, B1, C1 = 17.625, 243.04, 610.94\n vpsl = C1 * np.exp(A1 * (ds[var['dew']] - 273.15) / (B1 + (ds[var['dew']] - 273.15)))\n wsl = eps0 * vpsl / (ds[var['pressure']] - vpsl)\n ds[var['spec_h']] = wsl / (1 + wsl)\n return ds", "def SCF(N, R, Zeta1, Zeta2, Za, Zb, G):\n Crit = 1e-11 # Convergence critera\n Maxit = 250 # Maximum number of iterations\n Iter = 0\n\n ######## STEP 1. Guess an initial density matrix ########\n # Use core hamiltonian for initial guess of F, I.E. (P=0)\n P = np.zeros([2, 2])\n\n Energy = 0.0\n\n while (Iter < Maxit):\n Iter += 1\n print(Iter)\n\n ######## STEP 2. 
calculate the Fock matrix ########\n # Form two electron part of Fock matrix from P\n G = np.zeros([2, 2]) # This is the two electron contribution in the equations above\n for i in range(2):\n for j in range(2):\n for k in range(2):\n for l in range(2):\n G[i, j] = G[i, j] + P[k, l] * (TT[i, j, k, l] - 0.5 * TT[i, j, k, l])\n\n # Add core hamiltonian H^CORE to get fock matrix\n F = H + G\n\n # Calculate the electronic energy\n Energy = np.sum(0.5 * P * (H + F))\n\n print('Electronic energy = ', Energy)\n\n ######## STEP 3. Calculate F' (remember S^-1/2 is X and S^1/2 is X.T) ########\n G = np.matmul(F, X)\n Fprime = np.matmul(X.T, G)\n\n ######## STEP 4. Solve the eigenvalue problem ########\n # Diagonalise transformed Fock matrix\n Diag(Fprime, Cprime, E)\n\n ######## STEP 5. Calculate the molecular orbitals coefficients ########\n # Transform eigen vectors to get matrix C\n C = np.matmul(X, Cprime)\n\n ######## STEP 6. Calculate the new density matrix from the old P ########\n Oldp = np.array(P)\n P = np.zeros([2, 2])\n\n # Form new density matrix\n for i in range(2):\n for j in range(2):\n # Save present density matrix before creating a new one\n for k in range(1):\n P[i, j] += 2.0 * C[i, k] * C[j, k]\n\n ######## STEP 7. Check to see if the energy has converged ########\n Delta = 0.0\n # Calculate delta the difference between the old density matrix Old P and the new P\n Delta = (P - Oldp)\n Delta = np.sqrt(np.sum(Delta ** 2) / 4.0)\n print(\"Delta\", Delta)\n\n # Check for convergence\n if (Delta < Crit):\n # Add nuclear repulsion to get the total energy\n Energytot = Energy + Za * Zb / R\n print(\"Calculation converged with electronic energy:\", Energy)\n print(\"Calculation converged with total energy:\", Energytot)\n print(\"Density matrix\", P)\n print(\"Mulliken populations\", np.matmul(P, S))\n print(\"Coeffients\", C)\n\n break", "def g2dfwhm(img):\n npix = img.shape[0]\n rowCen,colCen = adaptiveCentroid(img,1.1/scale)\n row,col = np.mgrid[0:npix,0:npix]\n row = row - rowCen\n col = col - colCen\n A0,sigmac0 = moments(img)\n sigmar0 = sigmac0\n rho0 = 0.\n B0 = 0.\n p0=np.array([sigmac0,sigmar0,rho0,A0, B0])\n def residualg2d(p,x,y,xc,yc,I):\n sigmax,sigmay,rho,A,B = p\n Ierr = np.sqrt(abs(I))+0.00001 # to avoid those = 0, add a small number \n res = (gaussian2d(x,y,xc,yc,sigmax,sigmay,rho,A,B) - I)/Ierr\n return res.flatten()\n p = leastsq(residualg2d,p0,args=(col,row,colCen,rowCen,img))[0]\n sigmac,sigmar,rho,A,B = p\n Mcc = sigmac**2\n Mrr = sigmar**2\n Mrc = rho**2*Mcc*Mrr\n M20 = Mrr + Mcc\n M22 = complex(Mcc - Mrr,2*Mrc)\n whiskerLength = np.sqrt(np.abs(M22))\n lambdap = 0.5*(M20 + abs(M22))\n lambdam = 0.5*(M20 - abs(M22))\n fwhm_g2d = np.sqrt(2.*np.log(2.))*(np.sqrt(lambdap)+np.sqrt(lambdam))\n #fwhm = np.sqrt(M20/2.)*2.35482*scale\n return A, B, whiskerLength, fwhm_g2d", "def solid_surface_density_RC2014_given_physical_catalog(sssp_per_sys, max_core_mass=10.):\n mult_all = sssp_per_sys['Mtot_all']\n a_all_2p = []\n mult_all_2p = []\n sigma_all_2p = []\n for i in np.arange(len(mult_all))[mult_all > 1]: # only consider multi-planet systems\n a_sys = sssp_per_sys['a_all'][i]\n core_mass_sys = np.copy(sssp_per_sys['mass_all'][i][a_sys > 0])\n core_mass_sys[core_mass_sys > max_core_mass] = max_core_mass\n a_sys = a_sys[a_sys > 0]\n a_all_2p += list(a_sys)\n mult_all_2p += [len(a_sys)]*len(a_sys)\n sigma_all_2p += list(solid_surface_density_system_RC2014(core_mass_sys, a_sys))\n a_all_2p = np.array(a_all_2p)\n mult_all_2p = np.array(mult_all_2p)\n sigma_all_2p = 
np.array(sigma_all_2p)\n return sigma_all_2p, a_all_2p, mult_all_2p", "def _full_relativistic_loss(eps, eta, t):\n # Surrounding medium, eta terms for surface loss\n lmb2_eta = Theta2 - eta * ThetaE2 * beta2\n lmb_eta = np.lib.scimath.sqrt(lmb2_eta)\n phi2_eta = lmb2_eta + ThetaE2\n\n # Thin layer, epsilon terms for surface loss\n lmb2_eps = Theta2 - eps * ThetaE2 * beta2\n lmb_eps = np.lib.scimath.sqrt(lmb2_eps) # should be > 0.\n phi2_eps = lmb2_eps + ThetaE2\n\n # Combined term for relativistic surface loss\n phi2_eps_eta = Theta2 + ThetaE2 * (1. - (eps + eta) * beta2)\n\n # Thickness dependent terms for surface loss\n de = t * Psurf\n sin_de = np.sin(de)\n cos_de = np.cos(de)\n txy = np.tanh(lmb_eps * de / ThetaE)\n lplus = lmb_eta * eps + lmb_eps * eta * txy\n lminus = lmb_eta * eps + lmb_eps * eta / txy\n\n # \"Relativistic surface plasmon\"\n A1 = phi2_eps_eta**2. / eps / eta\n A2 = sin_de**2. / lplus + cos_de**2. / lminus\n A = A1 * A2\n # Guided light mode 1\n B1 = beta2 * lmb_eta * ThetaE * phi2_eps_eta / eta\n B2 = (1. / lplus - 1. / lminus) * 2. * sin_de * cos_de\n B = B1*B2\n # Guided light mode 2\n C1 = - beta2**2. * lmb_eta * lmb_eps * ThetaE2\n C2 = cos_de**2. * txy / lplus\n C3 = sin_de**2. / txy / lminus\n C = C1 * (C2 + C3)\n\n # Build relativistic surface loss\n Ps1 = 2 * Theta2 * (eps - eta)**2. / phi2_eta**2. / phi2_eps**2.\n Ps2 = hbar/momentum\n Ps3 = A + B + C\n Ps = Ps1 * Ps2 * Ps3\n\n # Build relativistic bulk loss\n Pv = t * (1. - (eps*beta2)) / eps / phi2_eps\n\n # Calculate P and Pvol (volume only)\n P = Pcoef * np.imag(Pv - Ps)\n Pvol = Pcoef * np.imag(Pv)\n return (P + 1j*Pvol)", "def __init__(self):\n # Set constants\n self.fromHztoeV = 6.58e-16\n self.gramstoeV = 1 / ( 1.78 * 1e-33)\n self.mtoev = 1/(1.97 * 1e-7) \n self.H0 = cosmo.H(0).value * 1e3 / (1e3 * const.kpc.value) #expressed in 1/s\n self.rhocritical = cosmo.critical_density(0).value * self.gramstoeV /(1e-2)**3 # eV/m**3\n self.Om0 = cosmo.Om0 #total matter \n self.OLambda0 = cosmo.Ode0 # cosmological constant\n self.DM0 = self.Om0 - cosmo.Ob0 # dark matter\n self.evtonJoule = 1.60218 * 1e-10 # from eV to nJ\n self.evtoJoule = 1.60218 * 1e-19 # from eV to J\n PSgal1h = np.loadtxt(\"/Users/andreacaputo/Desktop/Phd/AxionDecayCrossCorr/Codes/NIRB_PS/PS_GALl_1h.dat\")\n PSgal2h = np.loadtxt(\"/Users/andreacaputo/Desktop/Phd/AxionDecayCrossCorr/Codes/NIRB_PS/PS_GALl_2h.dat\")\n self.Mpc = 1e3 * const.kpc.value\n self.zmin = 0.001\n self.zmax = 30.001\n self.zbins = 301\n self.h = cosmo.h\n self.z_vect = np.linspace(self.zmin, self.zmax, self.zbins)\n self.k_vect = PSgal1h[:,0]* self.h\n self.Power1h = PSgal1h[:,1:]/(self.h**3)\n self.Power2h = PSgal2h[:,1:]/(self.h**3)\n self.Power = self.Power1h + self.Power2h\n self.Praw_prova1h = interp2d(self.k_vect, self.z_vect, np.transpose(self.Power1h))\n self.Praw_prova2h = interp2d(self.k_vect, self.z_vect, np.transpose(self.Power2h))\n self.Praw_prova = interp2d(self.k_vect, self.z_vect, np.transpose(self.Power))", "def model_prem(r):\n\n\t#- normalised radius\n\tx = r / 6371000.0\n\n\t#- march through the various depth levels -----------------------------------------------------\n\n\t#- upper crust\n\tif (r >= 6356000.0):\n\t\trho = 2.6\n\t\tvpv = 5.8\n\t\tvph = vpv\n\t\tvsv = 3.2\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- lower crust\n\telif (r >= 6346000.6) & (r < 6356000.0):\n\t\trho = 2.9\n\t\tvpv = 6.8\n\t\tvph = vpv\n\t\tvsv = 3.9\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- LID\n\telif (r >= 6291000.0) & (r < 6346000.6):\n\t\trho = 2.6910 + 0.6924 * x\n\t\tvpv 
= 0.8317 + 7.2180 * x\n\t\tvph = 3.5908 + 4.6172 * x\n\t\tvsv = 5.8582 - 1.4678 * x\n\t\tvsh = -1.0839 + 5.7176 * x\n\t\teta = 3.3687 - 2.4778 * x\n\n\t#- LVZ\n\telif (r >= 6151000.0) & (r < 6291000.0):\n\t\trho = 2.6910 + 0.6924 * x\n\t\tvpv = 0.8317 + 7.2180 * x\n\t\tvph = 3.5908 + 4.6172 * x\n\t\tvsv = 5.8582 - 1.4678 * x\n\t\tvsh = -1.0839 + 5.7176 * x\n\t\teta = 3.3687 - 2.4778 * x\n\n\t#- Transition zone 1\n\telif (r >= 5971000.0) & (r < 6151000.0):\n\t\trho = 7.1089 - 3.8045 * x\n\t\tvpv = 20.3926 - 12.2569 * x\n\t\tvph = vpv\n\t\tvsv = 8.9496 - 4.4597 * x\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- Transition zone 2\n\telif (r >= 5771000.0) & (r < 5971000.0):\n\t\trho = 11.2494 - 8.0298 * x\n\t\tvpv = 39.7027 - 32.6166 * x\n\t\tvph = vpv\n\t\tvsv = 22.3512 - 18.5856 * x\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- Transition zone 3\n\telif (r >= 5701000.0) & (r < 5771000.0):\n\t\trho = 5.3197 - 1.4836 * x\n\t\tvpv = 19.0957 - 9.8672 * x\n\t\tvph = vpv\n\t\tvsv = 9.9839 - 4.9324 * x\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- Lower mantle 1\n\telif (r >= 5600000.0) & (r < 5701000.0):\n\t\trho = 7.9565 - 6.4761 * x + 5.5283 * x**2 - 3.0807 * x**3\n\t\tvpv = 29.2766 - 23.6027 * x + 5.5242 * x**2 - 2.5514 * x**3\n\t\tvph = vpv\n\t\tvsv = 22.3459 - 17.2473 * x - 2.0834 * x**2 + 0.9783 * x**3\n\t\tvsh = vsv\n\t\teta = 1.0 \n\n\t#- Lower mantle 2\n\telif (r >= 3630000.0) & (r < 5600000.0):\n\t\trho = 7.9565 - 6.4761 * x + 5.5283 * x**2 - 3.0807 * x**3\n\t\tvpv = 24.9520 - 40.4673 * x + 51.4832 * x**2 - 26.6419 * x**3\n\t\tvph = vpv\n\t\tvsv = 11.1671 - 13.7818 * x + 17.4575 * x**2 - 9.2777 * x**3\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- Lower mantle 3\n\telif (r >= 3480000.0) & (r < 3630000.0):\n\t\trho = 7.9565 - 6.4761 * x + 5.5283 * x**2 - 3.0807 * x**3\n\t\tvpv = 15.3891 - 5.3181 * x + 5.5242 * x**2 - 2.5514 * x**3\n\t\tvph = vpv\n\t\tvsv = 6.9254 + 1.4672 * x - 2.0834 * x**2 + 0.9783 * x**3\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- Outer core\n\telif (r >= 1221000.5) & (r < 3480000.0):\n\t\trho = 12.5815 - 1.2638 * x - 3.6426 * x**2 - 5.5281 * x**3\n\t\tvpv = 11.0487 - 4.0362 * x + 4.8023 * x**2 - 13.5732 * x**3\n\t\tvph = vpv\n\t\tvsv = 0.0\n\t\tvsh = 0.0\n\t\teta = 1.0\n\n\t#- Inner Core\n\telif (r >= 0.0) & (r < 1221000.5):\n\t\trho = 13.0885 - 8.8381 * x**2\n\t\tvpv = 11.2622 - 6.3640 * x**2\n\t\tvph = vpv\n\t\tvsv = 3.6678 - 4.4475 * x**2\n\t\tvsh = vsv\n\t\teta = 1.0 \n\n\t#- convert to elastic parameters --------------------------------------------------------------\n\n\trho = 1000.0 * rho\n\tvpv = 1000.0 * vpv\n\tvph = 1000.0 * vph\n\tvsv = 1000.0 * vsv\n\tvsh = 1000.0 * vsh\n\n\tA = rho * vph**2\n\tC = rho * vpv**2\n\tN = rho * vsh**2\n\tL = rho * vsv**2\n\tF = eta * (A - 2 * L)\n\n\treturn rho, A, C, F, L, N", "def SH_surface_plots(n_max=6,figsize=(15,15),fs=15,saveA=True,show=False,dpi=400,vis_type='real'):\n\n N = 100j\n\n for n in range(n_max+1):\n for m in range(n+1):\n plt.close('all')\n print(\"working on Y_%s^%s\" % (n,m) )\n\n PHI,THETA = np.mgrid[0:2*np.pi:N*2, 0:np.pi:N]\n if vis_type == 'real':\n R = sp.sph_harm(m,n,PHI,THETA).real\n if vis_type == 'modulus':\n r = sp.sph_harm(m,n,PHI,THETA)\n R = r * r.conjugate()\n if vis_type == 'unit':\n R = sp.sph_harm(m,n,PHI,THETA).real + 1\n\n X = np.abs(R) * np.sin(THETA) * np.cos(PHI)\n Y = np.abs(R) * np.sin(THETA) * np.sin(PHI)\n Z = np.abs(R) * np.cos(THETA)\n\n norm = colors.Normalize()\n fig, ax = plt.subplots(subplot_kw=dict(projection='3d'), figsize=(14,10))\n sm = cm.ScalarMappable(cmap=cm.seismic)\n ax.plot_surface(X, Y, Z, rstride=1, 
cstride=1, facecolors=cm.seismic(norm(R)))\n ax.set_title('real$(Y^%s_%s)$' % (m,n), fontsize=fs)\n ax.set_aspect(1)\n sm.set_array(R)\n fig.colorbar(sm, shrink=0.8)\n\n if saveA:\n fig.savefig('images/%s/%s_%s.png' % (vis_type,n,m), dpi=dpi)\n if show:\n plt.show()\n\n # print(\"\\n only +m values are used.\")\n # for n in range(n_max+1):\n # for m in range(n+1):\n # plt.close('all')\n # print(\"\\n n,m = %s,%s\" % (n,m) )\n #\n # R,X,Y,Z = harmonics(m,n)\n #\n # fig = plt.figure(figsize=figsize)\n # ax = plt.subplot(projection='3d')\n # ax.set_aspect(1)\n # ax.set_title(\"n: %s m: %s\" % (n,m), fontsize=fs+2)\n # ax.plot_surface(X,Y,Z,\\\n # cmap = cm.seismic,\n # norm = colors.Normalize( vmin=np.min(R),vmax=np.max(R) )\\\n # )\n #\n # if saveA:\n # fig.savefig('images/%s_%s.png' % (n,m), dpi=dpi)\n # if show:\n # plt.show()", "def airy_and_slicer(surface, wavelength, scale_mas, PSF_window, N_window):\n\n # Print message to know we are updating the cache\n print('Recalculating Airy Pattern for %.3f microns' % wavelength)\n\n # Plate scales [Px, Py] for each spaxel scale in mm / arcsec,\n # depending on the surface [IS: Image Slicer, DET: Detector]\n plate_scales = {'IS': {4.0: [125, 250], 60.0: [16.67, 16.67]},\n 'DET': {4.0: [3.75, 7.5], 60.0: [0.5, 0.5]}}\n plate_x = plate_scales[surface][scale_mas][0]\n plate_y = plate_scales[surface][scale_mas][1]\n\n # We know how many Microns the pixels of the Geometric PSF span [PSF_window / N_window]\n pix_sampling = PSF_window / N_window # micron at the detector plane\n # Using the plate scale we calculate how many m.a.s each of those pixels have to span\n pix_scale_x = pix_sampling / plate_x # milliarcsec / pixel\n pix_scale_y = pix_sampling / plate_y # milliarcsec / pixel\n\n # Calculate the relative size of the pupil aperture needed to ensure the PSF is\n # sampled with the given pix_scale at the focal plane\n ELT_DIAM = 39\n MILIARCSECS_IN_A_RAD = 206265000\n pix_rad_x = pix_scale_x / MILIARCSECS_IN_A_RAD # radians / pixel\n pix_rad_y = pix_scale_y / MILIARCSECS_IN_A_RAD\n RHO_APER_x = pix_rad_x * ELT_DIAM / (wavelength * 1e-6)\n RHO_APER_y = pix_rad_y * ELT_DIAM / (wavelength * 1e-6)\n RHO_OBSC_x = 0.30 * RHO_APER_x # ELT central obscuration\n RHO_OBSC_y = 0.30 * RHO_APER_y # ELT central obscuration\n\n # Sanity check\n PIX_RAD_x = RHO_APER_x * wavelength / ELT_DIAM * 1e-6\n PIX_RAD_y = RHO_APER_y * wavelength / ELT_DIAM * 1e-6\n PIX_MAS_x = PIX_RAD_x * MILIARCSECS_IN_A_RAD\n PIX_MAS_y = PIX_RAD_y * MILIARCSECS_IN_A_RAD\n\n # Define the ELT pupil mask. 
Note that we use a central obscuration too\n N = 2048\n x = np.linspace(-1, 1, N)\n xx, yy = np.meshgrid(x, x)\n\n # To get the anamorphic scaling we define the equation for an ellipse\n rho = np.sqrt((xx / RHO_APER_x) ** 2 + (yy / RHO_APER_y) ** 2)\n\n # (1) Propagate to the Image Slicer Focal plane\n elt_mask = (RHO_OBSC_x / RHO_APER_x < rho) & (rho < 1.0)\n pupil = elt_mask * np.exp(1j * elt_mask)\n image_electric = fftshift(fft2(pupil))\n\n if surface == 'IS':\n # print(\"IS\")\n # We are already at the Image Slicer, don't do anything else\n min_pix, max_pix = N // 2 - N_window // 2, N // 2 + N_window // 2\n final_psf = (np.abs(image_electric))**2\n final_psf /= np.max(final_psf)\n crop_psf = final_psf[min_pix:max_pix, min_pix:max_pix]\n\n elif surface == 'DET':\n # print(\"DET\")\n # (1.1) Add slicer effect by masking\n # We mask the PSF covering a band of size 1x SPAXEL, depending on the scale\n # If we have 4x4 mas, then we cover a band of 4 mas over the PSF\n x_min, x_max = -N/2 * PIX_MAS_x, N/2 * PIX_MAS_x\n y_min, y_max = -N/2 * PIX_MAS_y, N/2 * PIX_MAS_y\n x_slice = np.linspace(x_min, x_max, N, endpoint=True)\n y_slice = np.linspace(y_min, y_max, N, endpoint=True)\n x_grid, y_grid = np.meshgrid(x_slice, y_slice)\n slicer_mask = np.abs(y_grid) < scale_mas / 2\n\n # ## Show the PSF both in [mas] space where it should be circular and in [pixel] space where it should be anamorphic\n # fig, ax = plt.subplots(1, 1)\n # img1 = ax.imshow((np.abs(image_electric))**2, extent=[x_min, x_max, y_min, y_max], cmap='bwr')\n # # plt.colorbar(img1, ax=ax)\n # ax.set_title(r'Airy Pattern | %.1f mas scale | Wavelength: %.3f $\\mu$m' % (scale_mas, wavelength))\n # ax.set_xlabel(r'X [mas]')\n # ax.set_ylabel(r'Y [mas]')\n # ax.set_xlim([-10, 10])\n # ax.set_ylim([-10, 10])\n #\n # fig, ax = plt.subplots(1, 1)\n # img1 = ax.imshow((np.abs(image_electric))**2, extent=[-N/2, N/2, -N/2, N/2], cmap='bwr')\n # ax.set_title(r'Airy Pattern | %.1f mas scale | Wavelength: %.3f $\\mu$m' % (scale_mas, wavelength))\n # ax.set_xlabel(r'Pixels [ ]')\n # ax.set_ylabel(r'Pixels [ ]')\n # ax.set_xlim([-100, 100])\n # ax.set_ylim([-100, 100])\n\n # plt.show()\n\n # (2) Propagate the masked electric field to Pupil Plane\n pup_grating = ifft2(fftshift(slicer_mask * image_electric))\n # (2.1) Add pupil mask, this time without the central obscuration\n aperture_mask = rho < 1.0\n\n # (3) Propagate back to Focal Plane\n final_focal = fftshift(fft2(aperture_mask * pup_grating))\n final_psf = (np.abs(final_focal))**2\n final_psf /= np.max(final_psf)\n\n # (4) Crop the PSF to fit to the necessary window to ease the convolutions\n min_pix, max_pix = N//2 - N_window//2, N//2 + N_window//2\n crop_psf = final_psf[min_pix:max_pix, min_pix:max_pix]\n\n # If we want to show the plots for Documentation\n\n # fig, (ax1, ax2, ax3) = plt.subplots(1, 3)\n # psf_airy = (np.abs(image_electric))**2\n # img1 = ax1.imshow(psf_airy, extent=[x_min, x_max, y_min, y_max], cmap='bwr')\n # ax1.axhline(y=scale_mas/2, linestyle='--', color='black')\n # ax1.axhline(y=-scale_mas/2, linestyle='--', color='black')\n # ax1.set_xlabel(r'X [mas]')\n # ax1.set_ylabel(r'Y [mas]')\n # ax1.set_xlim([-15, 15])\n # ax1.set_ylim([-15, 15])\n # ax1.set_title(r'Airy Pattern | Slicer Mask %.1f mas' % scale_mas)\n #\n # img2 = ax2.imshow(aperture_mask * (np.abs(pup_grating)**2), extent=[-1, 1, -1, 1], cmap='bwr')\n # ax2.set_title(r'Pupil Plane | Aperture Mask')\n # ax2.set_xlim([-0.25, 0.25])\n # ax2.set_ylim([-0.25, 0.25])\n #\n # img3 = ax3.imshow(final_psf, 
extent=[x_min, x_max, y_min, y_max], cmap='bwr')\n # ax3.set_xlabel(r'X [mas]')\n # ax3.set_ylabel(r'Y [mas]')\n # ax3.set_xlim([-15, 15])\n # ax3.set_ylim([-15, 15])\n # ax3.set_title(r'Diffraction Effects')\n # plt.show()\n\n return crop_psf", "def weak_lensing_kernel(cosmo, pzs, z, ell):\n z = np.atleast_1d(z)\n zmax = max([pz.zmax for pz in pzs])\n # Retrieve comoving distance corresponding to z\n chi = bkgrd.radial_comoving_distance(cosmo, z2a(z))\n\n # Extract the indices of pzs that can be treated as extended distributions,\n # and the ones that need to be treated as delta functions.\n pzs_extended_idx = [\n i for i, pz in enumerate(pzs) if not isinstance(pz, rds.delta_nz)\n ]\n pzs_delta_idx = [i for i, pz in enumerate(pzs) if isinstance(pz, rds.delta_nz)]\n # Here we define a permutation that would put all extended pzs at the begining of the list\n perm = pzs_extended_idx + pzs_delta_idx\n # Compute inverse permutation\n inv = np.argsort(np.array(perm, dtype=np.int32))\n\n # Process extended distributions, if any\n radial_kernels = []\n if len(pzs_extended_idx) > 0:\n\n @vmap\n def integrand(z_prime):\n chi_prime = bkgrd.radial_comoving_distance(cosmo, z2a(z_prime))\n # Stack the dndz of all redshift bins\n dndz = np.stack([pzs[i](z_prime) for i in pzs_extended_idx], axis=0)\n return dndz * np.clip(chi_prime - chi, 0) / np.clip(chi_prime, 1.0)\n\n radial_kernels.append(simps(integrand, z, zmax, 256) * (1.0 + z) * chi)\n # Process single plane redshifts if any\n if len(pzs_delta_idx) > 0:\n\n @vmap\n def integrand_single(z_prime):\n chi_prime = bkgrd.radial_comoving_distance(cosmo, z2a(z_prime))\n return np.clip(chi_prime - chi, 0) / np.clip(chi_prime, 1.0)\n\n radial_kernels.append(\n integrand_single(np.array([pzs[i].params[0] for i in pzs_delta_idx]))\n * (1.0 + z)\n * chi\n )\n # Fusing the results together\n radial_kernel = np.concatenate(radial_kernels, axis=0)\n # And perfoming inverse permutation to put all the indices where they should be\n radial_kernel = radial_kernel[inv]\n\n # Constant term\n constant_factor = 3.0 * const.H0 ** 2 * cosmo.Omega_m / 2.0 / const.c\n # Ell dependent factor\n ell_factor = np.sqrt((ell - 1) * (ell) * (ell + 1) * (ell + 2)) / (ell + 0.5) ** 2\n return constant_factor * ell_factor * radial_kernel", "def div(\n coeff_rho,\n momentum_x,\n momentum_y,\n momentum_z,\n ):\n # Compute the fourth order derivative of the pressure for the face\n # velocity correction.\n p_corr = (\n states['p']\n if self._params.enable_rhie_chow_correction else states['dp'])\n d4p_dx4 = self._kernel_op.apply_kernel_op_x(p_corr, 'k4d2x')\n d4p_dy4 = self._kernel_op.apply_kernel_op_y(p_corr, 'k4d2y')\n d4p_dz4 = self._kernel_op.apply_kernel_op_z(p_corr, 'k4d2z',\n 'k4d2zsh')\n\n # Compute velocity gradient based on interpolated values on cell faces.\n coeff_x = dt / (4. * coeff_rho * dx**2)\n du = self._kernel_op.apply_kernel_op_x(momentum_x, 'kDx')\n du_dx = [\n du_i / (2. * dx) + coeff_x * d4p_dx4_i\n for du_i, d4p_dx4_i in zip(du, d4p_dx4)\n ]\n\n coeff_y = dt / (4. * coeff_rho * dy**2)\n dv = self._kernel_op.apply_kernel_op_y(momentum_y, 'kDy')\n dv_dy = [\n dv_i / (2. * dy) + coeff_y * d4p_dy4_i\n for dv_i, d4p_dy4_i in zip(dv, d4p_dy4)\n ]\n\n coeff_z = dt / (4. * coeff_rho * dz**2)\n dw = self._kernel_op.apply_kernel_op_z(momentum_z, 'kDz', 'kDzsh')\n dw_dz = [\n dw_i / (2. 
* dz) + coeff_z * d4p_dz4_i\n for dw_i, d4p_dz4_i in zip(dw, d4p_dz4)\n ]\n\n return [\n du_dx_i + dv_dy_i + dw_dz_i\n for du_dx_i, dv_dy_i, dw_dz_i in zip(du_dx, dv_dy, dw_dz)\n ]", "def kelvin_effect(pres, surft, temp, mw_ba, dcell):\n volm = mw_ba/1e3 # approximation: using density 1000 kg/m3\n return pres*exp(-4*surft*volm/(dcell*gas_constant*temp))", "def test_double_ended_wls_fix_gamma_estimate_synthetic():\n from dtscalibration import DataStore\n\n cable_len = 100.0\n nt = 500\n time = np.arange(nt)\n x = np.linspace(0.0, cable_len, 100)\n ts_cold = np.ones(nt) * 4.0\n ts_warm = np.ones(nt) * 20.0\n\n C_p = 15246\n C_m = 2400.0\n dalpha_r = 0.0005284\n dalpha_m = 0.0004961\n dalpha_p = 0.0005607\n gamma = 482.6\n cold_mask = x < 0.5 * cable_len\n warm_mask = np.invert(cold_mask) # == False\n temp_real = np.ones((len(x), nt))\n temp_real[cold_mask] *= ts_cold + 273.15\n temp_real[warm_mask] *= ts_warm + 273.15\n\n st = (\n C_p\n * np.exp(-(dalpha_r + dalpha_p) * x[:, None])\n * np.exp(gamma / temp_real)\n / (np.exp(gamma / temp_real) - 1)\n )\n ast = (\n C_m\n * np.exp(-(dalpha_r + dalpha_m) * x[:, None])\n / (np.exp(gamma / temp_real) - 1)\n )\n rst = (\n C_p\n * np.exp(-(dalpha_r + dalpha_p) * (-x[:, None] + cable_len))\n * np.exp(gamma / temp_real)\n / (np.exp(gamma / temp_real) - 1)\n )\n rast = (\n C_m\n * np.exp(-(dalpha_r + dalpha_m) * (-x[:, None] + cable_len))\n / (np.exp(gamma / temp_real) - 1)\n )\n\n alpha = np.mean(np.log(rst / rast) - np.log(st / ast), axis=1) / 2\n alpha -= alpha[0] # the first x-index is where to start counting\n dalpha = dalpha_p - dalpha_m\n alpha2 = x * dalpha\n\n # to ensure the st, rst, ast, rast were correctly defined.\n np.testing.assert_allclose(alpha2, alpha, atol=1e-15, rtol=0)\n\n ds = DataStore(\n {\n \"st\": ([\"x\", \"time\"], st),\n \"ast\": ([\"x\", \"time\"], ast),\n \"rst\": ([\"x\", \"time\"], rst),\n \"rast\": ([\"x\", \"time\"], rast),\n \"userAcquisitionTimeFW\": ([\"time\"], np.ones(nt)),\n \"userAcquisitionTimeBW\": ([\"time\"], np.ones(nt)),\n \"cold\": ([\"time\"], ts_cold),\n \"warm\": ([\"time\"], ts_warm),\n },\n coords={\"x\": x, \"time\": time},\n attrs={\"isDoubleEnded\": \"1\"},\n )\n\n sections = {\n \"cold\": [slice(0.0, 0.35 * cable_len)],\n \"warm\": [slice(0.67 * cable_len, cable_len)],\n }\n\n # WLS\n ds.calibration_double_ended(\n sections=sections,\n st_var=1e-12,\n ast_var=1e-12,\n rst_var=1e-12,\n rast_var=1e-12,\n method=\"wls\",\n solver=\"sparse\",\n fix_gamma=(gamma, 0.0),\n )\n\n assert_almost_equal_verbose(ds.gamma.values, gamma, decimal=18)\n assert_almost_equal_verbose(ds.alpha.values, alpha, decimal=9)\n assert_almost_equal_verbose(ds.tmpf.values, temp_real - 273.15, decimal=6)\n assert_almost_equal_verbose(ds.tmpb.values, temp_real - 273.15, decimal=6)\n assert_almost_equal_verbose(ds.tmpw.values, temp_real - 273.15, decimal=6)\n\n pass", "def solid_surface_density_CL2013_given_observed_catalog(sss_per_sys, max_core_mass=10.):\n a_obs_per_sys = gen.a_from_P(sss_per_sys['P_obs'], sss_per_sys['Mstar_obs'][:,None])\n a_obs = a_obs_per_sys[sss_per_sys['P_obs'] > 0]\n radii_obs = sss_per_sys['radii_obs'][sss_per_sys['P_obs'] > 0]\n core_mass_obs = generate_planet_mass_from_radius_Ning2018_table_above_lognormal_mass_earthlike_rocky_below_vec(radii_obs)\n core_mass_obs[core_mass_obs > max_core_mass] = max_core_mass\n sigma_obs = solid_surface_density_CL2013(core_mass_obs, a_obs)\n return sigma_obs, core_mass_obs, a_obs", "def main():\n \n # Particle in SHO - c.f. Mocz & Succi (2015) Fig. 
2\n # parameters\n n = 100 # number of particles\n dt = 0.02 # timestep\n nt = 100 # number of timesteps\n nt_setup = 400 # number of timesteps to set up simulation\n n_out = 25 # plot solution every nout steps\n b = 4 # velocity damping for acquiring initial condition\n m = 1/n # mass of SPH particle ( m * n = 1 normalizes |wavefunction|^2 to 1)\n h = 40/n # smoothing length\n t = 0. # time\n\n # plot potential\n xx = np.linspace(-4.0, 4.0, num=400)\n xx = np.reshape(xx,(xx.size,1))\n fig = plt.plot(xx, 0.5*xx**2, linewidth=5, color=[0.7, 0.7, 0.9])\n \n # initialize\n x = np.linspace(-3.0, 3.0, num=n)\n x = np.reshape(x,(n,1))\n u = np.zeros((n,1))\n \n rho = density( x, m, h )\n P = pressure( x, rho, m, h )\n a = acceleration( x, u, m, rho, P, b, h )\n\n # get v at t=-0.5*dt for the leap frog integrator using Euler's method\n u_mhalf = u - 0.5 * dt * a\n\n # main loop (time evolution)\n for i in np.arange(-nt_setup, nt): # negative time (t<0, i<0) is used to set up initial conditions\n\n # leap frog\n u_phalf = u_mhalf + a*dt\n x = x + u_phalf*dt\n u = 0.5*(u_mhalf+u_phalf)\n u_mhalf = u_phalf\n if (i >= 0):\n t = t + dt\n print(\"%.2f\" % t)\n \n if (i == -1 ): # switch off damping before t=0\n u = np.zeros((n,1)) + 1.0\n u_mhalf = u\n b = 0 # switch off damping at time t=0\n \n # update densities, pressures, accelerations\n rho = density( x, m, h )\n P = pressure( x, rho, m, h )\n a = acceleration( x, u, m, rho, P, b, h)\n \n # plot solution every n_out steps\n if( (i >= 0) and (i % n_out) == 0 ):\n xx = np.linspace(-4.0, 4.0, num=400)\n xx = np.reshape(xx,(xx.size,1))\n rr = probeDensity(x, m, h, xx)\n rr_exact = 1./np.sqrt(np.pi) * np.exp(-(xx-np.sin(t))**2/2.)**2\n fig = plt.plot(xx, rr_exact, linewidth=2, color=[.6, .6, .6])\n fig = plt.plot(xx, rr, linewidth=2, color=[1.*i/nt, 0, 1.-1.*i/nt], label='$t='+\"%.2f\" % t +'$')\n # plot the t<0 damping process for fun\n if( i==-nt_setup or i==-nt_setup*3/4 or i==-nt_setup/2 ):\n xx = np.linspace(-4.0, 4.0, num=400)\n xx = np.reshape(xx,(xx.size,1))\n rr = probeDensity(x, m, h, xx)\n fig = plt.plot(xx, rr, linewidth=1, color=[0.9, 0.9, 0.9])\n \n plt.legend()\n plt.xlabel('$x$')\n plt.ylabel('$|\\psi|^2$')\n plt.axis([-2, 4, 0, 0.8])\n plt.savefig('solution.pdf', aspect = 'normal', bbox_inches='tight', pad_inches = 0)\n plt.close()", "def contract_tenors(self):\n\n\tself.r_outer_r[:,:,0,1,:] = self.r_outer_r[:,:,0,1,:]/(1. - self.k_dot_r[0,1,:])\n\tself.r_outer_r[:,:,0,2,:] = self.r_outer_r[:,:,0,2,:]/(1. - self.k_dot_r[0,2,:])\n\t\n\tself.r_outer_r[:,:,1,0,:] = self.r_outer_r[:,:,1,0,:]/(1. - self.k_dot_r[1,0,:])\n\tself.r_outer_r[:,:,1,2,:] = self.r_outer_r[:,:,1,2,:]/(1. - self.k_dot_r[1,2,:])\n\t\n\tself.r_outer_r[:,:,2,0,:] = self.r_outer_r[:,:,2,0,:]/(1. - self.k_dot_r[2,0,:])\n\tself.r_outer_r[:,:,2,1,:] = self.r_outer_r[:,:,2,1,:]/(1. 
- self.k_dot_r[2,1,:])\n\n\tself.delta_l = np.zeros((3,3,self.N),dtype=np.complex_)\n \n\tself.delta_l[0,1,:] = get_l(self,0,1)\n\tself.delta_l[1,0,:] = get_l(self,1,0)\n\t\n\tself.delta_l[0,2,:] = get_l(self,0,2)\n\tself.delta_l[2,0,:] = get_l(self,2,0)\n\t\n\tself.delta_l[1,2,:] = get_l(self,1,2)\n\tself.delta_l[2,1,:] = get_l(self,2,1)\n \n\treturn", "def calculate_godunov_fluxes(densities, pressures, vel_x, vel_y, gamma):\n density_fluxes = np.zeros((densities.shape[0] - 1, densities.shape[1] - 1, 2))\n momentum_flux_x = np.zeros(density_fluxes.shape)\n momentum_flux_y = np.zeros(density_fluxes.shape)\n total_energy_fluxes = np.zeros(density_fluxes.shape)\n\n i_length, j_length = np.shape(densities)\n for i in range(i_length - 1):\n for j in range(j_length - 1):\n solver = IterativeRiemannSolver()\n\n # Generate left and right states from cell averaged values\n left_state = ThermodynamicState1D(pressures[i, j], densities[i, j], vel_x[i, j], gamma[i, j])\n right_state = ThermodynamicState1D(pressures[i + 1, j], densities[i + 1, j], vel_x[i + 1, j], gamma[i + 1, j])\n\n # Solve Riemann problem for star states\n p_star, u_star = solver.get_star_states(left_state, right_state)\n\n # Calculate fluxes using solver sample function\n p_flux, u_flux, rho_flux, is_left = solver.sample(0.0, left_state, right_state, p_star, u_star)\n\n # Store fluxes in array\n v_y = vel_y[i, j] if is_left else vel_y[i + 1, j]\n flux_gamma = left_state.gamma if is_left else right_state.gamma\n density_fluxes[i, j - 1, 0] = rho_flux * u_flux\n momentum_flux_x[i, j - 1, 0] = rho_flux * u_flux * u_flux + p_flux\n momentum_flux_y[i, j - 1, 0] = rho_flux * u_flux * v_y\n e_tot = p_flux / (flux_gamma - 1) + 0.5 * rho_flux * u_flux * u_flux + 0.5 * rho_flux * v_y ** 2\n total_energy_fluxes[i, j - 1, 0] = (p_flux + e_tot) * u_flux\n\n # Generate left and right states from cell averaged values\n left_state = ThermodynamicState1D(pressures[i, j], densities[i, j], vel_y[i, j], gamma[i, j])\n right_state = ThermodynamicState1D(pressures[i, j + 1], densities[i, j + 1], vel_y[i, j + 1], gamma[i, j + 1])\n\n # Solve Riemann problem for star states\n p_star, v_star = solver.get_star_states(left_state, right_state)\n\n # Calculate fluxes using solver sample function\n p_flux, v_flux, rho_flux, is_left = solver.sample(0.0, left_state, right_state, p_star, v_star)\n\n # Store fluxes in array\n v_x = vel_x[i, j] if is_left else vel_x[i, j + 1]\n flux_gamma = left_state.gamma if is_left else right_state.gamma\n density_fluxes[i - 1, j, 1] = rho_flux * v_flux\n momentum_flux_x[i - 1, j, 1] = rho_flux * v_x * v_flux\n momentum_flux_y[i - 1, j, 1] = rho_flux * v_flux * v_flux + p_flux\n e_tot = p_flux / (flux_gamma - 1) + 0.5 * rho_flux * v_flux * v_flux + 0.5 * rho_flux * v_x ** 2\n total_energy_fluxes[i - 1, j, 1] = (p_flux + e_tot) * v_flux\n\n return density_fluxes, momentum_flux_x, momentum_flux_y, total_energy_fluxes", "def run_grav(self):\n\n # Solucao direta\n self.prod_w = []\n self.prod_o = []\n t0 = time.time()\n # self.set_volumes_in_primal()\n self.set_sat_in()\n self.set_lamb_2()\n self.set_global_problem_vf_3_gr1_bif()\n self.Pf = self.solve_linear_problem(self.trans_fine, self.b, len(self.all_fine_vols_ic))\n self.organize_Pf()\n del self.Pf\n self.mb.tag_set_data(self.pf_tag, self.all_fine_vols, np.asarray(self.Pf_all))\n del self.Pf_all\n self.test_conservation_fine()\n # self.store_flux_pf_gr_bif = self.create_flux_vector_pf_gr_bif_1()\n\n \"\"\"\n ################################################################\n # 
Solucao Multiescala\n self.calculate_restriction_op_2()\n self.calculate_prolongation_op_het()\n self.organize_op()\n self.Tc = self.modificar_matriz(self.pymultimat(self.pymultimat(self.trilOR, self.trans_fine, self.nf_ic), self.trilOP, self.nf_ic), self.nc, self.nc)\n self.Qc = self.modificar_vetor(self.multimat_vector(self.trilOR, self.nf_ic, self.b), self.nc)\n self.Pc = self.solve_linear_problem(self.Tc, self.Qc, self.nc)\n self.set_Pc()\n self.Pms = self.multimat_vector(self.trilOP, self.nf_ic, self.Pc)\n\n del self.trilOP\n del self.trilOR\n del self.Tc\n del self.Qc\n del self.Pc\n\n self.organize_Pms()\n del self.Pms\n self.mb.tag_set_data(self.pms_tag, self.all_fine_vols, np.asarray(self.Pms_all))\n del self.Pms_all\n self.erro()\n\n self.test_conservation_coarse_gr()\n # self.Neuman_problem_6_gr()\n # self.store_flux_pms_gr = self.create_flux_vector_pms_gr()\n ####################################################################\n \"\"\"\n\n\n\n\n\n\n\n print('acaboooou')\n self.mb.write_file('new_out_bif_gr.vtk')\n\n\n shutil.copytree(self.caminho1, self.pasta)", "def excitation_force(w, draft, radius, water_depth):\n k = w**2 / 9.81\n ka = k * radius\n kd = k * draft\n kh = k * water_depth\n\n rho = 1025\n g = 9.81\n\n # XXX check this!\n f1 = -1j * (jn(1, ka) - jnd(1, ka) * hankel2(1, ka) / hankel2d(1, ka))\n #f1 = -1j * (jn(1, ka) - jnd(1, ka) * hankel1(1, ka) / hankel1d(1, ka))\n M = (kd*sinh(kh-kd) + cosh(kh-kd) - cosh(kh)) / (k**2 * cosh(kh))\n F = (-sinh(kh-kd) + sinh(kh)) / (k * cosh(kh))\n\n zs = zeros_like(F, dtype=np.complex)\n X = np.c_[F, zs, zs, zs, M, zs]\n X *= (-rho * g * pi * radius) * 2 * f1[:, newaxis]\n\n return X", "def fdspring(xy, v, NL, KL, BM, Mm, beta):\n NP, nn = np.shape(NL)\n if np.shape(xy)[1] == 2:\n '''2D version'''\n vecx = np.array([[KL[i, j] * (xy[i, 0] - xy[NL[i, j], 0]) for j in range(nn)] for i in range(NP)])\n vecy = np.array([[KL[i, j] * (xy[i, 1] - xy[NL[i, j], 1]) for j in range(nn)] for i in range(NP)])\n mag = np.sqrt(vecx ** 2 + vecy ** 2)\n # KLnoz = KL.copy() #no zeros\n # KLnoz[KLnoz ==0] = 1. --> same value as mag[mag==0], so that stretch=0 for those\n stretch = mag - BM\n mag[mag == 0.] = 1. # avoid divide by zero error\n # print(stretch)\n dxvec = np.sum(stretch * vecx / mag, axis=-1) / Mm\n dyvec = np.sum(stretch * vecy / mag, axis=-1) / Mm\n # damping term\n damp_dv = np.array([beta / Mm[i] * v[i] for i in range(NP)])\n # add them up\n ftx = -np.hstack((dxvec.reshape(NP, 1), dyvec.reshape(NP, 1))) - damp_dv\n else:\n '''3D version'''\n vecx = np.array([[KL[i, j] * (xy[i, 0] - xy[NL[i, j], 0]) for j in range(nn)] for i in range(NP)])\n vecy = np.array([[KL[i, j] * (xy[i, 1] - xy[NL[i, j], 1]) for j in range(nn)] for i in range(NP)])\n vecz = np.array([[KL[i, j] * (xy[i, 2] - xy[NL[i, j], 2]) for j in range(nn)] for i in range(NP)])\n mag = np.sqrt(vecx ** 2 + vecy ** 2 + vecz ** 2)\n # KLnoz = KL.copy() #no zeros\n # KLnoz[KLnoz ==0] = 1. #same value as mag[mag==0], so that stretch=0 for those\n stretch = mag - BM\n mag[mag == 0.] = 1. # avoid divide by zero error\n dxvec = np.sum(stretch * vecx / mag, axis=-1) / Mm\n dyvec = np.sum(stretch * vecy / mag, axis=-1) / Mm\n dzvec = np.sum(stretch * vecz / mag, axis=-1) / Mm\n # damping term\n damp_dv = np.array([beta / Mm[i] * v[i] for i in range(NP)])\n # add them up\n ftx = -np.hstack((dxvec.reshape(NP, 1), dyvec.reshape(NP, 1), dyvec.reshape(NP, 1))) - damp_dv\n return ftx", "def Schechter_M_z(M, redshift, richness):\n\treturn 0.4 * n.log(10.) 
* 10**logPhi_evol(redshift, richness) * 10**(0.4 * (M_s_evol(redshift, richness) - M) * (alpha_evol(redshift, richness) + 1)) * n.e**( -10** ( 0.4 * (M_s_evol(redshift,richness) - M)))", "def heat_vaporization_func(ts):\n heat_vaporization = np.copy(ts).astype(np.float64)\n heat_vaporization -= 273.15\n heat_vaporization *= -0.00236\n heat_vaporization += 2.501\n heat_vaporization *= 1E6\n return heat_vaporization.astype(np.float32)", "def get_sn2005ek(colorplt=False):\n z = 0.016551\n ebv = 0.210\n D = cosmo.luminosity_distance([z])[0].value * 1e+6 # in pc\n dis_mod = 5*np.log10(D / 10)\n t_max = 53639.9\n print (\"adopt r band t_max from Drout+13\")\n \n # tb = pd.read_csv('/Users/yuhanyao/Desktop/ZTF18abfcmjw/data/Drout2013/table1', sep='\\t')\n # tb = tb.drop(columns=[\"Unnamed: 6\"])\n \n mjds = np.array([53639.3, 53640.3, 53641.3, 53642.2, 53643.2, 53645.3,\n 53646.5, 53648.0, 53649.2, 53650.4, 53651.3, 53652.5,\n 53654.2, 53655.2, 53656.2, 53657.2])\n \n Bmags = np.array([18.25, 18.38, 18.65, np.nan, 19.10, 19.71,\n 20.07, np.nan, 20.67, 20.90, 21.05, np.nan,\n 21.74, np.nan, np.nan, np.nan])\n \n Bmag_uncs = np.array([0.02, 0.03, 0.02, np.nan, 0.05, 0.07, \n 0.07, np.nan, 0.04, 0.04, 0.04, np.nan,\n 0.12, np.nan, np.nan, np.nan])\n \n Vmags = np.array([17.83, 18.03, 17.92, np.nan, 18.24, 18.66,\n 18.93, 19.48, 19.63, 19.86, 19.98, 20.35,\n 20.60, 20.74, 20.88, 21.22])\n \n Vmag_uncs = np.array([0.02, 0.03, 0.01, np.nan, 0.02, 0.02,\n 0.02, 0.06, 0.03, 0.03, 0.04, 0.05, \n 0.08, 0.10, 0.08, 0.13])\n \n Rmags = np.array([17.46, 17.41, 17.60, 17.69, 17.86, 18.18, \n np.nan, 18.83, 19.03, 19.26, 19.48, 19.75,\n 20.08, np.nan, 20.47, np.nan])\n \n Rmag_uncs = np.array([0.01, 0.02, 0.01, 0.02, 0.01, 0.01,\n np.nan, 0.03, 0.02, 0.02, 0.02, 0.04,\n 0.05, np.nan, 0.08, np.nan])\n\n Imags = np.array([17.20, 17.13, 17.18, np.nan, 17.47, 17.71, \n np.nan, 18.13, 18.26, 18.51, 18.61, 18.74, \n 19.01, np.nan, 19.47, np.nan])\n \n Imag_uncs = np.array([0.02, 0.04, 0.02, np.nan, 0.03, 0.02,\n np.nan, 0.06, 0.02, 0.02, 0.02, 0.03,\n 0.05, np.nan, 0.06, np.nan])\n \n mymjds = np.hstack([mjds, mjds, mjds, mjds])\n mymags = np.hstack([Bmags, Vmags, Rmags, Imags])\n myemags = np.hstack([Bmag_uncs, Vmag_uncs, Rmag_uncs, Imag_uncs])\n myfilts = np.hstack([ np.repeat(\"B\", len(Bmags)),\n np.repeat(\"V\", len(Bmags)),\n np.repeat(\"R\", len(Rmags)),\n np.repeat(\"I\", len(Imags)) ])\n ix = ~np.isnan(mymags)\n tb = pd.DataFrame({'mjd': mymjds[ix],\n 'mag': mymags[ix],\n 'emag': myemags[ix],\n \"filter\": myfilts[ix]})\n \n ixB = tb['filter'].values==\"B\"\n ixV = tb['filter'].values==\"V\"\n ixR = tb['filter'].values==\"R\"\n ixI = tb['filter'].values==\"I\"\n \n tb['wave'] = np.zeros(len(tb))\n tb['wave'].values[ixB] = 4359\n tb['wave'].values[ixV] = 5430\n tb['wave'].values[ixR] = 6349\n tb['wave'].values[ixI] = 8797\n \n tb['mag0'] = tb['mag'] - extinction.ccm89(tb['wave'].values, 3.1*ebv, 3.1)\n tb['mag0_abs'] = tb['mag0'] - dis_mod\n tb['tmax_rf'] = (tb['mjd'] - t_max) / (1+z)\n if colorplt==False:\n return tb\n else:\n tb = add_datecol(tb)\n ix = np.in1d(tb[\"filter\"].values, np.array(['B', 'R', 'I']))\n tb = tb[ix]\n\n dates = get_date_span(tb)\n datesave = []\n for i in range(len(dates)):\n x = dates[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n if len(tbsub)!=0:\n flts = tbsub['filter'].values\n if \"R\" in flts and np.sum(np.unique(flts))!=1:\n datesave.append(x)\n datesave = np.array(datesave)\n \n mcolor = []\n mcolor_unc = []\n mjds = []\n colorname = []\n for i in 
range(len(datesave)):\n x = datesave[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n gtb = tbsub[tbsub[\"filter\"].values==\"B\"]\n rtb = tbsub[tbsub[\"filter\"].values==\"R\"]\n itb = tbsub[tbsub[\"filter\"].values==\"I\"]\n if len(gtb)!=0:\n gmjds = gtb[\"mjd\"].values\n gmags = gtb[\"mag0\"].values\n gemags = gtb[\"emag\"].values\n gwtgs = 1/gemags**2\n gmag = np.sum(gmags * gwtgs) / np.sum(gwtgs)\n gmjd = np.sum(gmjds * gwtgs) / np.sum(gwtgs)\n gemag = 1/ np.sqrt(np.sum(gwtgs))\n if len(rtb)!=0:\n rmjds = rtb[\"mjd\"].values\n rmags = rtb[\"mag0\"].values\n remags = rtb[\"emag\"].values\n rwtgs = 1/remags**2\n rmag = np.sum(rmags * rwtgs) / np.sum(rwtgs)\n rmjd = np.sum(rmjds * rwtgs) / np.sum(rwtgs)\n remag = 1/ np.sqrt(np.sum(rwtgs))\n if len(itb)!=0:\n imjds = itb[\"mjd\"].values\n imags = itb[\"mag0\"].values\n iemags = itb[\"emag\"].values\n iwtgs = 1/iemags**2\n imag = np.sum(imags * iwtgs) / np.sum(iwtgs)\n imjd = np.sum(imjds * iwtgs) / np.sum(iwtgs)\n iemag = 1/ np.sqrt(np.sum(iwtgs))\n if len(gtb)!=0 and len(rtb)!=0:\n mcolor.append(gmag - rmag)\n mjds.append( 0.5 * (gmjd + rmjd) )\n mcolor_unc.append( np.sqrt(gemag**2 + remag**2) )\n colorname.append(\"BmR\")\n if len(rtb)!=0 and len(itb)!=0:\n mcolor.append(rmag - imag)\n mjds.append( 0.5 * (rmjd + imjd) )\n mcolor_unc.append( np.sqrt(remag**2 + iemag**2) )\n colorname.append(\"RmI\")\n \n ctb = Table(data = [mjds, mcolor, mcolor_unc, colorname],\n names = [\"mjd\", \"c\", \"ec\", \"cname\"])\n \n ctb['tmax_rf'] = (ctb['mjd'] - t_max) / (1+z)\n ctb = ctb.to_pandas()\n return ctb", "def build_rhs():\n\n def div(\n coeff_rho,\n momentum_x,\n momentum_y,\n momentum_z,\n ):\n \"\"\"Computes the divergence of the velocity field.\"\"\"\n # Compute the fourth order derivative of the pressure for the face\n # velocity correction.\n p_corr = (\n states['p']\n if self._params.enable_rhie_chow_correction else states['dp'])\n d4p_dx4 = self._kernel_op.apply_kernel_op_x(p_corr, 'k4d2x')\n d4p_dy4 = self._kernel_op.apply_kernel_op_y(p_corr, 'k4d2y')\n d4p_dz4 = self._kernel_op.apply_kernel_op_z(p_corr, 'k4d2z',\n 'k4d2zsh')\n\n # Compute velocity gradient based on interpolated values on cell faces.\n coeff_x = dt / (4. * coeff_rho * dx**2)\n du = self._kernel_op.apply_kernel_op_x(momentum_x, 'kDx')\n du_dx = [\n du_i / (2. * dx) + coeff_x * d4p_dx4_i\n for du_i, d4p_dx4_i in zip(du, d4p_dx4)\n ]\n\n coeff_y = dt / (4. * coeff_rho * dy**2)\n dv = self._kernel_op.apply_kernel_op_y(momentum_y, 'kDy')\n dv_dy = [\n dv_i / (2. * dy) + coeff_y * d4p_dy4_i\n for dv_i, d4p_dy4_i in zip(dv, d4p_dy4)\n ]\n\n coeff_z = dt / (4. * coeff_rho * dz**2)\n dw = self._kernel_op.apply_kernel_op_z(momentum_z, 'kDz', 'kDzsh')\n dw_dz = [\n dw_i / (2. 
* dz) + coeff_z * d4p_dz4_i\n for dw_i, d4p_dz4_i in zip(dw, d4p_dz4)\n ]\n\n return [\n du_dx_i + dv_dy_i + dw_dz_i\n for du_dx_i, dv_dy_i, dw_dz_i in zip(du_dx, dv_dy, dw_dz)\n ]\n\n def add_factor(\n v,\n factor,\n ):\n return [factor * v_i for v_i in v]\n\n b_terms = {\n _B_TERM_SOURCE_RHO: add_factor(src_rho, inv_dt),\n }\n if isinstance(rho_info, ConstantDensityInfo):\n b_terms.update({\n _B_TERM_DIV:\n add_factor(\n div(rho_info.rho, states['u'], states['v'], states['w']),\n inv_dt * rho_info.rho),\n _B_TERM_DRHO_DT: [\n tf.zeros_like(src_rho_i) for src_rho_i in src_rho\n ],\n })\n\n elif isinstance(rho_info, VariableDensityInfo):\n b_terms.update({\n _B_TERM_DIV:\n add_factor(\n div(1.0, states['rho_u'], states['rho_v'], states['rho_w']),\n inv_dt),\n _B_TERM_DRHO_DT:\n add_factor(rho_info.drho_dt, inv_dt),\n })\n\n else:\n raise ValueError('`rho_info` has to be either `ConstantDensityInfo` or '\n '`VariableDensityInfo`.')\n\n # pylint: disable=g-complex-comprehension\n return [(div_i + drho_dt_i - src_rho_i)\n for div_i, drho_dt_i, src_rho_i in zip(\n b_terms[_B_TERM_DIV],\n b_terms[_B_TERM_DRHO_DT],\n b_terms[_B_TERM_SOURCE_RHO],\n )], b_terms\n # pylint: enable=g-complex-comprehension", "def carsurf_loop(config):\n site_names = config[\"site_names\"][0]\n print(site_names)\n total_sites = len(site_names)\n\n dx = config[\"dx\"].copy()\n\n # how many hours back should footprints be calculated?\n # roughly, how long should time_before_observation be?\n length = config[\"length\"].copy()\n # I think this is the right way to adjust it\n # We have particle releases at the beginning and end of the `OBS_WINDOW`\n # both of these will need `length` bins back to put observations in\n footprint_nbins = math.ceil((length + OBS_WINDOW) / FLUX_WINDOW)\n # how many days before the first day of the month the simulation goes\n # how far back did LPD calculate trajectories?\n lag = int(config[\"lag\"])\n\n site_alt = config[\"alt\"][0].copy()\n site_lon = config[\"lon\"][0].copy()\n site_lat = config[\"lat\"][0].copy()\n\n dimx = int(config[\"dimx\"])\n dimy = int(config[\"dimy\"])\n\n days_tot = config[\"num_days\"]\n\n out_dir = config[\"outdir\"][0].decode(\"ascii\")\n\n year = int(config[\"year\"][0])\n month = int(config[\"month\"][0])\n\n simulation_earliest_obs = datetime.datetime(year, month, 1)\n # technically the start of the first observation of the next month\n simulation_latest_obs = (simulation_earliest_obs +\n dateutil.relativedelta.relativedelta(months=+1))\n simulation_zero = (simulation_earliest_obs -\n datetime.timedelta(days=lag))\n # obs_time_bounds = dateutil.rrule.rrule(\n # dateutil.rrule.HOURLY, dtstart=simulation_earliest_obs,\n # interval=OBS_WINDOW, until=simulation_latest_obs,\n # cache=True)\n n_obs_bins = ((simulation_latest_obs - simulation_earliest_obs) //\n datetime.timedelta(hours=OBS_WINDOW))\n\n print(\"Simulation zero: \", simulation_zero)\n print(\"Earliest release time:\", simulation_earliest_obs)\n print(\"Last release time: \", simulation_latest_obs)\n\n def obs_var_to_index(sec_since_start):\n \"\"\"Get the index for the bin.\n\n Parameters\n ----------\n bin: int\n\n Returns\n -------\n int\n The index in the NetCDF file created\n 0 is the beginning of the simulation,\n at the end of the time window.\n \"\"\"\n sec_since_first_obs = (sec_since_start -\n (simulation_earliest_obs -\n simulation_zero).total_seconds())\n bin_num = int(sec_since_first_obs // (SECONDS_PER_HOUR * OBS_WINDOW))\n\n # alternate: netCDF4.numtodate(sec_since_start, 
lpdm_obs_time_unit)\n # - simulation_unit\n # // datetime.timedelta(hours=OBS_WINDOW)\n\n # use time at the end of the window, not the start\n return n_obs_bins - bin_num\n\n print(\"Bin index for last release: \",\n obs_var_to_index((simulation_latest_obs -\n simulation_zero).total_seconds()))\n print(\"Bin index for first release:\",\n obs_var_to_index((simulation_earliest_obs -\n simulation_zero).total_seconds()))\n\n # int is more precise than float for this range (up to 4 billion)\n # as we can only have a million particles at a time\n # (for now, run_lprm maxnp)\n # this should also be faster\n # final = np.zeros((total_sites, length, dimy, dimx),\n # dtype=np.int32)\n\n # list of cubes with influence function\n # final_list = collections.deque((), config[\"lpdm_terase\"]//3600)\n # list of release times corresponding to those cubes\n # release_times = collections.deque((), config[\"lpdm_terase\"]//3600)\n # file_name_list = collections.deque((), config[\"lpdm_terase\"]//3600)\n\n wrf_out = read_wrf_grid(config[\"wrf_file\"][0])\n\n # LPDM works in minutes for the most part\n time_unit = \"minutes since {start:{date_fmt:s}}\".format(\n start=simulation_zero, date_fmt=UDUNITS_DATE)\n\n print(\"About to create file\")\n ds = netCDF4.Dataset(\n os.path.join(\n out_dir,\n \"LPDM_{year:04d}_{month:02d}_{flux_window:02d}\"\n \"hrly_{dx:03d}km_molar_footprints.nc4\".format(\n year=year, month=month, flux_window=FLUX_WINDOW,\n dx=int(dx))),\n \"w\", format=\"NETCDF4\")\n set_global_attributes(ds)\n\n ds.time_coverage_start = simulation_earliest_obs.strftime(ACDD_DATE)\n ds.time_coverage_end = simulation_latest_obs.strftime(ACDD_DATE)\n ds.time_coverage_duration = \"P0000-01-00T00:00:00\"\n ds.time_coverage_resolution = \"P0000-00-00T{obs_window:02d}:00:00\".format(\n obs_window=OBS_WINDOW)\n\n infl_fun_var = set_up_file(\n ds, total_sites, footprint_nbins,\n dimy, dimx, wrf_out, time_unit, site_names)\n\n ds.variables[\"site_lats\"][:] = site_lat\n ds.variables[\"site_lons\"][:] = site_lon\n ds.variables[\"site_heights\"][:] = site_alt\n print(\"Created file\")\n # ds.variables[\"site_names\"][:] = np.char.ljust(\n # site_names, int(site_names.dtype.str[2:]), \" \")\n\n # loop over input files\n # loop goes backward in time from first file output to last\n for step, current_time in zip(\n range(int(HOURS_PER_DAY * days_tot), 0, -FLUX_WINDOW),\n reversed(tuple(dateutil.rrule.rrule(\n dateutil.rrule.HOURLY,\n simulation_zero,\n FLUX_WINDOW,\n until=simulation_latest_obs)))):\n print(\"Day: \", step // HOURS_PER_DAY - 1, step/HOURS_PER_DAY,\n \"\\tHour: \", step % HOURS_PER_DAY)\n\n # which file to open first (r_{first_file:d}m.dat)\n first_file = step * MINUTES_PER_HOUR\n\n # end of the period for flux integration\n # earliest file to open? 
(minutes)\n # now unused.\n # end_flights = first_file - length * MINUTES_PER_HOUR\n\n print(\"Current output time:\", current_time)\n\n # set up the cube to receive the data\n # current_time = simulation_zero + datetime.timedelta(minutes=flights)\n\n # LPDM output codes release time in seconds\n # in a given file, we will have observations from the current time\n # forward for lag days (fluxes influence future obs)\n\n # last release we care about\n # oldest particles in the first file this iteration\n last_obs = next_larger_multiple(\n (min(current_time + datetime.timedelta(hours=float(length)),\n simulation_latest_obs) -\n simulation_zero).total_seconds(),\n OBS_WINDOW * SECONDS_PER_HOUR)\n print(\"Last release in this iteration:\",\n simulation_zero + datetime.timedelta(seconds=last_obs))\n # first release we care about\n # newest particles in the last file this iteration\n first_obs = next_smaller_multiple(\n (max(current_time - datetime.timedelta(hours=FLUX_WINDOW),\n simulation_earliest_obs) -\n simulation_zero).total_seconds(),\n OBS_WINDOW * SECONDS_PER_HOUR)\n print(\"First release in this iteration:\",\n simulation_zero + datetime.timedelta(seconds=first_obs))\n print(\"Last release should be no later than:\",\n simulation_zero + datetime.timedelta(seconds=first_obs) +\n datetime.timedelta(hours=FLUX_WINDOW))\n\n n_obs_bins_here = (last_obs - first_obs) // SECONDS_PER_HOUR // OBS_WINDOW\n\n # new_cube = create_vars...\n # final_list.append(new_cube)\n # file_name_list.append(\n # \"INFUN_{date:M%m_D%d_H%H}.nc4\".format(date=current_time))\n # release_times.append(current_time)\n\n # go through the files for the hour\n # increase the dtype if LPDM maxnp * n_files_per_hour\n # goes above about 3 billion\n # length should be footprint_nbins\n flux_window_data = np.zeros((dimx, dimy, total_sites, n_obs_bins_here),\n dtype=np.int16)\n file_per_hour = int(config[\"num_file_per_h\"])\n minutes_per_file = MINUTES_PER_HOUR // file_per_hour\n\n flux_time_var = ds.variables[\"flux_time\"]\n flux_time_bounds_var = ds.variables[\"flux_time_bnds\"]\n\n obs_time_var = ds.variables[\"observation_time\"]\n obs_time_bounds_var = ds.variables[\"observation_time_bnds\"]\n\n print(\"Reading LPD output\")\n\n # loop over flux files in this window\n for i in range(FLUX_WINDOW):\n for minute in range(MINUTES_PER_HOUR, 0, -minutes_per_file):\n # get the data from the file\n # Probably in C\n data = np.genfromtxt(\n os.path.join(\n config[\"indir\"][0].decode(\"ascii\"),\n \"r_{fli:d}m.dat\".format(\n fli=(first_file - i * MINUTES_PER_HOUR - minute))),\n # number of lines determined from file\n skip_header=1,\n # particle id not needed\n usecols=(1, 2, 3, 4, 5),\n )\n\n # given as x, y, z, site, obs_time?\n # obs_time in seconds, apparently\n mins = ( 0, 0, 0,\n 1, first_obs)\n maxs = (float(dimx*dx), float(dimy*dx), CLOSE_TO_GROUND,\n total_sites, last_obs)\n\n # probably in C\n binned_data, bin_desc = np.histogramdd(\n data, bins=(dimx, dimy, 1, total_sites, n_obs_bins_here),\n # also kind of cheating\n range=np.column_stack((mins, maxs))\n )\n del data\n\n # drop the z dimension from the counts\n # flux_window_data += np.asanyarray(binned_data[:,:,0,:,:],\n # dtype=np.int32)\n # binned_data is a float array,\n # so need unsafe casting to bring back integer counts\n # C\n np.add(flux_window_data, binned_data[:,:,0,:,:],\n out=flux_window_data, casting=\"unsafe\")\n del binned_data, bin_desc\n\n print(\"Read LPD output; writing data\")\n\n # find the indicies where the data should go\n # no data for 
particles released before first_obs yet\n # problem does not seem to be here, given range semantics\n obs_start = obs_var_to_index(first_obs)\n obs_end = obs_var_to_index(last_obs)\n # print(obs_end, obs_start)\n\n # simplify the logic and write all times\n # it's a 1-D coord with bounds\n all_dates = tuple(dateutil.rrule.rrule(\n dateutil.rrule.HOURLY,\n simulation_zero + datetime.timedelta(seconds=first_obs),\n OBS_WINDOW,\n until=simulation_zero + datetime.timedelta(seconds=last_obs)))\n # print(all_dates[0], all_dates[-1])\n # print(simulation_zero + datetime.timedelta(seconds=first_obs))\n # print(simulation_zero + datetime.timedelta(seconds=last_obs))\n\n # observation_time is monotone decreasing by design\n # so the index for the chronologically last time will be\n # lower than that of the chronologically earlier time\n for obs_ind, obs_time_val in zip(\n range(obs_end, obs_start),\n reversed(all_dates)):\n # print(\"time is\", obs_time_val, \"mapping to index\",\n # obs_var_to_index((obs_time_val -\n # simulation_zero).total_seconds()),\n # \"\\nIndex being used:\", obs_ind)\n obs_time_var[obs_ind] = netCDF4.date2num(\n obs_time_val,\n time_unit, CALENDAR)\n obs_time_bounds_var[obs_ind, :] = netCDF4.date2num(\n (obs_time_val,\n obs_time_val - datetime.timedelta(hours=OBS_WINDOW)),\n time_unit, CALENDAR)\n print(\"Wrote obs times\")\n\n # get loop invariants\n curr_flux_time = netCDF4.date2num(current_time, time_unit, CALENDAR)\n curr_flux_bounds = netCDF4.date2num((\n current_time,\n current_time - datetime.timedelta(hours=FLUX_WINDOW)),\n time_unit, CALENDAR)\n print(curr_flux_time, time_unit, \"corresponds to\", current_time)\n\n # first obs is at simulation_earliest_obs\n # we are looking at times from current_time\n # to current_time - FLUX_WINDOW\n # first obs is in this window if obs_start == n_obs_bins\n # if obs_start - obs_end is less than footprint_nbins,\n # need to start writing at time_back=difference\n write_offset = footprint_nbins - ((obs_start - obs_end) *\n OBS_WINDOW // FLUX_WINDOW)\n if obs_end == 0:\n # the other time this can occur (the beginning)\n write_offset = 0\n print(\"Writing data with an offset of\", write_offset)\n\n # now add the data to the file\n # reversing a range is rather annoying.\n for obs_bin_num, obs_ind in enumerate(\n reversed(range(obs_end, obs_start))):\n # np.transpose reverses all dimensions if no spec given\n data_part = np.transpose(flux_window_data[:,:,:,obs_bin_num])\n\n # final_list[travel_time][:,-travel_time,:,:] = (\n # CONVERSION_FACTOR * data_part)\n # dataset = netCDF4.Dataset(file_name_list[travel_time], \"a\")\n # infl_fun = dataset.variables[\"H\"]\n\n print(infl_fun_var.shape, obs_start, obs_ind, obs_end,\n data_part.shape)\n\n # This should support OBS_WINDOW != FLUX_WINDOW, in as\n # much generality as necessary.\n print(obs_start - obs_end, n_obs_bins, obs_bin_num)\n\n back_bin_num = obs_bin_num * OBS_WINDOW // FLUX_WINDOW\n infl_fun_var[obs_ind, :, back_bin_num+write_offset, :, :] = data_part\n flux_time_var[obs_ind, back_bin_num+write_offset] = curr_flux_time\n flux_time_bounds_var[\n obs_ind, back_bin_num+write_offset, :] = curr_flux_bounds\n del data_part\n ds.sync()\n del flux_window_data\n del curr_flux_time, curr_flux_bounds\n print(\"Data written\")\n\n # if len(final_list) == final_list.maxlen:\n # # no more data to be added to the cube\n # # time to save it and free the memory\n # finished_cube = final_list.popleft()\n # release_time = release_times.popleft()", "def invert_simple(forward, meas, geom):\n\n 
surface = forward.surface\n RT = forward.RT\n instrument = forward.instrument\n\n vswir_present = False\n if any(forward.surface.wl < 2600):\n vswir_present = True \n\n tir_present = False\n if any(forward.surface.wl > 2600):\n tir_present = True \n\n # First step is to get the atmosphere. We start from the initial state\n # and estimate atmospheric terms using traditional heuristics.\n x = forward.init.copy()\n x_surface, x_RT, x_instrument = forward.unpack(x)\n\n if vswir_present:\n x[forward.idx_RT] = heuristic_atmosphere(RT, instrument, \n x_RT, x_instrument, meas, geom)\n\n # Now, with atmosphere fixed, we can invert the radiance algebraically\n # via Lambertian approximations to get reflectance\n x_surface, x_RT, x_instrument = forward.unpack(x)\n rfl_est, Ls_est, coeffs = invert_algebraic(surface, RT,\n instrument, x_surface, x_RT,\n x_instrument, meas, geom)\n\n # Condition thermal part on the VSWIR portion. Only works for\n # Multicomponent surfaces. Finds the cluster nearest the VSWIR heuristic\n # inversion and uses it for the TIR suface initialization.\n if tir_present:\n tir_idx = np.where(forward.surface.wl > 3000)[0]\n\n if vswir_present:\n x_surface_temp = x_surface.copy()\n x_surface_temp[:len(rfl_est)] = rfl_est\n mu = forward.surface.xa(x_surface_temp, geom)\n rfl_est[tir_idx] = mu[tir_idx]\n else:\n rfl_est = 0.03 * np.ones(len(forward.surface.wl))\n\n # Now we have an estimated reflectance. Fit the surface parameters.\n x_surface[forward.idx_surface] = forward.surface.fit_params(rfl_est, geom)\n\n # Find temperature of emissive surfaces\n if tir_present:\n\n # Estimate the total radiance at sensor, leaving out surface emission\n # Radiate transfer calculations could take place at high spectral resolution\n # so we upsample the surface reflectance\n rfl_hi = forward.upsample(forward.surface.wl, rfl_est)\n rhoatm, sphalb, transm, solar_irr, coszen, transup = coeffs\n\n L_atm = RT.get_L_atm(x_RT, geom)\n L_down_transmitted = RT.get_L_down_transmitted(x_RT, geom)\n L_total_without_surface_emission = \\\n L_atm + L_down_transmitted * rfl_hi / (1. 
- sphalb * rfl_hi)\n\n # These tend to have high transmission factors; the emissivity of most\n # materials is nearly 1 for these bands, so they are good for\n # initializing the surface temperature.\n clearest_wavelengths = [10125., 10390.00, 10690.00]\n\n # This is fragile if other instruments have different wavelength\n # spacing or range\n clearest_indices = [np.argmin(np.absolute(RT.wl - w))\n for w in clearest_wavelengths]\n\n # Error function for nonlinear temperature fit\n def err(z):\n T = z\n emissivity = forward.surface.emissivity_for_surface_T_init\n Ls_est, d = emissive_radiance(emissivity, T,\n forward.surface.wl[clearest_indices])\n resid = transup[clearest_indices] * Ls_est + \\\n L_total_without_surface_emission[clearest_indices] - \\\n meas[clearest_indices]\n return sum(resid**2)\n\n # Fit temperature, set bounds, and set the initial values\n idx_T = forward.surface.surf_temp_ind\n Tinit = np.array([forward.surface.init[idx_T]])\n Tbest = minimize(err, Tinit).x\n T = max(forward.surface.bounds[idx_T][0]+eps,\n min(Tbest, forward.surface.bounds[idx_T][1]-eps))\n x_surface[idx_T] = Tbest\n forward.surface.init[idx_T] = T\n\n # Update the full state vector\n x[forward.idx_surface] = x_surface\n\n # We record these initial values in the geometry object - the only\n # \"stateful\" part of the retrieval\n geom.x_surf_init = x[forward.idx_surface]\n geom.x_RT_init = x[forward.idx_RT]\n\n return x", "def InitialCondition():\n maxX = getX(C.N + 1,C.N+1,C.alpha_max)\n y0 = np.zeros(maxX,dtype=complex)\n for i in range(0, C.N+2):\n for j in range(0, C.N+2):\n for alpha in [1]:\n\n X = getX(i, j, alpha)\n\n y0[X] = 1./2./C.N * (1-delta(i, C.N+1))*(1-delta(j, C.N+1))+1./2*delta(i, C.N+1)*delta(j, C.N+1) +\\\n 1./2./(C.N)**0.5 * ((1-delta(i, C.N+1)) *\n delta(j, C.N+1)+(1-delta(j, C.N+1))*delta(i, C.N+1))", "def prepare_fg(\n self, times, wavelength, spectra, stellar, intensities, telluric, area=None\n ):\n\n if area is None:\n orb = Orbit(self.star, self.planet)\n area = orb.stellar_surface_covered_by_planet(times)\n\n model = stellar * telluric\n\n # Normalize the profile of the observations\n profile = np.nanmean(spectra, axis=1)\n model_profile = np.nanmean(model, axis=1)\n norm = profile / model_profile\n\n # Normalize the spectrum\n # model = stellar * telluric * norm[:, None]\n # profile = np.median(spectra, axis=0)\n # model_profile = np.median(model, axis=0)\n\n # nm = np.nanmedian(profile / model_profile)\n # norm *= nm\n\n # model = stellar * telluric * norm[:, None]\n # diff = spectra - model\n\n # model = np.nanmedian(spectra, axis=0)\n\n # f = -(\n # # np.nan_to_num(intensities) *\n # self.area_atmosphere\n # / self.area_planet\n # * area[:, None]\n # # * np.nan_to_num(telluric, nan=1)\n # * norm[:, None]\n # )\n # f = np.nan_to_num(intensities) * np.nan_to_num(telluric, nan=1) * norm[:, None]\n area *= self.area_atmosphere / self.area_planet\n f = -np.nan_to_num(intensities, nan=1) * area[:, None]\n if hasattr(f, \"to_value\"):\n f = f.to_value(1)\n\n # g = spectra - stellar * telluric * norm[:, None]\n # if self.n_sysrem is not None:\n # g = sysrem(g, self.n_sysrem)\n\n g = spectra\n if self.n_sysrem is not None:\n # Use SVD directly instead of Sysrem\n g = sysrem(spectra, self.n_sysrem)\n # u, s, vh = np.linalg.svd(spectra, full_matrices=False)\n # s[: self.n_sysrem] = 0\n # s[80:] = 0\n # ic = (u * s) @ vh\n # g = ic\n else:\n # g = spectra - stellar * telluric * norm[:, None]\n gen = np.random.default_rng()\n tmp = sysrem(spectra, 5)\n g = gen.normal(\n 
loc=np.nanmean(tmp), scale=np.nanstd(tmp), size=spectra.shape\n )\n # g *= np.nanstd() # std of random is 1 (in theory)\n\n # norm = np.nanstd(g, axis=0)\n # f /= norm\n # g /= norm\n\n # plt.imshow(g, aspect=\"auto\", origin=\"lower\")\n # plt.xlabel(\"Wavelength\")\n # plt.ylabel(\"Time\")\n # plt.title(f\"N_Sysrem: {self.n_sysrem}\")\n # plt.savefig(f\"spectra_sysrem_{self.n_sysrem}.png\")\n\n return wavelength, f, g", "def simulated_reflectivity(pressure, temperature, vapor_mixing_ratio, liquid_mixing_ratio, snow_mixing_ratio=None,\n graupel_mixing_ratio=None, use_varint=False, use_liqskin=False):\n # Set values for constants with variable intercept\n R1 = 1e-15\n RON = 8e6\n RON2 = 1e10\n SON = 2e7\n GON = 5e7\n RON_MIN = 8e6\n RON_QR0 = 0.00010\n RON_DELQR0 = 0.25*RON_QR0\n RON_CONST1R = (RON2-RON_MIN)*0.5\n RON_CONST2R = (RON2+RON_MIN)*0.5\n\n # set constant intercepts\n rno_l = 8e6\n rno_s = 2e7\n rno_g = 4e6\n\n qvapor = da.clip(vapor_mixing_ratio, 0., None)\n qliquid = da.clip(liquid_mixing_ratio, 0., None)\n\n # If qgraupel but not qsnow, set qgraupel = qsnow\n if snow_mixing_ratio is None:\n if graupel_mixing_ratio is None:\n qsnow = da.zeros_like(qliquid)\n qgraupel = da.zeros_like(qliquid)\n else:\n qgraupel = da.clip(graupel_mixing_ratio, 0., None)\n qsnow = da.zeros_like(graupel_mixing_ratio)\n qsnow[temperature <= 273.15] = qgraupel[temperature <= 273.15]\n else:\n qsnow = da.clip(snow_mixing_ratio, 0., None)\n qgraupel = da.clip(graupel_mixing_ratio, 0., None)\n\n # density for liquid, snow, and graupel (kg m-3)\n rho_l = 1000. # liquid\n rho_i = 100. # snow\n rho_g = 400. # graupel\n\n # constant evaluation of gamma distribution\n gamma = 720.\n\n # Alpha constant\n alpha = 0.224\n\n # constant multiplication factors\n factor_l = gamma * 1e18 * (1./(np.pi*rho_l))**1.75\n s = gamma * 1e18 * (1./(np.pi*rho_i))**1.75 * (rho_i/rho_l)**2 * alpha\n g = gamma * 1e18 * (1./(np.pi*rho_g))**1.75 * (rho_g/rho_l)**2 * alpha\n\n # calculate virtual temperature\n virtual_t = virtual_temperature(temperature, qvapor)\n\n # dry gas constant\n Rd = 287.\n rho_air = pressure/(Rd*virtual_t)\n\n # adjust for brightband if use_liqskin=True\n if use_liqskin:\n raise NotImplementedError('Liquid skin correction not implemented')\n # factor_s = da.full_like(temperature, s)\n # factor_g = da.full_like(temperature, g)\n # try:\n # factor_s[temperature >= 273.15] = factor_s[temperature >= 273.15] / da.array([alpha])\n # factor_g[temperature >= 273.15] = factor_g[temperature >= 273.15] / da.array([alpha])\n # except ValueError:\n # factor_s = s\n # factor_g = g\n else:\n factor_s = s\n factor_g = g\n\n # calculate variable intercept if use_varint=True\n if use_varint:\n raise NotImplementedError('Variable intercepts not yet implemented')\n # temp_c = da.clip(temperature-273.15, temperature.min(), -0.001)\n # sonv = MIN(2.0D8, 2.0D6*EXP(-0.12D0*temp_c))\n #\n # gonv = gon\n # IF (qgr(i,j,k) .GT. R1) THEN\n # gonv = 2.38D0 * (PI*RHO_G/(rhoair*qgr(i,j,k)))**0.92D0\n # gonv = MAX(1.D4, MIN(gonv,GON))\n # END IF\n #\n # ronv = RON2\n # IF (qra(i,j,k) .GT. 
R1) THEN\n # ronv = RON_CONST1R*TANH((RON_QR0 - qra(i,j,k))/RON_DELQR0) + RON_CONST2R\n # END IF\n else:\n ronv = rno_l\n sonv = rno_s\n gonv = rno_g\n\n # Total equivalent reflectivity factor (z_e, in mm^6 m^-3) is\n # the sum of z_e for each hydrometeor species:\n z_e = (((factor_l*(rho_air*qliquid)**1.75)/(ronv**.75)) +\n ((factor_s*(rho_air*qsnow)**1.75)/(sonv**.75)) +\n ((factor_g*(rho_air*qgraupel)**1.75)/(gonv**.75)))\n\n # Adjust small values of Z_e so that dBZ is no lower than -30\n z_e = da.clip(z_e, .001, None)\n\n # Convert to dBZ\n dbz = 10.*da.log10(z_e)\n return dbz", "def test_double_ended_wls_fix_alpha_fix_gamma_estimate_synthetic():\n from dtscalibration import DataStore\n\n cable_len = 100.0\n nt = 500\n time = np.arange(nt)\n x = np.linspace(0.0, cable_len, 100)\n ts_cold = np.ones(nt) * 4.0\n ts_warm = np.ones(nt) * 20.0\n\n C_p = 15246\n C_m = 2400.0\n dalpha_r = 0.0005284\n dalpha_m = 0.0004961\n dalpha_p = 0.0005607\n gamma = 482.6\n cold_mask = x < 0.5 * cable_len\n warm_mask = np.invert(cold_mask) # == False\n temp_real = np.ones((len(x), nt))\n temp_real[cold_mask] *= ts_cold + 273.15\n temp_real[warm_mask] *= ts_warm + 273.15\n\n st = (\n C_p\n * np.exp(-(dalpha_r + dalpha_p) * x[:, None])\n * np.exp(gamma / temp_real)\n / (np.exp(gamma / temp_real) - 1)\n )\n ast = (\n C_m\n * np.exp(-(dalpha_r + dalpha_m) * x[:, None])\n / (np.exp(gamma / temp_real) - 1)\n )\n rst = (\n C_p\n * np.exp(-(dalpha_r + dalpha_p) * (-x[:, None] + cable_len))\n * np.exp(gamma / temp_real)\n / (np.exp(gamma / temp_real) - 1)\n )\n rast = (\n C_m\n * np.exp(-(dalpha_r + dalpha_m) * (-x[:, None] + cable_len))\n / (np.exp(gamma / temp_real) - 1)\n )\n\n alpha = np.mean(np.log(rst / rast) - np.log(st / ast), axis=1) / 2\n alpha -= alpha[0] # the first x-index is where to start counting\n\n ds = DataStore(\n {\n \"st\": ([\"x\", \"time\"], st),\n \"ast\": ([\"x\", \"time\"], ast),\n \"rst\": ([\"x\", \"time\"], rst),\n \"rast\": ([\"x\", \"time\"], rast),\n \"userAcquisitionTimeFW\": ([\"time\"], np.ones(nt)),\n \"userAcquisitionTimeBW\": ([\"time\"], np.ones(nt)),\n \"cold\": ([\"time\"], ts_cold),\n \"warm\": ([\"time\"], ts_warm),\n },\n coords={\"x\": x, \"time\": time},\n attrs={\"isDoubleEnded\": \"1\"},\n )\n\n sections = {\n \"cold\": [slice(0.0, 0.5 * cable_len)],\n \"warm\": [slice(0.5 * cable_len, cable_len)],\n }\n\n # WLS\n ds.calibration_double_ended(\n sections=sections,\n st_var=1e-7,\n ast_var=1e-7,\n rst_var=1e-7,\n rast_var=1e-7,\n method=\"wls\",\n solver=\"sparse\",\n fix_gamma=(gamma, 0.0),\n fix_alpha=(alpha, np.zeros_like(alpha)),\n )\n\n assert_almost_equal_verbose(ds.gamma.values, gamma, decimal=18)\n assert_almost_equal_verbose(ds.alpha.values, alpha, decimal=18)\n assert_almost_equal_verbose(ds.tmpf.values, temp_real - 273.15, decimal=11)\n assert_almost_equal_verbose(ds.tmpb.values, temp_real - 273.15, decimal=11)\n assert_almost_equal_verbose(ds.tmpw.values, temp_real - 273.15, decimal=11)\n\n pass", "def contrast_curve_core(\n star_data,\n plate_scale,\n fwhm=1,\n radius_size=None,\n center=None,\n):\n\n # make copy of data array\n data = star_data.copy()\n\n# data = np.abs(data) #DO NOT DO THIS!!!! 
It's making the standard deviation too small later.\n\n ################## establish center ########\n\n x, y = np.indices((data.shape))\n\n if type(center) == type(None):\n center = np.array(\n [(x.max() - x.min()) / 2.0, (y.max() - y.min()) / 2.0]\n )\n\n if type(radius_size) == type(None):\n radius_size = fwhm\n\n ########## set up radial coordinate system ########\n\n radii = np.sqrt((x - center[0]) ** 2 + (y - center[1]) ** 2)\n radii = radii.astype(np.int64)\n\n ones = np.ones_like(data)\n\n number_of_a = int(radii.max() / radius_size)\n\n pie_edges = np.arange(0, 390, 30)\n\n ######## set up aperture array ##########\n center_ap = CircularAperture([center[0], center[1]], radius_size)\n\n all_apers, all_apers_areas, all_masks = (\n [center_ap],\n [center_ap.area],\n [center_ap.to_mask(method=\"exact\")],\n )\n\n all_data, all_weights = [all_masks[0].multiply(data)], [\n all_masks[0].multiply(ones)\n ]\n\n all_stds = [twoD_weighted_std(all_data[0], all_weights[0])]\n\n ######## construct the apertures of the annuli #######\n sigma_clip = SigmaClip(sigma=3.0)\n bkgrms = StdBackgroundRMS(sigma_clip)\n\n medians = np.zeros((number_of_a, len(pie_edges) - 1))\n stds = np.zeros((number_of_a, len(pie_edges) - 1))\n seps = np.zeros(number_of_a)\n for j in range(int(number_of_a)):\n r_in = j * radius_size + fwhm\n r_out = j * radius_size + radius_size + fwhm\n seps[j] = (r_in+r_out)/2.*plate_scale\n\n # terminate if completely outside 10 arcseconds\n if (r_in * plate_scale) > 10:\n break\n\n # create aperture\n aper = CircularAnnulus(\n [center[0], center[1]],\n r_in=r_in,\n r_out=r_out,\n )\n\n # multiply the data by the aperture mask and store it\n all_apers.append(aper)\n all_apers_areas.append(aper.area)\n mask = aper.to_mask(method=\"exact\")\n all_masks.append(mask)\n mask_data = mask.multiply(data)\n\n mask_weight = mask.multiply(ones)\n\n for i, pie_edge_near in enumerate(pie_edges[:-1]):\n pie_edge_far = pie_edges[i + 1]\n mask_data_new = mask_data.copy()\n mask_data_new = check_boundaries(\n mask_data_new, pie_edge_near, pie_edge_far\n )\n medians[j, i] = np.nanmedian(mask_data_new)\n mask_data_masked = mask_data_new[~np.isnan(mask_data_new)]\n\n mean, std = meanclip(mask_data_masked, 3, converge_num=0.2)\n stds[j, i] = std\n\n #Return only the medians and stds for distances within the desired range\n seps = seps[0:j]\n medians = medians[0:j,:]\n stds = stds[0:j,:]\n return seps, medians, stds", "def drfl_dsurfaceb(self, x_surface, geom):\n\n return np.zeros((self.n_wl, 1))", "def distribute(self, date_time, air_temp, vapor_pressure=None,\n dew_point=None, cloud_factor=None):\n\n self._logger.debug('%s Distributing thermal' % date_time)\n\n # calculate clear sky thermal\n if self.clear_sky_method == 'marks1979':\n cth = np.zeros_like(air_temp, dtype=np.float64)\n envphys_c.ctopotherm(\n air_temp, dew_point,\n self.dem,\n self.sky_view_factor,\n cth,\n self.config['marks1979_nthreads'])\n\n elif self.clear_sky_method == 'dilley1998':\n cth = clear_sky.Dilly1998(air_temp, vapor_pressure/1000)\n\n elif self.clear_sky_method == 'prata1996':\n cth = clear_sky.Prata1996(air_temp, vapor_pressure/1000)\n\n elif self.clear_sky_method == 'angstrom1918':\n cth = clear_sky.Angstrom1918(air_temp, vapor_pressure/1000)\n\n # terrain factor correction\n if (self.sky_view_factor is not None) and \\\n (self.clear_sky_method != 'marks1979'):\n # apply (emiss * skvfac) + (1.0 - skvfac) to the longwave\n cth = cth * self.sky_view_factor + (1.0 - self.sky_view_factor) * \\\n STEF_BOLTZ * air_temp**4\n\n # 
make output variable\n self.thermal_clear = cth.copy()\n\n # correct for the cloud factor\n # ratio of measured/modeled solar indicates the thermal correction\n if self.correct_cloud:\n if self.cloud_method == 'garen2005':\n cth = cloud.Garen2005(cth,\n cloud_factor)\n\n elif self.cloud_method == 'unsworth1975':\n cth = cloud.Unsworth1975(cth,\n air_temp,\n cloud_factor)\n\n elif self.cloud_method == 'kimball1982':\n cth = cloud.Kimball1982(cth,\n air_temp,\n vapor_pressure/1000,\n cloud_factor)\n\n elif self.cloud_method == 'crawford1999':\n cth = cloud.Crawford1999(cth,\n air_temp,\n cloud_factor)\n\n # make output variable\n self.thermal_cloud = cth.copy()\n\n # correct for vegetation\n if self.correct_veg:\n cth = vegetation.thermal_correct_canopy(cth,\n air_temp,\n self.veg_tau,\n self.veg_height)\n\n # make output variable\n self.thermal_veg = cth.copy()\n\n self.thermal = utils.set_min_max(cth, self.min, self.max)", "def main():\n strikes, dips, normals, slip = generate_normal_ss_data(330, 60, n=500, porp=1)\n #strikes, dips, normals, slip = generate_normal_data(330, 60, n=500, porp=10)\n sigma = invert_plane_stress(normals, slip)\n plot(sigma, strikes, dips)\n plt.show()", "def solid_surface_density(M, a, delta_a):\n sigma_solid = (M*gen.Mearth*1e3)/(2.*np.pi*(a*gen.AU)*(delta_a*gen.AU))\n return sigma_solid", "def calc_saturation_curves(self):\n HEOS = CP.AbstractState(self.additional_backend, self.fluid)\n PCSAFT = CP.AbstractState(self.backend, self.fluid)\n self.dictL, self.dictV = {}, {}\n for Q, dic in zip([0, 1], [self.dictL, self.dictV]):\n # rhomolar, smolar, hmolar, T, p, umolar = [], [], [], [], [], []\n rhomolar, T, p = [], [], []\n for _T in np.logspace(np.log10(HEOS.keyed_output(CP.iT_triple)), np.log10(HEOS.keyed_output(CP.iT_critical)), 500):\n try:\n PCSAFT.update(CP.QT_INPUTS, Q, _T)\n # print('T', PCSAFT.T())\n # print('p', PCSAFT.p())\n # print('rhomolar', PCSAFT.rhomolar())\n if (PCSAFT.p() < 0): raise ValueError('P is negative:' + str(PCSAFT.p()))\n PCSAFT.T(), PCSAFT.p(), PCSAFT.rhomolar()\n # PCSAFT.hmolar(), PCSAFT.smolar(), PCSAFT.umolar()\n\n T.append(PCSAFT.T())\n p.append(PCSAFT.p())\n rhomolar.append(PCSAFT.rhomolar())\n # hmolar.append(PCSAFT.hmolar())\n # smolar.append(PCSAFT.smolar())\n # umolar.append(PCSAFT.umolar())\n except ValueError as VE:\n myprint(1, 'satT error:', VE, '; T:', '{T:0.16g}'.format(T=_T), 'T/Tc:', _T / HEOS.keyed_output(CP.iT_critical))\n\n dic.update(dict(T=np.array(T),\n P=np.array(p),\n Dmolar=np.array(rhomolar)))\n # Hmolar=np.array(hmolar),\n # Smolar=np.array(smolar)))\n # Umolar=np.array(umolar)))", "def P_AI_Rocky(in_dict):\n # START\n fs = 16\n plt.rc('font', size=fs)\n fig = plt.figure(figsize=(14,12))\n ds = nc.Dataset(in_dict['fn'])\n\n # PLOT CODE\n aa = [-122.8, -122.54, 47.92, 48.22]\n import cmocean\n cmap = cmocean.cm.balance\n # cmap = 'RdYlBu_r'\n\n from warnings import filterwarnings\n filterwarnings('ignore') # skip some warning messages\n \n # plot Code\n \n # calculate divergence and vorticity\n uu = ds['u'][0, -1, :, :]\n vv = ds['v'][0, -1, :, :]\n u = zfun.fillit(uu)\n v = zfun.fillit(vv)\n u[np.isnan(u)] = 0\n v[np.isnan(v)] = 0\n \n G = zrfun.get_basic_info(in_dict['fn'], only_G=True)\n \n dive = ((np.diff(u, axis=1)/G['DX'][:, 1:-1])[1:-1, :]\n + (np.diff(v, axis = 0)/G['DY'][1:-1, :])[:, 1:-1])\n #dive[G['mask_rho'][1:-1,1:-1]==False] = np.nan\n \n vort = np.diff(v, axis=1)/G['DX'][1:,1:] - np.diff(u, axis=0)/G['DY'][1:,1:]\n #vort[G['mask_rho'][1:,1:]==False] = np.nan\n \n scl = 2e-3\n \n # panel 
1\n ax = fig.add_subplot(121)\n # cs = plt.pcolormesh(G['lon_psi'], G['lat_psi'], dive/scl, cmap=cmap,\n # vmin=-1, vmax=1)\n cs = plt.pcolormesh(G['lon_rho'][1:-1,1:-1], G['lat_rho'][1:-1,1:-1], dive/scl, cmap=cmap,\n vmin=-1, vmax=1, shading='gouraud')\n tstr = (r'Surface Divergence (%0.1e $s^{-1}$)' % (scl))\n #pfun.add_bathy_contours(ax, ds, txt=True)\n pfun.add_coast(ax)\n ax.axis(aa)\n pfun.dar(ax)\n ax.set_xlabel('Longitude')\n ax.set_ylabel('Latitude')\n ax.set_title(tstr)\n pfun.add_info(ax, in_dict['fn'])\n ax.set_xticks([-122.8, -122.7, -122.6])\n ax.set_yticks([48, 48.1, 48.2])\n #\n # panel 2\n ax = fig.add_subplot(122)\n # cs = plt.pcolormesh(G['lon_rho'], G['lat_rho'], vort/scl, cmap=cmap,\n # vmin=-1, vmax=1)\n cs = plt.pcolormesh(G['lon_psi'], G['lat_psi'], vort/scl, cmap=cmap,\n vmin=-1, vmax=1, shading='gouraud')\n tstr = (r'Surface Vorticity (%0.1e $s^{-1}$)' % (scl))\n ax.set_xticks([-122.8, -122.7, -122.6])\n ax.set_yticks([])\n #fig.colorbar(cs)\n \n # Inset colorbar\n from mpl_toolkits.axes_grid1.inset_locator import inset_axes\n cbaxes = inset_axes(ax, width=\"4%\", height=\"40%\", loc='lower left')\n fig.colorbar(cs, cax=cbaxes, orientation='vertical')\n \n #pfun.add_bathy_contours(ax, ds)\n pfun.add_coast(ax)\n ax.axis(aa)\n pfun.dar(ax)\n ax.set_xlabel('Longitude')\n ax.set_title(tstr) \n \n #fig.tight_layout()\n # FINISH\n ds.close()\n if len(in_dict['fn_out']) > 0:\n plt.savefig(in_dict['fn_out'])\n plt.close()\n else:\n plt.show()\n plt.rcdefaults()", "def govardovskii2000_template(\n wavelengths: np.ndarray,\n alpha_max: Union[float, np.ndarray],\n A_alpha: Union[float, np.ndarray] = 69.7,\n a_alpha1: Union[float, np.ndarray] = 0.8795,\n a_alpha2: Union[float, np.ndarray] = 0.0459,\n a_alpha3: Union[float, np.ndarray] = 300.0,\n a_alpha4: Union[float, np.ndarray] = 11940.0,\n B_alpha: Union[float, np.ndarray] = 28.0,\n b_alpha: Union[float, np.ndarray] = 0.922,\n C_alpha: Union[float, np.ndarray] = -14.9,\n c_alpha: Union[float, np.ndarray] = 1.104,\n D_alpha: Union[float, np.ndarray] = 0.674,\n A_beta: Union[float, np.ndarray] = 0.26,\n beta_max1: Union[float, np.ndarray] = 189.0,\n beta_max2: Union[float, np.ndarray] = 0.315,\n d_beta1: Union[float, np.ndarray] = -40.5,\n d_beta2: Union[float, np.ndarray] = 0.195,\n) -> np.ndarray:\n x_alpha = (wavelengths / alpha_max) ** -1\n a_alpha = a_alpha1 + a_alpha2 * np.exp(-((alpha_max - a_alpha3) ** 2) / a_alpha4)\n\n alpha_band = (\n np.exp(A_alpha * (a_alpha - x_alpha))\n + np.exp(B_alpha * (b_alpha - x_alpha))\n + np.exp(C_alpha * (c_alpha - x_alpha))\n + D_alpha\n ) ** -1\n\n beta_max = beta_max1 + beta_max2 * alpha_max\n d_beta = d_beta1 + d_beta2 * alpha_max\n beta_band = np.exp(-(((wavelengths - beta_max) / d_beta) ** 2))\n\n return alpha_band + A_beta * beta_band", "def compute_surface_temperature(heat_flux):\n\n return 1.1e-4*heat_flux + 323", "def calc_VPD(t_celsius, rel_humidity):\n # according to Licor LI-6400 manual pg 14-10\n # and Buck AL (1981). New equations for computing vapor pressure and\n # enhancement factor. 
J Appl Meteor 20:1527-1532\n vp_sat = 0.61365 * math.exp((17.502 * t_celsius) / (240.97 + t_celsius))\n\n vp_air = vp_sat * rel_humidity\n return vp_sat - vp_air # or vp_sat * (1 - rel_humidity)", "def get_sky(plate, mjd, output_path, verbose=False):\n tag = f'PLATE {plate:05d} MJD {mjd:05d} PATH {output_path}'\n if verbose:\n print('Starting {}'.format(tag))\n # Initialize output data.\n last_nexp = None\n plugmaps = []\n wlens = {'b': [], 'r': []}\n wdisps = {'b': [], 'r': []}\n fluxes = {'b': [], 'r': []}\n ivars = {'b': [], 'r': []}\n flats = {'b': [], 'r': []}\n rdnoises = {'b': [], 'r': []}\n masks = {'b': [], 'r': []}\n obskeys = ('EXPOSURE', 'TAI-BEG', 'EXPTIME', 'AZ', 'ALT', 'AIRMASS',\n 'PRESSURE', 'AIRTEMP',\n 'RDNOISE0', 'RDNOISE1', 'RDNOISE2', 'RDNOISE3')\n obsvals = {key: [] for key in obskeys}\n # Size of each amplifier in raw image pixels along (wlen, tracex) axes.\n ampsize = {'b': (2056, 2048), 'r': (2064, 2057)}\n # ampx[band] tabulates whether each wavelength index is readout by\n # amplifier 0/2 (=0) or 1/3 (=1).\n ampx = {'b': 1 * (np.arange(4112) >= 2056),\n 'r': 1 * (np.arange(4128) >= 2064)}\n # amplifer[band] is a function that takes a traceset as input an returns an\n # array that tabulates whether each wavelength index is readout by\n # amplifier 0-3.\n amplifier = {'b': lambda x: 2 * (x >= 2048) + ampx['b'],\n 'r': lambda x: 2 * (x >= 2057) + ampx['r']}\n # Scaling such that RMS = rdnoise_scale * RDNOISEn * neff.\n rdnoise_scale = (4 * np.pi) ** 0.25\n # Conversion from constant log-lambda pixels to wavelength ratio.\n wdisp_const = 1e-4 * np.log(10)\n # Allowed pixel mask bits.\n valid_mask = (1 << 32) - 1\n # Slices of valid data to save. These trim pixels at each end where\n # IVAR=0 or other serious pixel mask bits are often set.\n valid_slices = {'b': slice(767, 3299), 'r': slice(483, 3668) }\n # Initialize data access.\n finder = bossdata.path.Finder()\n mirror = bossdata.remote.Manager()\n # Loop over spectrographs.\n expected_fibers = []\n for specidx in 1, 2:\n # Load the list of science exposures used for this spectrograph's coadd.\n fiber = 500 * (specidx - 1) + 1\n spec_name = finder.get_spec_path(plate, mjd, fiber=fiber, lite=True)\n exposures = bossdata.spec.SpecFile(mirror.get(spec_name)).exposures\n for band in 'b', 'r':\n camera = '{}{}'.format(band, specidx)\n use = valid_slices[band]\n # Loop over science exposures for this camera.\n nexp = exposures.num_by_camera[camera]\n if not (last_nexp is None or nexp == last_nexp):\n print(f'Different nexp for {camera} {tag}')\n return None\n last_nexp = nexp\n for expidx in range(nexp):\n # Load this camera's spFrame file.\n name = exposures.get_exposure_name(expidx, camera, 'spFrame')\n path = mirror.get(finder.get_plate_path(plate, name))\n spFrame = bossdata.plate.FrameFile(path, calibrated=False)\n # Lookup this spectrograph's sky fibers.\n sky_name = binary_type('SKY ', 'ascii')\n fiberidx = np.where(\n spFrame.plug_map['OBJTYPE'] == sky_name)[0]\n if expidx == 0 and band == 'b':\n # Save plugmap metadata.\n plugmaps.append(spFrame.plug_map[\n ['FIBERID','RA','DEC','XFOCAL','YFOCAL']][fiberidx])\n if specidx == 2:\n plugmap = astropy.table.vstack(plugmaps)\n if specidx == 1 and band == 'b':\n # Record observation metadata.\n for key in obskeys:\n try:\n value = spFrame.header[key]\n except KeyError:\n value = -999 # invalid value for int/float types\n obsvals[key].append(value)\n # Load the sky fiber data.\n fibers = spFrame.plug_map['FIBERID'][fiberidx].data\n assert np.all(fiberidx == 
spFrame.get_fiber_offsets([fibers]))\n if expidx == 0 and band == 'b':\n expected_fibers.append(fibers)\n if verbose:\n print('Found {} sky fibers on spec{}: {}.'.format(\n len(fibers), specidx,\n ','.join([str(f) for f in fibers])))\n else:\n if not np.all(fibers == expected_fibers[specidx - 1]):\n print('Did not get expected fibers for {} exp {}'\n .format(camera, expidx))\n data = spFrame.get_valid_data(\n fibers, include_sky=True, include_wdisp=True, use_ivar=True,\n pixel_quality_mask=valid_mask)\n if verbose:\n print('Reading {} for exposure {} / {}...'\n .format(camera, expidx + 1, nexp))\n assert data.shape == (len(fibers), 2 * ampsize[band][0])\n mask = spFrame.get_pixel_masks(fibers)\n masks[band].append(mask[:, use])\n # Identify pixels with valid data.\n valid = ~data['ivar'].mask\n bad_fibers = ~np.any(valid, axis=1)\n if verbose and np.any(bad_fibers):\n print(' bad fibers: {}'.format(fibers[bad_fibers]))\n ivar = data['ivar'].data\n assert np.all(ivar[valid] > 0)\n ivars[band].append(ivar[:, use])\n # Load the superflat and trace vectors for sky fibers.\n superflat = spFrame.get_superflat(fibers)\n tracex = spFrame.hdulist[7].read()[fiberidx]\n # Load fiberflat and neff vectors from this camera's spFlat.\n name = exposures.get_exposure_name(expidx, camera, 'spFlat')\n path = mirror.get(finder.get_plate_path(plate, name))\n with fits.open(path) as spFlat:\n fiberflat = spFlat[0].data[fiberidx]\n neff = bossdata.plate.TraceSet(spFlat[3]).get_y()[fiberidx]\n if np.any(neff[valid] <= 0):\n print(f'WARNING: neff <= 0 for {camera} {expidx} {tag}')\n # Lookup the per-amplifier readnoise values.\n readnoises = np.array([\n spFrame.header['RDNOISE{}'.format(amp)]\n for amp in range(4)], dtype=np.float32)\n # Determine which amplifier (0-3) each pixel along the trace is\n # read out by and scale to RMS readnoise per wavelength pixel.\n amp = amplifier[band](tracex)\n rdnoise = rdnoise_scale * readnoises[amp] * neff\n rdnoises[band].append(rdnoise[:, use].astype(np.float32))\n # Combine the superflat and fiberflat.\n flat = superflat * fiberflat\n assert np.all(flat[valid] > 0)\n flats[band].append(flat[:, use])\n # Save wavelength solutions in angstroms.\n wlen = data['wavelength'].data\n wlens[band].append(wlen[:, use])\n # Save wavelength dispersions in angstroms.\n wdisp = data['wdisp'].data\n assert np.all(wdisp[valid] > 0)\n wdisp = wlen * np.expm1(wdisp_const * wdisp)\n wdisps[band].append(wdisp[:, use])\n # Save the combined flat-fielded sky models + residuals,\n # which might be negative due to readnoise.\n flux = data['flux'].data + data['sky'].data\n fluxes[band].append(flux[:, use])\n # Build observation metadata table.\n obslist = astropy.table.Table()\n for key in obskeys:\n obslist[key] = obsvals[key]\n # Build the output HDU list.\n hdus = fits.HDUList()\n cards = dict(PLATE=plate, MJD=mjd, NFIBERS=len(plugmap), NEXP=nexp)\n hdus.append(fits.PrimaryHDU(header=fits.Header(cards)))\n hdus.append(fits.table_to_hdu(obslist))\n hdus[-1].name = 'OBSLIST'\n hdus.append(fits.table_to_hdu(plugmap))\n hdus[-1].name = 'PLUGMAP'\n for band in 'b', 'r':\n Band = band.upper()\n # Combine arrays for each band and save an an image HDU.\n hdus.append(fits.ImageHDU(np.vstack(wlens[band]),\n name='{}WLEN'.format(Band)))\n hdus.append(fits.ImageHDU(np.vstack(wdisps[band]),\n name='{}WDISP'.format(Band)))\n hdus.append(fits.ImageHDU(np.vstack(rdnoises[band]),\n name='{}RDNOISE'.format(Band)))\n hdus.append(fits.ImageHDU(np.vstack(flats[band]),\n name='{}FLAT'.format(Band)))\n 
hdus.append(fits.ImageHDU(np.vstack(fluxes[band]),\n name='{}FLUX'.format(Band)))\n hdus.append(fits.ImageHDU(np.vstack(ivars[band]),\n name='{}IVAR'.format(Band)))\n hdus.append(fits.ImageHDU(np.vstack(masks[band]),\n name='{}MASK'.format(Band)))\n name = os.path.join(output_path, 'sky-{}-{}.fits'.format(plate, mjd))\n hdus.writeto(name, overwrite=True)\n print('Completed {}'.format(tag))\n return obslist", "def spec_helm_decomp(k,Cu,Cv,GM=False):\n dk = k[1]-k[0]\n s = np.log(k)\n\n Fphi = np.zeros_like(Cu)\n Fpsi = np.zeros_like(Cu)\n Cphi = np.zeros_like(Cu)\n Cpsi = np.zeros_like(Cu)\n\n # assume GM for decomposing into wave and vortex\n if GM:\n gm = np.load(\"/Users/crocha/Projects/dp_spectra/GM/gm_omega_star.npz\")\n f2omg2 = gm['rgm']\n ks = gm['k']*1.e3\n\n for i in range(s.size-1):\n\n ds = np.diff(s[i:])\n\n sh = sinh(s[i]-s[i:])\n ch = cosh(s[i]-s[i:])\n\n # the function to integrate\n Fp = Cu[i:]*sh + Cv[i:]*ch\n Fs = Cv[i:]*sh + Cu[i:]*ch\n\n # integrate using Simpson's rule\n Fpsi[i] = integrate.simps(Fs,s[i:])\n Fphi[i] = integrate.simps(Fp,s[i:])\n\n # zero out unphysical values\n Fpsi[Fpsi < 0.] = 0.\n Fphi[Fphi < 0.] = 0.\n\n # compute rotational and divergent components\n Cpsi = Fpsi - Fphi + Cu\n Cphi = Fphi - Fpsi + Cv\n\n if GM:\n\n f2omg2i = np.interp(k,ks,f2omg2)\n\n Cv_w = f2omg2i*Fphi - Fpsi + Cv\n Cv_v = Cv - Cv_w\n \n kdkromg = diff_central(ks, f2omg2)\n kdkromg = np.interp(k,ks[1:-1],kdkromg)\n\n dFphi = diff_central(k, Fphi)\n #dFphi = np.gradient(Fphi,k)\n dFphi = np.interp(k,k[1:-1],dFphi.real)\n E_w = Fphi - k*dFphi\n\n Cu_w = -k*kdkromg*Fphi + f2omg2i*(-Fpsi+Cv) + Fphi\n Cu_v = Cu - Cu_w\n\n Cb_w = E_w - (Cu_w + Cv_w)/2.\n\n return Cpsi,Cphi, Cu_w,Cv_w, Cu_v,Cv_v, E_w, Cb_w\n\n else:\n return Cpsi,Cphi", "def test_double_ended_ols_wls_fix_gamma_estimate_synthetic():\n from dtscalibration import DataStore\n import numpy as np\n\n np.random.seed(0)\n\n cable_len = 100.\n nt = 500\n time = np.arange(nt)\n x = np.linspace(0., cable_len, 100)\n ts_cold = np.ones(nt) * 4.\n ts_warm = np.ones(nt) * 20.\n\n C_p = 15246\n C_m = 2400.\n dalpha_r = 0.0005284\n dalpha_m = 0.0004961\n dalpha_p = 0.0005607\n gamma = 482.6\n cold_mask = x < 0.5 * cable_len\n warm_mask = np.invert(cold_mask) # == False\n temp_real = np.ones((len(x), nt))\n temp_real[cold_mask] *= ts_cold + 273.15\n temp_real[warm_mask] *= ts_warm + 273.15\n\n st = C_p * np.exp(-dalpha_r * x[:, None]) * \\\n np.exp(-dalpha_p * x[:, None]) * np.exp(gamma / temp_real) / \\\n (np.exp(-gamma / temp_real) - 1)\n ast = C_m * np.exp(-dalpha_r * x[:, None]) * \\\n np.exp(-dalpha_m * x[:, None]) / (np.exp(-gamma / temp_real) - 1)\n rst = C_p * np.exp(-dalpha_r * (-x[:, None] + cable_len)) * \\\n np.exp(-dalpha_p * (-x[:, None] + cable_len)) * \\\n np.exp(gamma / temp_real) / (np.exp(-gamma / temp_real) - 1)\n rast = C_m * np.exp(-dalpha_r * (-x[:, None] + cable_len)) * np.exp(\n -dalpha_m * (-x[:, None] + cable_len)) / \\\n (np.exp(-gamma / temp_real) - 1)\n\n alpha = np.mean(np.log(rst / rast) - np.log(st / ast), axis=1) / 2\n\n ds = DataStore({\n 'st': (['x', 'time'], st),\n 'ast': (['x', 'time'], ast),\n 'rst': (['x', 'time'], rst),\n 'rast': (['x', 'time'], rast),\n 'userAcquisitionTimeFW': (['time'], np.ones(nt)),\n 'userAcquisitionTimeBW': (['time'], np.ones(nt)),\n 'cold': (['time'], ts_cold),\n 'warm': (['time'], ts_warm)\n },\n coords={\n 'x': x,\n 'time': time},\n attrs={\n 'isDoubleEnded': '1'})\n\n sections = {\n 'cold': [slice(0., 0.5 * cable_len)],\n 'warm': [slice(0.5 * cable_len, cable_len)]}\n\n 
# OLS\n ds.calibration_double_ended(sections=sections,\n st_label='st',\n ast_label='ast',\n rst_label='rst',\n rast_label='rast',\n method='ols',\n solver='sparse',\n fix_gamma=(gamma, 0.))\n\n np.testing.assert_almost_equal(\n ds.gamma.values, gamma, decimal=6)\n np.testing.assert_almost_equal(\n ds.alpha.values, alpha, decimal=8)\n np.testing.assert_almost_equal(\n ds.TMPF.values, temp_real - 273.15, decimal=4)\n np.testing.assert_almost_equal(\n ds.TMPB.values, temp_real - 273.15, decimal=4)\n np.testing.assert_almost_equal(\n ds.TMPW.values, temp_real - 273.15, decimal=4)\n\n # WLS\n ds.calibration_double_ended(sections=sections,\n st_label='st',\n ast_label='ast',\n rst_label='rst',\n rast_label='rast',\n st_var=1e-7,\n ast_var=1e-7,\n rst_var=1e-7,\n rast_var=1e-7,\n method='wls',\n solver='sparse',\n tmpw_mc_size=5,\n fix_gamma=(gamma, 0.))\n\n np.testing.assert_almost_equal(\n ds.gamma.values, gamma, decimal=6)\n np.testing.assert_almost_equal(\n ds.alpha.values, alpha, decimal=7)\n np.testing.assert_almost_equal(\n ds.TMPF.values, temp_real - 273.15, decimal=4)\n np.testing.assert_almost_equal(\n ds.TMPB.values, temp_real - 273.15, decimal=4)\n np.testing.assert_almost_equal(\n ds.TMPW.values, temp_real - 273.15, decimal=4)\n\n pass", "def cooled_surface_temp(T:np.ndarray) -> float:\n \n return T.dot(cs_temp_weights)", "def spring_particle(name, num_trajectories, NUM_PARTS, T_max, dt, sub_sample_rate, noise_std, seed):\n num_particles = NUM_PARTS\n collater = {}\n\n def diffeq_hyper(t, q, k, m, nparts):\n num_particles = nparts\n vels = q[2 * num_particles:]\n xs = q[:2 * num_particles]\n xs = xs.reshape(-1, 2)\n forces = np.zeros(xs.shape)\n new_k = np.repeat(k, num_particles) * np.tile(k, num_particles)\n new_k = np.repeat(new_k, 2).reshape(-1, 2)\n dx = np.repeat(xs, num_particles, axis=0) - np.tile(xs, (num_particles, 1))\n resu = -new_k * dx\n forces = np.add.reduceat(resu, np.arange(0, nparts * nparts, nparts)).ravel()\n\n return np.concatenate([vels / np.repeat(m, 2), forces]).ravel()\n\n def hamiltonian(vec, m, k, num_particles):\n num_particles = num_particles\n x = vec[:num_particles * 2]\n p = vec[2 * num_particles:]\n xs = x.reshape(-1, 2)\n ps = p.reshape(-1, 2)\n U1 = 0\n K = 0\n for i in range(num_particles):\n for j in range(i + 1, num_particles):\n U1 += .5 * k[i] * k[j] * ((xs[i] - xs[j]) ** 2).sum()\n K += 0.5 * ((ps[i] ** 2).sum()) / m[i]\n return K, U1\n\n theta = []\n dtheta = []\n energy = []\n mass_arr = []\n ks_arr = []\n lagrangian = []\n np.random.seed(seed)\n\n for traj in range(num_trajectories):\n ks = np.ones(NUM_PARTS)#np.random.uniform(.5, 1, size=(NUM_PARTS))\n positions = np.random.uniform(-1, 1, size=(NUM_PARTS, 2))\n velocities = np.random.uniform(-3, 3, size=(NUM_PARTS, 2))\n masses = np.ones(NUM_PARTS)#np.random.uniform(0.1, 1, size=NUM_PARTS)\n momentum = np.multiply(velocities, np.repeat(masses, 2).reshape(-1, 2))\n q = np.concatenate([positions, momentum]).ravel()\n qnrk = rk(lambda t, y: diffeq_hyper(t, y, ks, masses, num_particles), (0, T_max), q,\n t_eval=np.arange(0, T_max, dt),\n rtol=1e-12, atol=1e-12, method='DOP853')\n accum = qnrk.y.T\n ssr = int(sub_sample_rate / dt)\n accum = accum[::ssr]\n daccum = np.array([diffeq_hyper(0, accum[i], ks, masses, num_particles) for i in range(accum.shape[0])])\n energies = []\n lags = []\n for i in range(accum.shape[0]):\n ktmp, utmp = hamiltonian(accum[i], masses, ks, NUM_PARTS)\n energies.append(ktmp + utmp)\n lags.append(ktmp - utmp)\n\n accum += np.random.randn(*accum.shape) * noise_std\n 
daccum += np.random.randn(*daccum.shape) * noise_std\n\n theta.append(accum)\n dtheta.append(daccum)\n energy.append(energies)\n mass_arr.append(masses)\n ks_arr.append(ks)\n lagrangian.append(lags)\n\n collater['x'] = np.concatenate(theta)\n collater['dx'] = np.concatenate(dtheta)\n collater['energy'] = np.concatenate(energy)\n collater['lagrangian'] = np.concatenate(lagrangian)\n\n collater['mass'] = mass_arr\n collater['ks'] = ks_arr\n\n f = open(name + \".pkl\", \"wb\")\n pickle.dump(collater, f)\n f.close()\n\n return collater", "def velocity_dispersion_from(\r\n self, redshift_0: float, redshift_1: float, einstein_radius: float\r\n ) -> float:\r\n const = constants.c.to(\"kpc / s\")\r\n\r\n angular_diameter_distance_to_redshift_0_kpc = (\r\n self.angular_diameter_distance_to_earth_in_kpc_from(redshift=redshift_1)\r\n )\r\n\r\n angular_diameter_distance_to_redshift_1_kpc = (\r\n self.angular_diameter_distance_to_earth_in_kpc_from(redshift=redshift_1)\r\n )\r\n\r\n angular_diameter_distance_between_redshifts_kpc = (\r\n self.angular_diameter_distance_between_redshifts_in_kpc_from(\r\n redshift_0=redshift_0, redshift_1=redshift_1\r\n )\r\n )\r\n\r\n kpc_per_arcsec = self.kpc_per_arcsec_from(redshift=redshift_0)\r\n\r\n einstein_radius_kpc = einstein_radius * kpc_per_arcsec\r\n\r\n velocity_dispersion_kpc = const * np.sqrt(\r\n (einstein_radius_kpc * angular_diameter_distance_to_redshift_1_kpc)\r\n / (\r\n 4\r\n * np.pi\r\n * angular_diameter_distance_to_redshift_0_kpc\r\n * angular_diameter_distance_between_redshifts_kpc\r\n )\r\n )\r\n\r\n return velocity_dispersion_kpc.to(\"km/s\").value", "def scale_fixed_M2V(seed=425, th=150, fmass=1, fb=1, fv=1, rfig=False):\n \n # impact parameters\n M = 1e8*u.Msun\n B0 = 19.85*u.kpc\n V0 = 220*u.km/u.s\n phi = coord.Angle(0*u.deg)\n theta0 = coord.Angle(th*u.deg)\n Tenc = 0.01*u.Gyr\n T = 0.5*u.Gyr\n dt = 0.05*u.Myr\n rs = 0*u.pc\n \n # potential parameters\n potential = 3\n Vh = 220*u.km/u.s\n q = 1*u.Unit(1)\n rhalo = 20*u.pc\n par_pot = np.array([Vh.si.value, q.value, rhalo.si.value])\n \n # setup tube\n Nstar = 3000\n wx = 30*u.kpc\n wy = 0*u.pc\n wz = 0*u.pc\n sx = 0*u.km/u.s\n \n np.random.seed(seed)\n observer = {'z_sun': 27.*u.pc, 'galcen_distance': 8.3*u.kpc, 'roll': 60*u.deg, 'galcen_coord': coord.SkyCoord(ra=300*u.deg, dec=-90*u.deg, frame='icrs')}\n vobs = {'vcirc': 220*u.km/u.s, 'vlsr': [0, 0, 0]*u.km/u.s}\n wangle = 180*u.deg\n \n xphi = np.linspace(-0.3*np.pi,0.3*np.pi, Nstar)\n Bs = 20*u.kpc\n xr = Bs + np.random.randn(Nstar)*0.0*u.kpc\n x = np.sin(xphi) * xr\n y = np.cos(xphi) * xr\n z = x * 0\n vx = -np.cos(xphi) * Vh\n vy = np.sin(xphi) * Vh\n vz = vx * 0\n \n # generate stream model\n potential_perturb = 1\n par_perturb = np.array([M.si.value, 0., 0., 0.])\n x1, x2, x3, v1, v2, v3 = interact.interact(par_perturb, B0.si.value, phi.rad, V0.si.value, theta0.rad, Tenc.si.value, T.si.value, dt.si.value, par_pot, potential, potential_perturb, x.si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)\n stream = {}\n stream['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc)\n stream['v'] = (np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s)\n \n # sky coordinates\n xgal = coord.Galactocentric(stream['x'], **observer)\n xeq = xgal.transform_to(coord.ICRS)\n veq_ = gc.vgal_to_hel(xeq, stream['v'], **vobs)\n veq = [None] * 3\n veq[0] = veq_[0].to(u.mas/u.yr)\n veq[1] = veq_[1].to(u.mas/u.yr)\n veq[2] = veq_[2].to(u.km/u.s)\n \n # rotate to native coordinate system\n R = find_greatcircle(xeq.ra.deg[::10], xeq.dec.deg[::10])\n xi, eta 
= myutils.rotate_angles(xeq.ra, xeq.dec, R)\n xi = coord.Angle(xi*u.deg)\n \n # unperturbed stream\n par_perturb = np.array([0*M.si.value, 0., 0., 0.])\n x1, x2, x3, v1, v2, v3 = interact.interact(par_perturb, B0.si.value, phi.rad, V0.si.value, theta0.rad, Tenc.si.value, T.si.value, dt.si.value, par_pot, potential, potential_perturb, x.si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)\n stream0 = {}\n stream0['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc)\n stream0['v'] = (np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s)\n \n # sky coordinates\n xgal0 = coord.Galactocentric(stream0['x'], **observer)\n xeq0 = xgal0.transform_to(coord.ICRS)\n veq0_ = gc.vgal_to_hel(xeq0, stream0['v'], **vobs)\n veq0 = [None] * 3\n veq0[0] = veq0_[0].to(u.mas/u.yr)\n veq0[1] = veq0_[1].to(u.mas/u.yr)\n veq0[2] = veq0_[2].to(u.km/u.s)\n \n # rotate to native coordinate system\n xi0, eta0 = myutils.rotate_angles(xeq0.ra, xeq0.dec, R)\n xi0 = coord.Angle(xi0*u.deg)\n \n farray = np.array([0.3, 0.5, 1, 2, 3])\n #farray = np.array([0.5, 1, 2])\n #farray = np.array([0.5, 1])\n \n rasterized = False\n if rfig:\n rasterized = True\n \n plt.close()\n fig, ax = plt.subplots(1,1,figsize=(12,12), sharex=True, squeeze=False)\n \n for e, f in enumerate(farray):\n fsqrt = np.sqrt(f)\n par_perturb = np.array([f*fmass*M.si.value, 0., 0., 0.])\n #B = B0\n \n dB = (B0 - Bs)*fb\n B = dB + Bs\n \n vpar = Vh + np.cos(theta0.rad)*V0\n vperp = np.sin(theta0.rad)*V0\n \n vpar_scaled = vpar*f\n vperp_scaled = vperp*f\n \n V = np.sqrt((vpar_scaled-Vh)**2 + vperp_scaled**2)\n theta = coord.Angle(np.arctan2(vperp_scaled, vpar_scaled-Vh))\n \n #fi = np.abs(V*T/(dB/f)).decompose()\n fi = np.abs(dB/(vperp_scaled)).to(u.Myr)\n #print(fi)\n \n x1, x2, x3, v1, v2, v3 = interact.interact(par_perturb, B.si.value, phi.rad, V.si.value, theta.rad, Tenc.si.value, T.si.value, dt.si.value, par_pot, potential, potential_perturb, x.si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)\n stream = {}\n stream['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc)\n stream['v'] = (np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s)\n \n # sky coordinates\n xgal = coord.Galactocentric(stream['x'], **observer)\n xeq = xgal.transform_to(coord.ICRS)\n veq_ = gc.vgal_to_hel(xeq, stream['v'], **vobs)\n veq = [None] * 3\n veq[0] = veq_[0].to(u.mas/u.yr)\n veq[1] = veq_[1].to(u.mas/u.yr)\n veq[2] = veq_[2].to(u.km/u.s)\n \n # rotate to native coordinate system\n xi, eta = myutils.rotate_angles(xeq.ra, xeq.dec, R)\n xi = coord.Angle(xi*u.deg)\n \n color = '{:f}'.format(0.65 - 0.65*(e+1)/(np.size(farray)) + 0.35)\n ms = 1.5*(e+2)\n zorder = np.size(farray)-e\n label = 'f={:g}, $t_{{imp}}$={:.1f}'.format(f, fi)\n #print(e, p, color)\n \n plt.sca(ax[0][0])\n plt.plot(xi.wrap_at(wangle), eta, 'o', mec='none', color=color, ms=ms, zorder=zorder, label=label, rasterized=rasterized)\n \n #for i in range(3):\n #plt.sca(ax[i+1])\n #vexp = np.interp(xi.wrap_at(wangle), xi0.wrap_at(wangle), veq0[i].value) * veq0[i].unit\n #plt.plot(xi.wrap_at(wangle), veq[i]-vexp, 'o', mec='none', color=color, ms=ms, zorder=zorder, rasterized=rasterized)\n \n # label axes\n plt.sca(ax[0][0])\n plt.ylabel('$\\phi_1$ [deg]')\n plt.ylim(-10,10)\n plt.xlim(65,135)\n #plt.gca().set_aspect('equal')\n plt.legend(fontsize='x-small', loc=2)\n plt.title('f M, f V | M = {:g} | V = {:g} | $\\\\theta$ = {:.0f}'.format(fmass*M, V.to(u.km/u.s), theta.to(u.deg)), fontsize='medium')\n \n #vlabel = ['$\\mu_{\\\\alpha_\\star}$ [mas yr$^{-1}$]','$\\mu_{\\delta}$ [mas yr$^{-1}$]', '$V_r$ [km 
s$^{-1}$]']\n #ylims = [[-0.5, 0.5], [-0.5, 0.5], [-25,25]]\n #ylims = [[-1,1], [-1,1], [-50,50]]\n #for i in range(3):\n #plt.sca(ax[i+1])\n #plt.ylabel('$\\Delta$ {}'.format(vlabel[i]))\n #plt.ylim(*ylims[i])\n\n plt.xlabel('$\\phi_2$ [deg]')\n \n plt.tight_layout()\n \n if rfig:\n return fig\n else:\n plt.savefig('../plots/scale_MV_th{:03d}_{:.1f}_{:.1f}.png'.format(th, fmass, fv))", "def get_hc_external(self, weather, surface, h_surface, terrain):\r\n roughness = surface.construction[0].roughness_unit # Change back to this line...left as below to match Na's\r\n if roughness == \"VeryRough\":\r\n D = 11.58\r\n E = 5.894\r\n F = 0\r\n elif roughness == \"Rough\":\r\n D = 12.49\r\n E = 4.065\r\n F = 0.028\r\n elif roughness == \"MediumRough\":\r\n D = 10.79\r\n E = 4.192\r\n F = 0.0\r\n elif roughness == \"MediumSmooth\":\r\n D = 8.23\r\n E = 4.0\r\n F = -0.057\r\n elif roughness == \"Smooth\":\r\n D = 10.22\r\n E = 3.1\r\n F = 0.0\r\n elif roughness == \"VerySmooth\":\r\n D = 8.23\r\n E = 3.33\r\n F = -0.036\r\n else:\r\n D = 8.23\r\n E = 4.0\r\n F = -0.057\r\n print \"No Roughness Value Found so Set Default Values of 8.23,4.0,-0.057\"\r\n\r\n wind_speed_temp = weather[\"wind_speed\"]\r\n # Terrain Lookup Table\r\n if terrain == 'Flat or Open Countryside':\r\n sigma = 270\r\n a = 0.14\r\n elif terrain == 'Rough or Wooded Country':\r\n sigma = 370\r\n a = 0.22\r\n elif terrain == 'Towns and City Scapes':\r\n sigma = 460\r\n a = 0.33\r\n elif terrain == 'Ocean Front Areas':\r\n sigma = 210\r\n a = 0.10\r\n elif terrain == 'Urban, Industrial, or Forest':\r\n sigma = 370\r\n a = 0.22\r\n else:\r\n sigma = 370\r\n a = 0.22\r\n print \"No Terrain Type Found so Set Default Values of 370,0.22\"\r\n terrain_sigma = sigma\r\n terrain_cof = a\r\n\r\n # Adjust the wind speed...Stable air above human inhabited areas:\r\n #wind_speed = wind_speed_temp * ((h_surface / 10) ** 0.5) # This was the line used to get wind_speed before terrain was added\r\n # Wind speed corrected for terrain differences;\r\n wind_speed = wind_speed_temp * ((270/10) ** 0.14) * (h_surface/terrain_sigma) ** terrain_cof\r\n #print wind_speed\r\n # Calculate the hc_external\r\n # hc_external= D+E*Wind_speed+F*Wind_speed^2\r\n hc_external = D + (E * wind_speed) + (F * wind_speed ** 2)\r\n\r\n # depending on the direction of the wind adjust the hc_external...as of versions 3 and 4 this part seems omitted\r\n #x = abs(wind_speed_dir - azimuth)\r\n #if x > 100:\r\n # if x < 260:\r\n # hc_external *= 0.5\r\n #print \"hc_external : \", hc_external, D, E, F\r\n\r\n return round(hc_external, 5)", "def get_focal_point(patches, shell_point, num_rays=20):\n focal_point = Point3D(0.0, 0.0, 0.0)\n for patch in patches:\n #create a bunch of parallel rays coming from the eye\n ray_vector = normalize(shell_point)\n \n ##TODO: remove me\n #ray_vector = normalize(patch.shell_point)\n \n ray_rotation = numpy.zeros((3, 3))\n optics.rotation_matrix.R_2vect(ray_rotation, PRINCIPAL_RAY, ray_vector)\n rays = []\n for x in numpy.linspace(-LIGHT_RADIUS, LIGHT_RADIUS, num_rays*2+1):\n for y in numpy.linspace(-LIGHT_RADIUS, LIGHT_RADIUS, num_rays*2+1):\n start_point = ray_rotation.dot(Point3D(x, y, 0.0))\n rays.append(Ray(start_point, start_point + ray_vector))\n \n #find the point such that the spot size is minimized on the screen.\n #can average the normal of the reflected rays to get approximately where the screen goes\n #then iteratively try different distances until we've minimized the spot size there\n focal_point = Point3D(0.0, 0.0, 0.0)\n 
reflected_rays = [ray for ray in patch.reflect_rays_no_bounds(rays) if ray != None]\n approximate_screen_normal = sum([normalize(ray.start - ray.end) for ray in reflected_rays]) / len(reflected_rays)\n if optics.debug.PATCH_FOCAL_REFLECTIONS:\n #TODO: all rays don't come from the origin. draw all rays from their actual start points, and draw non-reflected rays going past the surface\n #also, only draw the part of the surface that is real and should be reflected from\n axes = matplotlib.pyplot.subplot(111, projection='3d')\n size = 5\n num_points = 10\n x, y = numpy.meshgrid(numpy.linspace(-size, size, num_points), numpy.linspace(-size, size, num_points))\n axes.scatter(x, y, patch.poly.get_z_for_plot(x, y), c='r', marker='o').set_label('patch')\n for ray in reflected_rays:\n debug_dist = 2*numpy.linalg.norm(ORIGIN - ray.start)\n rays_to_draw = numpy.array([\n patch.poly_space.point_to_space(ORIGIN),\n patch.poly_space.point_to_space(ray.start),\n patch.poly_space.point_to_space(debug_dist * normalize(ray.end-ray.start) + ray.start)\n ])\n axes.plot(rays_to_draw[:, 0], rays_to_draw[:, 1], rays_to_draw[:, 2], label=\"ray\")\n axes.set_xlabel('X')\n axes.set_ylabel('Y')\n axes.set_zlabel('Z')\n matplotlib.pyplot.legend()\n matplotlib.pyplot.show()\n def calculate_spot_size(distance):\n \"\"\"\n :returns: average distance from the central point for the plane at this distance\n \"\"\"\n screen_plane = Plane(distance * approximate_screen_normal * -1.0 + shell_point, approximate_screen_normal)\n points = []\n for ray in reflected_rays:\n points.append(screen_plane.intersect_line(ray.start, ray.end))\n average_point = sum(points) / len(points)\n errors = [numpy.linalg.norm(p - average_point) for p in points]\n if optics.debug.PATCH_FOCAL_SPOT_SIZE:\n #use coordinate space to move everything to the xy plane\n space = CoordinateSpace(screen_plane._point, screen_plane._normal)\n transformed_points = numpy.array([space.point_to_space(p) for p in points])\n matplotlib.pyplot.plot(transformed_points[:, 0], transformed_points[:, 1], \"r\", linestyle='None', marker='o', label=\"rays at %s\" % (distance))\n matplotlib.pyplot.legend()\n matplotlib.pyplot.show()\n #keep a fixed scale to x and y so that each graph can be compared with the previous\n #should probably print the errors as well\n print errors\n print sum(errors) / len(errors)\n return sum(errors) / len(errors)\n previous_distance = numpy.linalg.norm(patch.shell_point - patch.screen_point)\n min_dist = previous_distance * 0.9\n max_dist = previous_distance * 1.1\n num_iterations = 20\n tolerance = 0.0001\n best_dist = scipy.optimize.fminbound(calculate_spot_size, min_dist, max_dist, maxfun=num_iterations, xtol=tolerance, full_output=False, disp=0)\n focal_point += best_dist * approximate_screen_normal * -1.0 + shell_point\n return focal_point / len(patches)", "def expansionConservationHotHeightDefined(self, mat: str, isotope: str):\n hotHeight = 1.0\n\n circle1 = Circle(\"circle\", mat, self.tCold, self.tWarm, self.coldOuterDiameter)\n circle2 = Circle(\"circle\", mat, self.tCold, self.tHot, self.coldOuterDiameter)\n\n # mass density is proportional to Fe number density and derived from\n # all the number densities and atomic masses\n self.assertAlmostEqual(\n circle1.p.numberDensities[isotope] / circle2.p.numberDensities[isotope],\n circle1.density() / circle2.density(),\n )\n\n # the colder one has more because it is the same cold outer diameter\n # but it would be taller at the same temperature\n mass1 = circle1.density() * circle1.getArea() * 
hotHeight\n mass2 = circle2.density() * circle2.getArea() * hotHeight\n self.assertGreater(mass1, mass2)\n\n # they are off by factor of thermal exp\n self.assertAlmostEqual(\n mass1 * circle1.getThermalExpansionFactor(),\n mass2 * circle2.getThermalExpansionFactor(),\n )\n\n # material.pseudoDensity is the 2D density of a material\n # material.density is true density and not equal in this case\n for circle in [circle1, circle2]:\n # 2D density is not equal after application of coldMatAxialExpansionFactor\n # which happens during construction\n self.assertNotAlmostEqual(\n circle.density(),\n circle.material.pseudoDensity(Tc=circle.temperatureInC),\n )\n # 2D density is off by the material thermal exp factor\n percent = circle.material.linearExpansionPercent(Tc=circle.temperatureInC)\n thermalExpansionFactorFromColdMatTemp = 1 + percent / 100\n self.assertAlmostEqual(\n circle.density() * thermalExpansionFactorFromColdMatTemp,\n circle.material.pseudoDensity(Tc=circle.temperatureInC),\n )\n self.assertAlmostEqual(\n circle.density(),\n circle.material.density(Tc=circle.temperatureInC),\n )\n\n # brief 2D expansion with set temp to show mass is conserved\n # hot height would come from block value\n warmMass = circle1.density() * circle1.getArea() * hotHeight\n circle1.setTemperature(self.tHot)\n hotMass = circle1.density() * circle1.getArea() * hotHeight\n self.assertAlmostEqual(warmMass, hotMass)\n circle1.setTemperature(self.tWarm)\n\n # Change temp to circle 2 temp to show equal to circle2\n # and then change back to show recoverable to original values\n oldArea = circle1.getArea()\n initialDens = circle1.density()\n\n # when block.setHeight is called (which effectively changes component height)\n # component.setNumberDensity is called (for solid isotopes) to adjust the number\n # density so that now the 2D expansion will be approximated/expanded around\n # the hot temp which is akin to these adjustments\n heightFactor = circle1.getHeightFactor(self.tHot)\n circle1.adjustDensityForHeightExpansion(self.tHot) # apply temp at new height\n circle1.setTemperature(self.tHot)\n\n # now its density is same as hot component\n self.assertAlmostEqual(\n circle1.density(),\n circle2.density(),\n )\n\n # show that mass is conserved after expansion\n circle1NewHotHeight = hotHeight * heightFactor\n self.assertAlmostEqual(\n mass1, circle1.density() * circle1.getArea() * circle1NewHotHeight\n )\n\n self.assertAlmostEqual(\n circle1.density(),\n circle1.material.density(Tc=circle1.temperatureInC),\n )\n # change back to old temp\n circle1.adjustDensityForHeightExpansion(self.tWarm)\n circle1.setTemperature(self.tWarm)\n\n # check for consistency\n self.assertAlmostEqual(initialDens, circle1.density())\n self.assertAlmostEqual(oldArea, circle1.getArea())\n self.assertAlmostEqual(mass1, circle1.density() * circle1.getArea() * hotHeight)", "def coeff_display_M202(Nstar=1,seeing=[0.9,0.,0.],npix=npix,zenith=0,filter='r', theta=0., phi=0,corrector='corrector',x=0.,y=0.,z=0.,zernike_max_order=20,regular=False):\n hdu = genImgVallCCD(Nstar=Nstar,seeing=seeing,npix=npix,zenith=zenith,filter=filter, theta=theta,phi=phi, corrector=corrector,x=x,y=y,z=z,regular=regular)\n nn = len(hdu)\n data = []\n colnames = ['x','y','M20','M22','M31','M33']\n for hdui in hdu[1:]:\n Nobj = hdui.data.shape[0]\n M20=np.zeros(Nobj)\n M22=np.zeros(Nobj).astype(complex)\n M31=np.zeros(Nobj).astype(complex)\n M33=np.zeros(Nobj).astype(complex)\n for i in range(Nobj):\n img = hdui.data[i][4:].reshape(npix,npix)\n img = 
rebin(img,(40,40))\n M20,M22,M31,M33=complexMoments(data=img,sigma=4.)\n x=hdui.header['ccdXcen']\n y=hdui.header['ccdYcen']\n data.append([x,y,np.median(M20), np.median(M22), np.median(M31), np.median(M33)])\n data=np.array(data) \n betaAll=[]\n betaErrAll=[]\n R2adjAll=[]\n beta,betaErr,R2_adj,fitted = zernikeFit(data[:,0].real,data[:,1].real,data[:,2].real,max_order=zernike_max_order)\n betaAll.append(beta)\n betaErrAll.append(betaErr)\n R2adjAll.append(R2_adj)\n for i in range(3,6):\n beta,betaErr,R2_adj,fitted = zernikeFit(data[:,0].real,data[:,1].real,data[:,i].real,max_order=zernike_max_order)\n betaAll.append(beta)\n betaErrAll.append(betaErr)\n R2adjAll.append(R2_adj)\n beta,betaErr,R2_adj,fitted = zernikeFit(data[:,0].real,data[:,1].real,data[:,i].imag,max_order=zernike_max_order)\n betaAll.append(beta)\n betaErrAll.append(betaErr)\n R2adjAll.append(R2_adj)\n betaAll = np.array(betaAll)\n betaErrAll = np.array(betaErrAll)\n R2adjAll = np.array(R2adjAll)\n ind = np.arange(len(betaAll[0]))\n momname = ('M20','M22.Real','M22.imag','M31.real','M31.imag','M33.real','M33.imag')\n fmtarr = ['bo-','ro-','go-','co-','mo-','yo-','ko-']\n pl.figure(figsize=(17,7))\n for i in range(3):\n pl.subplot(4,1,i+1)\n pl.errorbar(ind[1:],betaAll[i][1:],yerr = betaErrAll[i][1:],fmt=fmtarr[i])\n if i == 0:\n pl.title('x: '+str(hdu[0].header['x'])+' y: '+str(hdu[0].header['y'])+' z: '+str(hdu[0].header['z'])+' tilt: '+str(hdu[0].header['theta'])+' fwhm: '+str(hdu[0].header['s_fwhm'])+' e1: '+str(hdu[0].header['e1'])+' e2: '+str(hdu[0].header['e2']))\n pl.grid()\n pl.xlim(-1,len(betaAll[i])+1)\n pl.ylim(min(betaAll[i][1:])-0.5,max(betaAll[i][1:])+0.5)\n #pl.ylim(-0.1,0.1)\n pl.xticks(ind,('','','','','','','','','','','','','','','','','','','',''))\n pl.ylabel(momname[i])\n pl.xticks(ind,('Piston','Tip','Tilt','Astignism','Defocus','Astignism','Trefoil','Coma','Coma','Trefoil','Ashtray','Astigm.5th','Spherical','Astigm.5th','Ashtray','16','17','18','19','20'),rotation=90)\n pl.xlabel('Zernike Coefficients')\n return betaAll,betaErrAll", "def scale_fixed_V2B(seed=425, th=150, fmass=1, fb=1, fv=1, rfig=False):\n \n # impact parameters\n M = 1e8*u.Msun\n B0 = 19.85*u.kpc\n V0 = 220*u.km/u.s\n phi = coord.Angle(0*u.deg)\n theta = coord.Angle(th*u.deg)\n Tenc = 0.01*u.Gyr\n T = 0.5*u.Gyr\n dt = 0.1*u.Myr\n rs = 0*u.pc\n \n # potential parameters\n potential = 3\n Vh = 220*u.km/u.s\n q = 1*u.Unit(1)\n rhalo = 20*u.pc\n par_pot = np.array([Vh.si.value, q.value, rhalo.si.value])\n \n # setup tube\n Nstar = 3000\n wx = 30*u.kpc\n wy = 0*u.pc\n wz = 0*u.pc\n sx = 0*u.km/u.s\n \n np.random.seed(seed)\n observer = {'z_sun': 27.*u.pc, 'galcen_distance': 8.3*u.kpc, 'roll': 60*u.deg, 'galcen_coord': coord.SkyCoord(ra=300*u.deg, dec=-90*u.deg, frame='icrs')}\n vobs = {'vcirc': 220*u.km/u.s, 'vlsr': [0, 0, 0]*u.km/u.s}\n wangle = 180*u.deg\n \n xphi = np.linspace(-0.3*np.pi,0.3*np.pi, Nstar)\n Bs = 20*u.kpc\n xr = Bs + np.random.randn(Nstar)*0.0*u.kpc\n x = np.sin(xphi) * xr\n y = np.cos(xphi) * xr\n z = x * 0\n vx = -np.cos(xphi) * Vh\n vy = np.sin(xphi) * Vh\n vz = vx * 0\n \n # generate stream model\n potential_perturb = 1\n par_perturb = np.array([M.si.value, 0., 0., 0.])\n x1, x2, x3, v1, v2, v3 = interact.interact(par_perturb, B0.si.value, phi.rad, V0.si.value, theta.rad, Tenc.si.value, T.si.value, dt.si.value, par_pot, potential, potential_perturb, x.si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)\n stream = {}\n stream['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc)\n stream['v'] = 
(np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s)\n \n # sky coordinates\n xgal = coord.Galactocentric(stream['x'], **observer)\n xeq = xgal.transform_to(coord.ICRS)\n veq_ = gc.vgal_to_hel(xeq, stream['v'], **vobs)\n veq = [None] * 3\n veq[0] = veq_[0].to(u.mas/u.yr)\n veq[1] = veq_[1].to(u.mas/u.yr)\n veq[2] = veq_[2].to(u.km/u.s)\n \n # rotate to native coordinate system\n R = find_greatcircle(xeq.ra.deg[::10], xeq.dec.deg[::10])\n xi, eta = myutils.rotate_angles(xeq.ra, xeq.dec, R)\n xi = coord.Angle(xi*u.deg)\n \n # unperturbed stream\n par_perturb = np.array([0*M.si.value, 0., 0., 0.])\n x1, x2, x3, v1, v2, v3 = interact.interact(par_perturb, B0.si.value, phi.rad, V0.si.value, theta.rad, Tenc.si.value, T.si.value, dt.si.value, par_pot, potential, potential_perturb, x.si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)\n stream0 = {}\n stream0['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc)\n stream0['v'] = (np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s)\n \n # sky coordinates\n xgal0 = coord.Galactocentric(stream0['x'], **observer)\n xeq0 = xgal0.transform_to(coord.ICRS)\n veq0_ = gc.vgal_to_hel(xeq0, stream0['v'], **vobs)\n veq0 = [None] * 3\n veq0[0] = veq0_[0].to(u.mas/u.yr)\n veq0[1] = veq0_[1].to(u.mas/u.yr)\n veq0[2] = veq0_[2].to(u.km/u.s)\n \n # rotate to native coordinate system\n xi0, eta0 = myutils.rotate_angles(xeq0.ra, xeq0.dec, R)\n xi0 = coord.Angle(xi0*u.deg)\n \n farray = np.array([0.3, 0.5, 1, 2, 3])\n \n rasterized = False\n if rfig:\n rasterized = True\n \n plt.close()\n fig, ax = plt.subplots(4,1,figsize=(12,12), sharex=True)\n \n for e, f in enumerate(farray):\n fsqrt = np.sqrt(f)\n par_perturb = np.array([fmass*M.si.value, 0., 0., 0.])\n #B = B0\n \n dB = (B0 - Bs)*fb\n B = dB*f + Bs\n \n V = fv*V0/fsqrt\n \n #fi = np.abs(V*T/(dB/f)).decompose()\n fi = np.abs(dB/V).to(u.Myr)\n #print(fi)\n \n x1, x2, x3, v1, v2, v3 = interact.interact(par_perturb, B.si.value, phi.rad, V.si.value, theta.rad, Tenc.si.value, T.si.value, dt.si.value, par_pot, potential, potential_perturb, x.si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)\n stream = {}\n stream['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc)\n stream['v'] = (np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s)\n \n # sky coordinates\n xgal = coord.Galactocentric(stream['x'], **observer)\n xeq = xgal.transform_to(coord.ICRS)\n veq_ = gc.vgal_to_hel(xeq, stream['v'], **vobs)\n veq = [None] * 3\n veq[0] = veq_[0].to(u.mas/u.yr)\n veq[1] = veq_[1].to(u.mas/u.yr)\n veq[2] = veq_[2].to(u.km/u.s)\n \n # rotate to native coordinate system\n xi, eta = myutils.rotate_angles(xeq.ra, xeq.dec, R)\n xi = coord.Angle(xi*u.deg)\n \n color = '{:f}'.format(0.65 - 0.65*(e+1)/(np.size(farray)) + 0.35)\n ms = 1.5*(e+2)\n zorder = np.size(farray)-e\n label = 'f={:g}, $t_{{imp}}$={:.1f}'.format(f, fi)\n #print(e, p, color)\n \n plt.sca(ax[0])\n plt.plot(xi.wrap_at(wangle), eta, 'o', mec='none', color=color, ms=ms, zorder=zorder, label=label, rasterized=rasterized)\n \n for i in range(3):\n plt.sca(ax[i+1])\n vexp = np.interp(xi.wrap_at(wangle), xi0.wrap_at(wangle), veq0[i].value) * veq0[i].unit\n plt.plot(xi.wrap_at(wangle), veq[i]-vexp, 'o', mec='none', color=color, ms=ms, zorder=zorder, rasterized=rasterized)\n \n # label axes\n plt.sca(ax[0])\n plt.ylabel('$\\phi_1$ [deg]')\n plt.ylim(-10,10)\n plt.xlim(65,135)\n #plt.gca().set_aspect('equal')\n plt.legend(fontsize='x-small', loc=2)\n plt.title('f M, f B | M = {:g} | B = {:g} | $\\\\theta$ = {:.0f}'.format(fmass*M, dB.to(u.pc), theta), fontsize='medium')\n \n 
vlabel = ['$\\mu_{\\\\alpha_\\star}$ [mas yr$^{-1}$]','$\\mu_{\\delta}$ [mas yr$^{-1}$]', '$V_r$ [km s$^{-1}$]']\n ylims = [[-0.5, 0.5], [-0.5, 0.5], [-25,25]]\n ylims = [[-1,1], [-1,1], [-50,50]]\n for i in range(3):\n plt.sca(ax[i+1])\n plt.ylabel('$\\Delta$ {}'.format(vlabel[i]))\n plt.ylim(*ylims[i])\n\n plt.xlabel('$\\phi_2$ [deg]')\n \n plt.tight_layout()\n \n if rfig:\n return fig\n else:\n plt.savefig('../plots/scale_VB_th{:03d}_{:.1f}_{:.1f}.png'.format(th, fv, fb))", "def pwl(site_residuals, azSpacing=0.5,zenSpacing=0.5):\n tdata = res.reject_absVal(site_residuals,100.)\n del site_residuals \n data = res.reject_outliers_elevation(tdata,5,0.5)\n del tdata\n\n Bvec_complete = []\n Sol_complete = []\n model_complete = []\n meas_complete = []\n\n numd = np.shape(data)[0]\n numZD = int(90.0/zenSpacing) + 1\n numAZ = int(360./azSpacing)\n print(\"numAZ\",numAZ)\n pwl_All = np.zeros((numAZ,numZD))\n pwlSig_All = np.zeros((numAZ,numZD))\n #pwl_All = np.zeros((numZD,numAZ))\n #pwlSig_All = np.zeros((numZD,numAZ))\n\n for j in range(0,numAZ):\n # Find only those value within this azimuth bin:\n if(j - azSpacing/2. < 0) :\n criterion = (data[:,1] < (j + azSpacing/2.)) | (data[:,1] > (360. - azSpacing/2.) )\n else:\n criterion = (data[:,1] < (j + azSpacing/2.)) & (data[:,1] > (j - azSpacing/2.) )\n ind = np.array(np.where(criterion))[0]\n azData =data[ind,:]\n numd = np.shape(azData)[0]\n if numd < 2:\n continue\n\n # Neq is acting like a constrain on the model a small value 0.001\n # let the model vary by 1000 mm\n # will let it vary more. a large value -> 1 will force the model to be closer to 0\n Neq = np.eye(numZD,dtype=float) * 0.001\n Apart = np.zeros((numd,numZD))\n #aiz = j* int(np.floor(360./zenSpacing))\n \n for i in range(0,numd):\n iz = int(np.floor(azData[i,2]/zenSpacing)) #+ aiz\n Apart[i,iz] = (1.-(azData[i,2]-float(iz)*zenSpacing)/zenSpacing)\n Apart[i,iz+1] = (azData[i,2]-float(iz)*zenSpacing)/zenSpacing\n #Apart_1 = (1.-(azData[i,2]-float(iz)*zenSpacing)/zenSpacing)\n #Apart_2 = (azData[i,2]-float(iz)*zenSpacing)/zenSpacing\n\n prechi = np.dot(azData[:,3].T,azData[:,3])\n\n Neq = np.add(Neq, np.dot(Apart.T,Apart) )\n Bvec = np.dot(Apart.T,azData[:,3])\n for val in Bvec:\n Bvec_complete.append(val)\n Cov = np.linalg.pinv(Neq)\n Sol = np.dot(Cov,Bvec)\n\n for val in Sol:\n Sol_complete.append(val)\n\n postchi = prechi - np.dot(Bvec.T,Sol)\n pwlsig = np.sqrt(np.diag(Cov) *postchi/numd)\n \n model = np.dot(Apart,Sol)\n\n for d in range(0,numd):\n meas_complete.append(azData[d,3])\n model_complete.append(model[d])\n\n pwl_All[j,:] = Sol \n pwlSig_All[j,:] = pwlsig\n\n #print(\"Sol:\",Sol)\n #print(\"PWL:\",pwl_All[j,:])\n\n #pwl_All[:,j] = Sol \n #print(\"Sol:\",np.shape(Sol),np.shape(pwl_All))\n #pwlSig_All[:,j] = pwlsig\n del Sol,pwlsig,Cov,Bvec,Neq,Apart,azData,ind\n\n # Calculate the AIC and BIC values...\n f = loglikelihood(np.array(meas_complete),np.array(model_complete))\n numd = np.size(meas_complete)\n dof = numd - np.shape(Sol_complete)[0]\n aic = calcAIC(f,dof)\n bic = calcBIC(f,dof,numd)\n\n prechi = np.dot(np.array(meas_complete).T,np.array(meas_complete))\n postchi = prechi - np.dot(np.array(Bvec_complete).T,np.array(Sol_complete))\n\n #print(\"My loglikelihood:\",f,aic,bic,dof,numd)\n #print(\"STATS:\",numd,np.sqrt(prechi/numd),np.sqrt(postchi/numd),np.sqrt((prechi-postchi)/numd),aic,bic)\n stats = {}\n stats['prechi'] = np.sqrt(prechi/numd)\n stats['postchi'] = np.sqrt(postchi/numd)\n stats['chi_inc'] = np.sqrt((prechi-postchi)/numd)\n stats['aic'] = aic\n 
stats['bic'] = bic\n\n return pwl_All, pwlSig_All, stats", "def rho(SA, CT, p):\n\n SA = np.maximum(SA, 0)\n\n xs = np.sqrt(sfac * SA + soffset)\n ys = CT * 0.025\n z = p * 1e-4\n\n specific_volume = (v000\n + xs * (v100 + xs * (v200 + xs * (v300 + xs * (v400 + xs * (v500\n + xs * v600)))))\n + ys * (v010\n + xs * (v110 + xs * (v210 + xs * (v310 + xs * (v410 + xs * v510))))\n + ys * (v020 + xs * (v120 + xs * (v220 + xs * (v320 + xs * v420)))\n + ys * (v030 + xs * (v130 + xs * (v230 + xs * v330))\n + ys * (v040 + xs * (v140 + xs * v240)\n + ys * (v050 + xs * v150 + ys * v060)))))\n + z * (v001\n + xs * (v101 + xs * (v201 + xs * (v301 + xs * (v401 + xs * v501))))\n + ys * (v011 + xs * (v111 + xs * (v211 + xs * (v311 + xs * v411)))\n + ys * (v021 + xs * (v121 + xs * (v221 + xs * v321))\n + ys * (v031 + xs * (v131 + xs * v231)\n + ys * (v041 + xs * v141 + ys * v051))))\n + z * (v002\n + xs * (v102 + xs * (v202 + xs * (v302 + xs * v402)))\n + ys * (v012 + xs * (v112 + xs * (v212 + xs * v312))\n + ys * (v022 + xs * (v122 + xs * v222)\n + ys * (v032 + xs * v132 + ys * v042)))\n + z * (v003\n + xs * (v103 + xs * v203)\n + ys * (v013 + xs * v113 + ys * v023)\n + z * (v004 + xs * v104 + ys * v014\n + z * (v005 + z * v006))))))\n\n return 1. / specific_volume", "def update_pressure(self):\n m_multipliers = np.ones(self.mesh.get_number_of_cells())\n\n\n rhs_current = np.zeros(self.mfd.get_number_of_dof()) \n rhs_current += self.rhs_mfd\n\n\n for cell_index in range(self.mesh.get_number_of_cells()):\n density = -self.ref_pressure\n density += self.current_pressure[cell_index]\n density *= self.compressibility\n density += 1.\n density *= self.ref_density\n\n # We multiply by the inverse of \\frac{\\rho}{\\mu}\n m_multipliers[cell_index] = self.viscosity/density\n\n c_entry = self.compressibility\n c_entry *= self.porosities[cell_index]\n c_entry /= self.delta_t\n c_entry *= self.mesh.get_cell_volume(cell_index)\n\n rhs_current[self.mesh.get_number_of_faces()+\n cell_index] += c_entry*self.current_pressure[cell_index]\n\n self.lhs_coo.data[self.c_start+cell_index] = c_entry\n\n for [index, cell_index] in enumerate(self.rate_wells):\n rhs_current[self.mesh.get_number_of_faces()+cell_index] += \\\n self.rate_wells_rate[index]\n\n self.mfd.update_m(self.lhs_coo.data[:self.m_x_coo_length], m_multipliers)\n\n solution = dsolve.spsolve(self.lhs_coo.tocsr(), rhs_current)\n self.prev_pressure = self.current_pressure\n self.current_pressure = solution[self.mesh.get_number_of_faces():]\n self.current_velocity = solution[:self.mesh.get_number_of_faces()]", "def plotblackbody(_zband, _yband, _jband, _hband, _kband, _parallax, _perr):\n # Set pyplot style to be consistent within the program\n plt.style.use('seaborn-whitegrid')\n # Import raw data to plot Hertzsprung-Russell diagram\n _hrdata = inithr('hr.dat')\n # Determine distance in parsecs\n _distance = 1 / np.tan(_parallax * 10**-3)\n _derr = (_perr * 10**-3) / ((_parallax * 10**-3)**2)\n # Create single data array with all bands\n _bands = [_zband, _yband, _jband, _hband, _kband]\n _lambda = [0.9, 1.02, 1.22, 1.63, 2.2]\n # Set up empty arrays for each star\n _largestar = np.zeros((1, 2))\n _smallstar = np.zeros((1, 2))\n\n # Determine the spectral flux density from the large star\n i = 0\n while i < 5:\n # Determine the maximum and minimum values of the observed band\n _max, _min = lightcurve.maxminvals(_bands[i])\n # The large star uses the maximum flux value (smallest magnitude)\n _largestar = np.append(_largestar, np.array([_lambda[i], (magtoflux(_min, 
i))], ndmin=2), axis=0)\n i += 1\n # Delete first empty row of the array\n _largestar = np.delete(_largestar, 0, axis=0)\n\n # Determine the spectral flux density from the small star\n i = 0\n while i < 5:\n # Determine the maximum and minimum values of the observed band\n _max, _min = lightcurve.maxminvals(_bands[i])\n # Smaller star flux value is combined value minus the large star\n _smallstar = np.append(_smallstar, np.array([_lambda[i], (magtoflux(_max, i) -\n magtoflux(_min, i))], ndmin=2), axis=0)\n i += 1\n # Delete the first empty row of the array\n _smallstar = np.delete(_smallstar, 0, axis=0)\n\n # Determine the luminosity and effective temperature of each star\n _luma, _lumaerr, _wiena = getwientemp(_largestar, _distance, _derr, 1)\n _lumb, _lumberr, _wienb = getwientemp(_smallstar, _distance, _derr, 2)\n\n # Calculate luminosities in solar units\n _solluma = _luma / (3.828*10**26)\n _sollumb = _lumb / (3.828*10**26)\n _lumaerr = _lumaerr / (3.828*10**26)\n _lumberr = _lumberr / (3.828*10**26)\n\n # Calculate masses using the mass/luminosity relation in solar mass units\n # N.B. only works as an approximation for main sequence stars, giants and dwarfs are not sutiable for this\n # approximation\n _solmassa = np.power(_solluma, 1/3.5)\n _solmassaerr = ((_solmassa * (1/3.5) * _lumaerr) / _solluma)**2\n _solmassb = np.power(_sollumb, 1/3.5)\n _solmassberr = ((_solmassb * (1 / 3.5) * _lumberr) / _sollumb) ** 2\n\n # Calculate stellar radius in solar radii using the relationship between luminosity, surface area and temperature\n _solrada = np.sqrt(_solluma / np.power(_wiena / 5778, 4))\n _solradb = np.sqrt(_sollumb / np.power(_wienb / 5778, 4))\n _solradaerr = ((_solrada * 0.5 * _lumaerr) / _solluma)**2\n _solradberr = ((_solradb * 0.5 * _lumberr) / _sollumb)**2\n\n # Output determined values to the screen and write to file\n print('Values for the large star:')\n print('Effective temperature: ' + str(round_sig(_wiena)))\n print('Solar luminosities: ' + str(round_sig(_solluma)) + ', error: ' + str(round_sig(_lumaerr)))\n print('Solar radii: ' + str(round_sig(_solrada)) + ', error: ' + str(round_sig(_solradaerr)))\n print('Solar masses: ' + str(round_sig(_solmassa)) + ', error: ' + str(round_sig(_solmassaerr)))\n print('-----------------------------------------------------')\n print('Values for the small star:')\n print('Effective temperature: ' + str(round_sig(_wienb)))\n print('Solar luminosities: ' + str(round_sig(_sollumb)) + ', error: ' + str(round_sig(_lumberr)))\n print('Solar radii: ' + str(round_sig(_solradb)) + ', error: ' + str(round_sig(_solradberr)))\n print('Solar masses: ' + str(round_sig(_solmassb)) + ', error: ' + str(round_sig(_solmassberr)))\n\n # Convert from luminosity to magnitude in solar units\n _luma = -2.5 * np.log10(_luma / (3.0128 * 10**28))\n _lumb = -2.5 * np.log10(_lumb / (3.0128 * 10**28))\n\n # Plot Hertzsprung-Russell diagram using provided array\n plt.scatter(_hrdata[:, 1], _hrdata[:, 0], s=0.5)\n # Plot determined values for each star\n plt.scatter(_wiena, _luma, s=16, c='red', label='Larger Star')\n plt.scatter(_wienb, _lumb, s=16, c='green', label='Smaller Star')\n # Set the x and y axis limits to sensible values\n plt.legend()\n plt.xlim(3000, 10000)\n plt.ylim(-10, 20)\n # Invert both axes as convention\n plt.gca().invert_xaxis()\n plt.gca().invert_yaxis()\n # Save figure to current folder\n plt.savefig('hr.png')\n # Display to screen\n plt.show()", "def find_metallicity_distribution(redshifts, min_logZ_COMPAS, max_logZ_COMPAS,\n mu0=0.035, 
muz=-0.23, sigma_0=0.39, sigma_z=0.0, alpha =0.0,\n min_logZ =-12.0, max_logZ =0.0, step_logZ = 0.01): \n ##################################\n # Log-Linear redshift dependence of sigma\n sigma = sigma_0* 10**(sigma_z*redshifts)\n \n ##################################\n # Follow Langer & Norman 2006 in assuming that mean metallicities evolve in z as:\n mean_metallicities = mu0 * 10**(muz * redshifts) \n \n # Now we re-write the expected value of the log-skew-normal to retrieve mu\n beta = alpha/(np.sqrt(1 + (alpha)**2))\n PHI = NormDist.cdf(beta * sigma) \n mu_metallicities = np.log(mean_metallicities/2. * 1./(np.exp(0.5*sigma**2) * PHI ) ) \n\n ##################################\n # create a range of metallicities (the x-values, or random variables)\n log_metallicities = np.arange(min_logZ, max_logZ + step_logZ, step_logZ)\n metallicities = np.exp(log_metallicities)\n\n\n ##################################\n # probabilities of log-skew-normal (without the factor of 1/Z since this is dp/dlogZ not dp/dZ)\n dPdlogZ = 2./(sigma[:,np.newaxis]) * NormDist.pdf((log_metallicities - mu_metallicities[:,np.newaxis])/sigma[:,np.newaxis]) * NormDist.cdf(alpha * (log_metallicities - mu_metallicities[:,np.newaxis])/sigma[:,np.newaxis] )\n\n ##################################\n # normalise the distribution over all metallicities; this choice of normalisation assumes that metallicities outside the COMPAS range have yields of zero\n norm = dPdlogZ.sum(axis=-1) * step_logZ\n dPdlogZ = dPdlogZ /norm[:,np.newaxis]\n\n ##################################\n # assume a flat in log distribution in sampled metallicity to find probability of drawing Z in COMPAS\n p_draw_metallicity = 1 / (max_logZ_COMPAS - min_logZ_COMPAS)\n \n return dPdlogZ, metallicities, p_draw_metallicity" ]
[ "0.6823951", "0.68156433", "0.64645", "0.62532675", "0.5977594", "0.5888927", "0.5858084", "0.5850966", "0.5778458", "0.5767043", "0.5753279", "0.5737354", "0.5723255", "0.5714657", "0.57088953", "0.5705945", "0.56355387", "0.56164163", "0.561608", "0.56118447", "0.5599761", "0.5590967", "0.55508405", "0.5537181", "0.55344886", "0.55212325", "0.55204123", "0.55125654", "0.55106556", "0.55104506", "0.5502348", "0.549432", "0.5489594", "0.5481778", "0.5480972", "0.54730856", "0.54723644", "0.5470736", "0.54617864", "0.54612345", "0.54589766", "0.54563797", "0.54483247", "0.5432313", "0.5431978", "0.5412071", "0.54118717", "0.5411295", "0.5404955", "0.53949845", "0.538309", "0.5355145", "0.53549194", "0.5351914", "0.53510827", "0.53498095", "0.53405094", "0.53391707", "0.5337755", "0.5337485", "0.53251255", "0.5322845", "0.53223085", "0.5319866", "0.531541", "0.53142065", "0.5308977", "0.53061444", "0.5301485", "0.5292237", "0.5288209", "0.52842253", "0.52799654", "0.5278964", "0.5268154", "0.5267537", "0.5264818", "0.5264683", "0.5252801", "0.52524686", "0.52524114", "0.5248835", "0.52457625", "0.5239873", "0.5238028", "0.52317595", "0.52234656", "0.5214218", "0.52119714", "0.52110606", "0.5210366", "0.5208881", "0.5207216", "0.5206482", "0.52026397", "0.5200289", "0.51998776", "0.5199765", "0.5191052", "0.5190776" ]
0.7354267
0
Restaurant fixture for future tests
def restaurant_only():
    work_time = {
        "Понедельник": "8:00-23:00",
        "Вторник": "8:00-23:00",
        "Среда": "8:00-23:00",
        "Четверг": "8:00-23:00",
        "Пятница": "8:00-23:00",
        "Суббота": "8:00-23:00",
        "Воскресенье": "Выходной",
    }
    restaurant = Restaurant("Снежинка", work_time, False)
    return restaurant
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setUp(self):\n valid_name = \"Tungalo\"\n valid_location = \"Rivne\"\n valid_status = 0\n valid_tables_count = 10\n valid_description = \"description\"\n\n self.restaurant = Restaurant()\n self.restaurant.name = valid_name\n self.restaurant.location = valid_location\n self.restaurant.status = valid_status\n self.restaurant.tables_count = valid_tables_count\n self.restaurant.description = valid_description", "def fixtures():", "def setUpFixture(self):\n pass", "def setUp(self):\n self.client = APIClient()\n self.apple = Food.objects.create(name=\"apple\", calories=50)\n self.oatmeal = Food.objects.create(name=\"oatmeal\", calories=400)\n self.breakfast = Meal.objects.create(name=\"breakfast\")\n self.snack = Meal.objects.create(name=\"snack\")\n self.lunch = Meal.objects.create(name=\"lunch\")\n self.dinner = Meal.objects.create(name=\"dinner\")\n self.breakfast.foods.add(self.apple)", "def test_get_food(self):\n pass", "def test_create_restaurant_with_all_fields(self):\n headers = {'Content-Type': 'application/json'}\n headers.update(auth_header_cru_restaurants)\n name = 'Restaurant Chinois'\n street = '999 Sutter St'\n suite = '510'\n city = 'Wood-Ridge'\n state = 'NJ'\n zip_code = '07075'\n phone_num = '201-555-7777'\n website = 'www.chinois-nj.com'\n email = 'chinois-nj@gmail.com'\n date_established = '2014'\n creator = 'some-user@gmail.com'\n info = {'name': name, 'street': street, 'suite': suite,\n 'city': city, 'state': state, 'zip_code': zip_code,\n 'phone_num': phone_num, 'website': website, 'email': email,\n 'date_established': date_established, 'creator': creator\n }\n resp = self.test_client.post(self.API_BASE + '/create', headers=headers, data=json.dumps(info))\n\n self.assertEqual(resp.status_code, 200)\n resp_dict = json.loads(resp.data)\n self.assertEqual(resp_dict['id'], 1)\n self.assertTrue(name in resp_dict['message'])\n\n # Check that all restaurant fields are returned.\n self.assertTrue('restaurant' in resp_dict)\n self.assertEqual(resp_dict['restaurant']['id'], 1)\n self.assertEqual(resp_dict['restaurant']['name'], name)\n self.assertEqual(resp_dict['restaurant']['street'], street)\n self.assertEqual(resp_dict['restaurant']['suite'], suite)\n self.assertEqual(resp_dict['restaurant']['city'], city)\n self.assertEqual(resp_dict['restaurant']['state'], state)\n self.assertEqual(resp_dict['restaurant']['zip_code'], zip_code)\n self.assertEqual(resp_dict['restaurant']['phone_num'], phone_num)\n self.assertEqual(resp_dict['restaurant']['website'], website)\n self.assertEqual(resp_dict['restaurant']['email'], email)\n self.assertEqual(resp_dict['restaurant']['date_established'], date_established)\n self.assertEqual(resp_dict['restaurant']['creator'], creator)\n\n # -----------------------------\n # Make a separate request to retrieve the restaurant and assert that all fields are as created\n resp = self.test_client.get(self.API_BASE + '/1', headers=auth_header_cru_restaurants)\n\n self.assertEqual(resp.status_code, 200)\n resp_dict = json.loads(resp.data)\n self.assertEqual(resp_dict['restaurant']['id'], 1)\n self.assertEqual(resp_dict['restaurant']['name'], name)\n self.assertEqual(resp_dict['restaurant']['street'], street)\n self.assertEqual(resp_dict['restaurant']['suite'], suite)\n self.assertEqual(resp_dict['restaurant']['city'], city)\n self.assertEqual(resp_dict['restaurant']['state'], state)\n self.assertEqual(resp_dict['restaurant']['zip_code'], zip_code)\n self.assertEqual(resp_dict['restaurant']['phone_num'], phone_num)\n 
self.assertEqual(resp_dict['restaurant']['website'], website)\n self.assertEqual(resp_dict['restaurant']['email'], email)\n self.assertEqual(resp_dict['restaurant']['date_established'], date_established)\n self.assertEqual(resp_dict['restaurant']['creator'], creator)", "def setUp(self):\n super(TranscriptionsTest, self).setUp()\n mommy.make_recipe('grunt.seed', _quantity=2)", "def test_get_restaurants(self):\n address = {'number': '375',\n 'street': 'Noe St',\n 'city': 'San Francisco',\n 'zip': '94114'}\n\n with self.app.app_context():\n restaurants = ordrin.get_restaurants(address)\n\n # Ordr.in returns a test entry as the first item in the list when\n # when hitting their testing servers.\n entry = restaurants[0]\n self.assertEquals(entry['na'], 'Test Merchant 20130315')\n self.assertEquals(entry['id'], 23917)", "def test_full_restaurant(restaurant_full):\n assert restaurant_full", "def location_fixture():\n return _create_location()", "def fixture_example_data():\n import_example_data()", "def test_get_two_restaurants(self):\n from espresso import db\n from espresso import Restaurant\n\n name_1 = 'Restaurant Italiano'\n db.session.add(Restaurant(name=name_1, creator='test-user@gmail.com'))\n name_2 = 'Restaurant Français'\n db.session.add(Restaurant(name=name_2, creator='test-user@gmail.com'))\n db.session.commit()\n\n resp = self.test_client.get(self.API_BASE, headers=auth_header_cru_restaurants)\n self.assertEqual(resp.status_code, 200)\n resp_dict = json.loads(resp.data)\n self.assertEqual(len(resp_dict['restaurants']), 2)\n self.assertEqual(resp_dict['restaurants'][0]['name'], name_1)\n self.assertEqual(resp_dict['restaurants'][1]['name'], name_2)", "def fixture_andy():\n yield Person(name=\"Andy\", age=12, hobbies=[\"Star Wars\", \"Bicycles\"])", "def test_user_get_restaurants_list(self):\n response = self.client.get('/api/places/', format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_get_restaurant(self):\n url = \"/get_restaurants\"\n response = app.test_client().get(url)\n response_json = response.json\n with open('expected_responses/restaurants.json', 'r') as f:\n datastore = json.load(f)\n\n assert datastore == response_json, logging.error(\n \"GET Restaurants Failed!\")\n logging.info(\"GET Restaurants API Tested\")", "def create_test_data(self):\n fake = Faker(['en_US', 'ja_JP', 'el_GR', 'de_DE'])\n\n self.actor_request = {\n 'name': fake.name(),\n 'age': random.randint(22, 88),\n 'gender': random.choice(['M', 'F'])\n }\n\n self.movie_request = {\n 'title': fake.color_name() + ' ' + fake.street_suffix(),\n 'releaseDate': str(fake.date_between())\n }\n\n self.actor_update_request = {\n 'name': fake.name(),\n }\n\n self.movie_update_request = {\n 'title': fake.color_name() + ' ' + fake.street_suffix(),\n }\n\n for _ in range(30):\n actor_name = fake.name()\n actor_age = random.randint(22, 88)\n actor_gender = random.choice(['M', 'F'])\n\n movie_title = fake.color_name() + ' ' + fake.street_suffix()\n movie_release_date = str(fake.date_between())\n\n actor = Actor(actor_name, actor_age, actor_gender)\n actor.insert()\n\n movie = Movie(movie_title, movie_release_date)\n movie.insert()\n\n for _ in range(20):\n actors = Actor.query.all()\n movies = Movie.query.all()\n\n actor_to_update = random.choice(actors)\n movie_to_update = random.choice(movies)\n actor_to_update.movies.append(movie_to_update)", "def fixture_pandy():\n yield Person(name=\"Pandy\", age=12, hobbies=[\"Fortnite\"])", "def test_get_random_recipes(self):\n pass", "def 
test_foodtrucks_create(self):\n\t\tprint 'API Test: create a new foodtruck'\n\t\turl = reverse('foodtruck_list')\n\t\tdata = {\"status\" : \"APPROVED\",\\\n\t\t \"expirationdate\" : \"2015-03-15T00:00:00\",\\\n\t\t \"permit\" : \"14MFF-0107\",\\\n\t\t \"block\" : \"3794\",\\\n\t\t \"received\" : \"Jun 24 2014 1:49PM\",\\\n\t\t \"facilitytype\" : \"Truck\",\\\n\t\t \"blocklot\" : \"3794002A\",\\\n\t\t \"locationdescription\" : \"02ND ST: TOWNSEND ST to KING ST (700 - 799)\",\\\n\t\t \"cnn\" : 148000,\\\n\t\t \"priorpermit\" : 1,\\\n\t\t \"approved\" : \"2014-06-24T13:55:30\",\\\n\t\t \"noisent\" : \"2013-07-25T00:00:00\",\\\n\t\t \"schedule\" : \"http://bsm.sfdpw.org/PermitsTracker/reports/report.aspx?title=schedule&report=rptSchedule&params=permit=14MFF-0107&ExportPDF=1&Filename=14MFF-0107_schedule.pdf\",\\\n\t\t \"address\" : \"750 02ND ST\",\\\n\t\t \"applicant\" : \"Steve's Mobile Deli\",\\\n\t\t \"lot\" : \"002A\",\\\n\t\t \"fooditems\" : \"Cold Truck: Pre-packaged sandwiches: Burgers: Hot Dogs: Muffin Sandwiches: Enchiladas: Bagels: Burritos: Salads: Snacks: Beverages\",\\\n\t\t \"longitude\" : -122.402978526686,\\\n\t\t \"latitude\" : 37.7302216813049, \\\n\t\t \"y\" : 2093947.369,\\\n\t\t \"x\" : 6011371.493,\\\n\t\t \"objectid\" : 554527}\n\t\t\n\t\tresponse = self.client.post(url, data, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\t\t\n\t\tquant = '1.000000'\n\t\tfor k, v in data.iteritems():\n\t\t\tif v is not None and (k is \"y\" or k is \"x\" or k is \"latitude\" or k is \"longitude\"):\n\t\t\t\tself.assertEqual(response.data[k].quantize(Decimal(quant)), Decimal(v).quantize(Decimal(quant)))\n\t\t\telif v is not None and (k is \"approved\" or k is \"received\" or k is \"expirationdate\" or k is \"noisent\"):\n\t\t\t\tself.assertEqual(response.data[k], parse(v))\n\t\t\telse:\n\t\t\t\tself.assertEqual(response.data[k], v)\n\t\t\n\t\tresponse = self.client.get(url, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_200_OK)\n\n\t\tfor k, v in data.iteritems():\n\t\t\tif v is not None and (k is \"y\" or k is \"x\" or k is \"latitude\" or k is \"longitude\"):\n\t\t\t\tself.assertEqual(response.data[0][k].quantize(Decimal(quant)), Decimal(v).quantize(Decimal(quant)))\n\t\t\telif v is not None and (k is \"approved\" or k is \"received\" or k is \"expirationdate\" or k is \"noisent\"):\n\t\t\t\tself.assertEqual(response.data[0][k], parse(v))\n\t\t\telse:\n\t\t\t\tself.assertEqual(response.data[0][k], v)\n\t\tprint 'pass'", "def _fixture_setup(self):\n pass", "def setUp(self):\n response = self.client.post('/trainer/create/',\n {\"name\": \"Blue\",\n \"las_name\": \"Oak\"})\n self.trainer_id = response.json()[\"id\"]", "def fixture_candy():\n yield Person(name=\"Candy\", age=13, hobbies=[\"Gardening\"])", "def setUpTestData(cls):\n cls.post = PostFactory()", "def test_get_restaurant_by_id(self):\n from espresso import db\n from espresso import Restaurant\n\n name = 'Restaurant Greco'\n db.session.add(Restaurant(name=name, creator='test-user@gmail.com'))\n db.session.commit()\n\n # Since this is a freshly created table, the first id should be 1\n resp = self.test_client.get(self.API_BASE + '/1', headers=auth_header_cru_restaurants)\n self.assertEqual(resp.status_code, 200)\n resp_dict = json.loads(resp.data)\n self.assertEqual(resp_dict['restaurant']['name'], name)", "def setUp(self):\n self.factory = RequestFactory()\n StaffProfile.objects.rebuild()\n self.manager = mommy.make(\n \"auth.User\", first_name=\"Jane\", 
last_name=\"Ndoe\", email=\"jane@example.com\"\n )\n self.user = mommy.make(\n \"auth.User\", first_name=\"Bob\", last_name=\"Ndoe\", email=\"bob@example.com\"\n )\n manager_mommy = Recipe(StaffProfile, lft=None, rght=None, user=self.manager)\n staff_mommy = Recipe(StaffProfile, lft=None, rght=None, user=self.user)\n self.manager_profile = manager_mommy.make()\n self.staffprofile = staff_mommy.make()", "def setUp(self):\n self.prod_1 = Product.objects.create(\n pk=1,\n ean='3350033118072',\n name='test 1',\n category='cat 1',\n image='product_default.png',\n nutriscore='u'\n )\n\n self.user_1 = User.objects.create_user(\n pk=1,\n username='Fav Models Unit Test 1',\n email='boggusmail@boggusmail.net'\n )\n\n self.fav_1 = Favourite.objects.create(\n pk=1,\n date_added='2019-12-20 09:00:00',\n user=self.user_1,\n product=self.prod_1\n )", "def test_creating_recipe_with_ingredients(self):\n ingredient1 = sample_ingredients(user=self.user, name='Prawns')\n ingredient2 = sample_ingredients(user=self.user, name='Garlic')\n\n payload = {\n 'title': 'Avocado lime cheesecake',\n 'time_minutes': 20,\n 'price': 500.00,\n 'currency': 'NGN',\n 'ingredients': [ingredient1.id, ingredient2.id]\n }\n self.evaluate_recipe(ingredient1, ingredient2, payload, 'ingredient')", "def setUp(self):\n storage = FileStorage()\n self.amenity = Amenity()\n self.amenity.name = 'test'\n self.amenity.save()\n self.state = State()\n self.state.name = 'California'\n self.state.save()\n self.city = City()\n self.city.name = 'San_Mateo'\n self.city.state_id = self.state.id\n self.city.save()\n self.user = User()\n self.user.first_name = 'test'\n self.user.last_name = 'test'\n self.user.email = 'test'\n self.user.password = 'test'\n self.user.save()\n self.place = Place()\n self.place.city_id = self.city.id\n self.place.user_id = self.user.id\n self.place.name = 'test_place'\n self.place.description = 'test_description'\n self.place.number_rooms = 2\n self.place.number_bathrooms = 1\n self.place.max_guest = 4\n self.place.price_by_night = 100\n self.place.latitude = 120.12\n self.place.longitude = 101.4\n self.place.save()\n self.review = Review()\n self.review.place_id = self.city.id\n self.review.user_id = self.user.id\n self.review.save()", "def __init__(self, restaurant_name, cuisine_type):\n self.name = restaurant_name\n self.food = cuisine_type", "def test_search_recipes(self):\n pass", "def setUpTestData(cls):\n User.objects.create_user('Claire', 'claire@email.com', '12345678')\n User.objects.create_user('Georgie', 'georgie@email.com', '12345678')\n User.objects.create_user('Tristan', 'tristan@email.com', '12345678')\n\n Expense.objects.create(\n date=date.today(),\n description=\"Test balance 1\",\n category=\"Food\",\n amount=20,\n converted_amount=20,\n currency=\"GBP\",\n who_for=\"Everyone\",\n who_paid=\"Georgie\"\n )\n Expense.objects.create(\n date=date.today(),\n description=\"Test balance 2\",\n category=\"Food\",\n amount=10,\n converted_amount=10,\n currency=\"GBP\",\n who_for=\"Everyone\",\n who_paid=\"Claire\"\n )\n Expense.objects.create(\n date=date.today(),\n description=\"Test balance 3\",\n category=\"Food\",\n amount=30,\n converted_amount=30,\n currency=\"GBP\",\n who_for=\"Everyone\",\n who_paid=\"Tristan\"\n )", "def fixture_test_store(andy, pandy, candy):\n store_ = InMemoryStore[Person](unique_keys={\"name\"})\n store_.add(andy)\n store_.add(pandy)\n store_.add(candy)\n yield store_", "def test_create_restaurant_with_token(self):\n url = '/api/places/'\n client = APIClient()\n\n 
client.credentials(HTTP_AUTHORIZATION=self.test_user1_token)\n response = client.post(url, self.restaurant_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)", "def setUpTestData(cls):\n cls.post = PostFactory(\n author__first_name='Peter',\n author__last_name='Mustermann',\n title='My test title',\n subtitle='A subtitle for the test post',\n views=10,\n last_viewed=(timezone.now() - datetime.timedelta(days=1)),\n is_active=True,\n activation_date=None\n )", "def test_get_foods(self):\n pass", "def test_post_foods(self):\n pass", "def setUp(self):\n # Create table\n db.create_all()\n\n #Create test registree\n mcdonalds = Store(name='mcdonalds', shop_address='63 Northbrook st', shop_postcode='rg14 1ae', takeaway=True)\n tesco = Store(name='tesco', shop_address='London rd, Newbury', shop_postcode='rg14 2bp', takeaway=False)\n coop = Store(name='coop', shop_address='Andover rd', shop_postcode='rg19 3bp', takeaway=False)\n \n #adding test receipts to db\n receipt1 = Receipts(most_expensive=5.09, cost_of_alcohol=0, receipt_total=11.36, takeaway=True, delivery_fee=1.99, delivery_time_mins=28, store_id=1, shop=mcdonalds)\n receipt2 = Receipts(most_expensive=2.80, cost_of_alcohol=16, receipt_total=11.90, store_id=2, shop=tesco)\n receipt3 = Receipts(most_expensive=3.00, cost_of_alcohol=0, receipt_total=18.76, store_id=2, shop=tesco)\n receipt4 = Receipts(most_expensive=2.00, cost_of_alcohol=0, receipt_total=20.91, store_id=2, shop=tesco)\n \n #Add and save to database\n store_list = [mcdonalds, tesco, coop]\n receipt_list = [receipt1, receipt2, receipt3, receipt4]\n for i in store_list:\n db.session.add(i)\n for i in receipt_list:\n db.session.add(i)\n db.session.commit()", "def test_create_recipe_with_ingredient(self):\n ingredient1 = sample_ingredient(user=self.user, name='Prawns')\n ingrident2 = sample_ingredient(user=self.user, name ='Ginger')\n\n payload = {\n 'title': 'Thai prawn and curry',\n 'ingredient': [ingredient1.id,ingrident2.id],\n 'time_minuts':60,\n 'price': 250\n }\n res = self.client.post(RECIPE_URL,payload)\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n\n recipe = Recipe.objects.get(id=res.data['id'])\n ingredients = recipe.ingredient.all()\n self.assertEqual(ingredients.count(),2)\n self.assertIn(ingredient1, ingredients)\n self.assertIn(ingrident2,ingredients)", "def setUp(self):\n UsuarioFactory.create()\n self.user = Usuario.objects.get(username='admin')\n ProyectoFactory.lider_proyecto = self.user\n ProyectoFactory.create()\n FaseFactory.proyecto = Proyecto.objects.get(nombre='Proyecto01')\n FaseFactory.create()\n TipoItemFactory.fase = Fase.objects.get(nombre='Fase01')\n TipoItemFactory.create()\n self.factory = RequestFactory()", "def setUpTestData(cls) -> None:\n\n # Define base url\n cls.url = BASE_URL + '/'\n\n # Make 9 \"normal\" authors.\n cls.authors: typing.List[Author] = [\n create_author() for _ in range(9)\n ]\n\n # Make 1 superuser author.\n cls.super_author: Author = create_author(True)\n\n # Serialize data once so that it's not called in ever test\n cls.serialized_data = AuthorListSerializer(Author.objects.all(), many=True).data", "def test_alien_data(self):", "def test_delete_restaurant(self):\n from espresso import db\n from espresso import Restaurant\n\n name = 'Restaurant Greco'\n db.session.add(Restaurant(name=name, creator='test-user@gmail.com'))\n db.session.commit()\n\n # Since this is a freshly created table, the first id should be 1\n resp = self.test_client.delete(self.API_BASE + '/1', 
headers=auth_header_all_permissions)\n self.assertEqual(resp.status_code, 200)\n resp_dict = json.loads(resp.data)\n self.assertEqual(resp_dict['success'], True)\n\n resp = self.test_client.get(self.API_BASE + '/1', headers=auth_header_all_permissions)\n self.assertEqual(resp.status_code, 404)\n resp_dict = json.loads(resp.data)\n self.assertEqual(resp_dict['success'], False)", "def setUp(self):\n\n # Get the Flask test client.\n self.client = app.test_client()\n app.config[\"TESTING\"] = True\n app.config[\"SECRET_KEY\"] = \"key\"\n app.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\n\n with self.client as c:\n with c.session_transaction() as sess:\n sess[\"dietitian_id\"] = 1\n\n # Connect to the test database.\n connect_to_db(app, db_uri=\"postgresql:///testnourish\") \n\n # Create the tables and add the sample data.\n db.create_all()\n load_test_data()", "def test_add_to_fav_(self):\n result = self.client.post(\"/add_to_fav\", data={\"yelp_biz_id\":\"JA_V9TqDCrkgknqrcUndIQ\", \n \"yelp_rest_name\":\"Siam\", \"yelp_rating\":\"4\", \n \"yelp_category\":\"Thai\", \"yelp_price\":\"$$\", \n \"yelp_image_url\":\"https://s3-media2.fl.yelpcdn.com/bphoto/1SkZwZrRZkQSzRMn_Trs3w/o.jpg\" })\n\n DB_result = Restaurant_details.query.filter_by(biz_id = \"JA_V9TqDCrkgknqrcUndIQ\").first()\n self.assertIsNotNone(DB_result) #testing that the returned result is not NONE\n self.assertEqual(DB_result.restaurant_name, 'Siam') #testing restaurant name is what it should be\n \n self.assertIn(b\"Your Favourite has been saved\", result.data)", "def test_get_recipe_information(self):\n pass", "def setUp(self):\n super(PlayTests, self).setUp(\n \"tests/data/shakespeare/\", \"structure.json\", \"brief_example.xml\")", "def setUp(self):\n\n self.veh = Vehicle(0, 0)\n self.R = Random(seed)", "def setUp(self):\n self.client = APIClient()\n\n self.speciality = models.Speciality.objects.create(\n name='Speciality'\n )\n\n self.payload = {\n 'name': \"Knee Replacement\",\n 'speciality': [self.speciality.pk],\n 'days_in_hospital': 2,\n 'days_in_destination': 2,\n 'duration_minutes': 120,\n 'overview': '<strong>Bla</strong> bla bla',\n }", "def setUp(self):\n super(TravelogueTest, self).setUp()\n self.test_travelogue =TravelogueFactory()\n self.pl2 = PhotoFactory()\n self.tpoint = TrailPointFactory(point = Point(22.4604, 33.9420, 0.0))\n self.tnf2 = TripNoteFactory()\n self.tnf2.location_detail = self.tpoint\n self.test_travelogue.photos.add(self.pl)\n self.test_travelogue.photos.add(self.pl2)\n self.test_travelogue.notes.add(self.tnf)\n self.test_travelogue.notes.add(self.tnf2)", "def setUp(self):\n super(RestaurantTest, self).setUp()\n qsa = {'username': 'davedash.livejournal.com',\n 'password': 'sexy'}\n response = self.client.post('/login', qsa, follow=True)\n # print dir(response) ['__class__', '__contains__', '__delattr__',\n # '__delitem__', '__dict__', '__doc__', '__getattribute__',\n # '__getitem__', '__hash__', '__init__', '__iter__', '__module__',\n # '__new__', '__reduce__', '__reduce_ex__', '__repr__',\n # '__setattr__', '__setitem__', '__str__', '__weakref__',\n # '_charset', '_container', '_convert_to_ascii', '_get_content',\n # '_headers', '_is_string', '_set_content', 'client', 'close',\n # 'content', 'context', 'cookies', 'delete_cookie', 'flush', 'get',\n # 'has_header', 'items', 'next', 'request', 'set_cookie',\n # 'status_code', 'tell', 'template', 'write']", "def setUp(self):\n self.item = Furniture('11', 'sofa', '4', '5', 'suede', 'xl')", "def test_create(self):\n retreat = 
Retreat.objects.create(\n name=\"random_retreat\",\n details=\"This is a description of the retreat.\",\n seats=40,\n address_line1=\"123 random street\",\n postal_code=\"123 456\",\n state_province=\"Random state\",\n country=\"Random country\",\n timezone=\"America/Montreal\",\n price=3,\n start_time=LOCAL_TIMEZONE.localize(datetime(2130, 1, 15, 8)),\n end_time=LOCAL_TIMEZONE.localize(datetime(2130, 1, 17, 12)),\n min_day_refund=7,\n min_day_exchange=7,\n refund_rate=100,\n is_active=True,\n accessibility=True,\n form_url=\"example.com\",\n carpool_url='example2.com',\n review_url='example3.com',\n has_shared_rooms=True,\n room_type=Retreat.DOUBLE_OCCUPATION,\n toilet_gendered=True,\n )\n\n self.assertEqual(retreat.__str__(), \"random_retreat\")", "def get_random_restaurant(self, request, **kwargs):\n restaurant = Restaurant.objects.order_by(\n '?'\n ).select_related(\n 'address'\n ).prefetch_related(\n 'employees'\n ).first()\n serializer = RestaurantFullInfoSerializer(restaurant)\n return Response(serializer.data)", "def test_create_recipe_with_ingredients(self):\n ing1 = sample_ingredient(user=self.user,name=\"ginger\")\n ing2 = sample_ingredient(user=self.user, name=\"Prawn\")\n payload = {\n 'title':'Prawn curry',\n 'ingredient':[ing1.id,ing2.id],\n 'time_minutes':60,\n 'price':10.00,\n }\n res = self.client.post(RECIPE_URL,payload)\n self.assertEqual(res.status_code,status.HTTP_201_CREATED)\n recipe = Recipe.objects.get(id=res.data['id'])\n ingredients = recipe.ingredient.all()\n self.assertEqual(ingredients.count(),2)\n self.assertIn(ing1,ingredients)\n self.assertIn(ing2,ingredients)", "def setUp(self):\n\n self.batch_tennis_data = [\n {\n \"ATP\": \"11\",\n \"Court\": \"Brisbane International\",\n \"Date\": \"02-01-2011\",\n \"Location\": \"ATP250\",\n \"Loser\": \"Outdoor\",\n \"Round\": \"Hard\",\n \"Series\": \"1st Round\",\n \"Surface\": \"49\",\n \"Tournament\": \"Istomin D.\",\n \"Winner\": \"De Bakker T.\"\n },\n\n {\n \"ATP\": \"12\",\n \"Court\": \"Brisbane International\",\n \"Date\": \"02-01-2011\",\n \"Location\": \"ATP250\",\n \"Loser\": \"Outdoor\",\n \"Round\": \"Hard\",\n \"Series\": \"2nd Round\",\n \"Surface\": \"49\",\n \"Tournament\": \"Berrer M.\",\n \"Winner\": \"Sela D.\"\n },\n {\n \"ATP\": \"13\",\n \"Court\": \"Chennai\",\n \"Date\": \"02-01-2016\",\n \"Location\": \"ATP250\",\n \"Loser\": \"Outdoor\",\n \"Round\": \"Hard\",\n \"Series\": \"2nd Round\",\n \"Surface\": \"49\",\n \"Tournament\": \"Berdych T.\",\n \"Winner\": \"Phau B.\"\n }\n ]\n self.tennis_data = {\n \"ATP\": \"1\",\n \"Court\": \"TestCourt\",\n \"Date\": \"TestDate\",\n \"Location\": \"TestLocation\",\n \"Loser\": \"TestLoser\",\n \"Round\": \"TesrRound\",\n \"Series\": \"TestSeries\",\n \"Surface\": \"TestSurface\",\n \"Tournament\": \"TestTournament\",\n \"Winner\": \"TestWinner\"\n }\n db.connect()", "def setUp(cls):\n cls.place = Place()\n cls.place.city_id = \"hawaii808\"\n cls.place.user_id = \"modern123\"\n cls.place.name = \"The Modern Honolulu\"\n cls.place.description = \"The heart of Waikiki\"\n cls.place.number_rooms = 375\n cls.place.number_bathrooms = 1\n cls.place.max_guest = 10000\n cls.place.price_by_night = 300\n cls.place.latitude = 21.306944\n cls.place.longitude = -157.858337\n cls.place.amenity_ids = [\"amenity321\"]", "def test_update_restaurant(self):\n from espresso import db\n from espresso import Restaurant\n\n name = 'Restaurant Mexicano'\n zip_code = \"94110\"\n db.session.add(Restaurant(name=name, creator='test-user@gmail.com', zip_code=zip_code))\n 
db.session.commit()\n\n headers = {'Content-Type': 'application/json'}\n headers.update(auth_header_cru_restaurants)\n website = 'www.mexicano-nj.com'\n email = 'mexicano-nj@gmail.com'\n info = {'website': website, 'email': email}\n resp = self.test_client.put(self.API_BASE + '/1', headers=headers, data=json.dumps(info))\n\n self.assertEqual(resp.status_code, 200)\n resp_dict = json.loads(resp.data)\n self.assertEqual(resp_dict['id'], 1)\n self.assertTrue(name in resp_dict['message'])\n\n # Check that all restaurant fields are returned.\n self.assertTrue('restaurant' in resp_dict)\n self.assertEqual(resp_dict['restaurant']['id'], 1)\n self.assertEqual(resp_dict['restaurant']['website'], website)\n self.assertEqual(resp_dict['restaurant']['email'], email)\n self.assertEqual(resp_dict['restaurant']['zip_code'], zip_code)\n\n self.assertTrue('name' in resp_dict['restaurant'])\n self.assertTrue('street' in resp_dict['restaurant'])\n self.assertTrue('suite' in resp_dict['restaurant'])\n self.assertTrue('city' in resp_dict['restaurant'])\n self.assertTrue('state' in resp_dict['restaurant'])\n self.assertTrue('phone_num' in resp_dict['restaurant'])\n self.assertTrue('date_established' in resp_dict['restaurant'])\n self.assertTrue('creator' in resp_dict['restaurant'])\n\n # -----------------------------\n # Make a separate request to retrieve the restaurant and assert that updated fields are as intended\n resp = self.test_client.get(self.API_BASE + '/1', headers=auth_header_cru_restaurants)\n\n self.assertEqual(resp.status_code, 200)\n resp_dict = json.loads(resp.data)\n self.assertEqual(resp_dict['restaurant']['id'], 1)\n self.assertEqual(resp_dict['restaurant']['website'], website)\n self.assertEqual(resp_dict['restaurant']['email'], email)\n self.assertEqual(resp_dict['restaurant']['zip_code'], zip_code) # Make sure this has not changed", "def setUp(self):\n self.place = Place()", "def populate_fixtures():\n languages()\n words()", "def test_create_from_pear(self):\n pass", "def test_simple_restaurant(restaurant_only):\n assert restaurant_only", "def setUp(self):\n\t\tself.app = app.test_client()\n\t\tself.app.test = True\n\t\tself.base_data = [\n\t\t\t\t\n\t\t\t\t{\n\t\t\t\t\"area_name\": \"alta\",\n\t\t\t\t\"base_total\": 98,\n\t\t\t\t\"crawled_at\": \"2018-03-05\"\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\"area_name\": \"beaver mountain\",\n\t\t\t\t\"base_total\": 68,\n\t\t\t\t\"crawled_at\": \"2018-03-05\"\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\"area_name\": \"brian head\",\n\t\t\t\t\"base_total\": 40,\n\t\t\t\t\"crawled_at\": \"2018-03-05\"\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\"area_name\": \"brighton\",\n\t\t\t\t\"base_total\": 77,\n\t\t\t\t\"crawled_at\": \"2018-03-05\"\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\"area_name\": \"cherry peak\",\n\t\t\t\t\"base_total\": 44,\n\t\t\t\t\"crawled_at\": \"2018-03-05\"\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\"area_name\": \"deer valley\",\n\t\t\t\t\"base_total\": 67,\n\t\t\t\t\"crawled_at\": \"2018-03-05\"\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\"area_name\": \"eagle point\",\n\t\t\t\t\"base_total\": 19,\n\t\t\t\t\"crawled_at\": \"2018-03-05\"\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\"area_name\": \"nordic valley\",\n\t\t\t\t\"base_total\": 12,\n\t\t\t\t\"crawled_at\": \"2018-03-05\"\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\"area_name\": \"park city\",\n\t\t\t\t\"base_total\": 50,\n\t\t\t\t\"crawled_at\": \"2018-03-05\"\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\"area_name\": \"powder mountain\",\n\t\t\t\t\"base_total\": 51,\n\t\t\t\t\"crawled_at\": \"2018-03-05\"\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\"area_name\": 
\"snowbasin\",\n\t\t\t\t\"base_total\": 64,\n\t\t\t\t\"crawled_at\": \"2018-03-05\"\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\"area_name\": \"snowbird\",\n\t\t\t\t\"base_total\": 103,\n\t\t\t\t\"crawled_at\": \"2018-03-05\"\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\"area_name\": \"solitude\",\n\t\t\t\t\"base_total\": 75,\n\t\t\t\t\"crawled_at\": \"2018-03-05\"\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\"area_name\": \"sundance\",\n\t\t\t\t\"base_total\": 36,\n\t\t\t\t\"crawled_at\": \"2018-03-05\"\n\t\t\t\t}\n\t\t]\n\n\t\tself.twenty_four_hour_data = [\t\t\n\t\t\t\t{\n\t\t\t\t\"area_name\": \"alta\",\n\t\t\t\t\"crawled_at\": \"2018-03-05\",\n\t\t\t\t\"twenty_four_hour_total\": 20\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\"area_name\": \"beaver mountain\",\n\t\t\t\t\"crawled_at\": \"2018-03-05\",\n\t\t\t\t\"twenty_four_hour_total\": 3\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\"area_name\": \"brian head\",\n\t\t\t\t\"crawled_at\": \"2018-03-05\",\n\t\t\t\t\"twenty_four_hour_total\": 4\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\"area_name\": \"brighton\",\n\t\t\t\t\"crawled_at\": \"2018-03-05\",\n\t\t\t\t\"twenty_four_hour_total\": 5\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\"area_name\": \"cherry peak\",\n\t\t\t\t\"crawled_at\": \"2018-03-05\",\n\t\t\t\t\"twenty_four_hour_total\": 5\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\"area_name\": \"deer valley\",\n\t\t\t\t\"crawled_at\": \"2018-03-05\",\n\t\t\t\t\"twenty_four_hour_total\": 5\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\"area_name\": \"eagle point\",\n\t\t\t\t\"crawled_at\": \"2018-03-05\",\n\t\t\t\t\"twenty_four_hour_total\": 4\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\"area_name\": \"nordic valley\",\n\t\t\t\t\"crawled_at\": \"2018-03-05\",\n\t\t\t\t\"twenty_four_hour_total\": 2\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\"area_name\": \"park city\",\n\t\t\t\t\"crawled_at\": \"2018-03-05\",\n\t\t\t\t\"twenty_four_hour_total\": 7\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\"area_name\": \"powder mountain\",\n\t\t\t\t\"crawled_at\": \"2018-03-05\",\n\t\t\t\t\"twenty_four_hour_total\": 10\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\"area_name\": \"snowbasin\",\n\t\t\t\t\"crawled_at\": \"2018-03-05\",\n\t\t\t\t\"twenty_four_hour_total\": 7\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\"area_name\": \"snowbird\",\n\t\t\t\t\"crawled_at\": \"2018-03-05\",\n\t\t\t\t\"twenty_four_hour_total\": 18\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\"area_name\": \"solitude\",\n\t\t\t\t\"crawled_at\": \"2018-03-05\",\n\t\t\t\t\"twenty_four_hour_total\": 7\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\"area_name\": \"sundance\",\n\t\t\t\t\"crawled_at\": \"2018-03-05\",\n\t\t\t\t\"twenty_four_hour_total\": 4\n\t\t\t\t}\n\t\t]", "def start_fixture(self):\n pass", "def setUp(self):\n self.app = create_app()\n self.client = self.app.test_client()\n self.app_context = self.app\n\n self.order = {\n \"order_id\":\"100\",\n \"pickup_location\":\"nakuru\",\n \"destination\":\"nairobi\",\n \"price\":\"1400\",\n \"user_id\":\"5\"\n\n }\n self.user = {\n \"user_id\":\"25\",\n \"firstname\":\"James\",\n \"lastname\":\"Martin\",\n \"username\":\"senge\",\n \"email\":\"senge@yahoo.com\",\n \"password\":\"andela\"\n }", "def setUp(self):\n self.driver = {\n \"Email\": \"p@gmail.com\",\n \"Type\": \"driver\",\n \"Password\": \"pass123\",\n \"Confirm Password\": \"pass123\"\n }\n self.ride = {\n \"Destination\": \"Meru\",\n \"Origin\": \"Kutus\",\n \"Time\": \"9:00\",\n \"Date\": \"23/7/2018\",\n \"Ride Name\": \"Toyota\",\n \"Capacity\": \"7\"\n }\n self.request = {\n \"Email\": \"Njobu\",\n \"Tel\": \"+254716272376\"\n }\n self.app = create_app('testing')\n self.client = self.app.test_client\n self.app_context = 
self.app.app_context()\n self.app_context.push()", "def setUp(self) -> None:\n self.state = State(name='toto')\n self.state_id = self.state.id\n self.city = City(name='toto', state_id=self.state.id)\n self.city_id = self.city.id\n self.user = User(email='email', password='password')\n self.user_id = self.user.id\n self.place = Place(name='toto', city_id=self.city.id,\n user_id=self.user.id)\n self.place_id = self.place.id\n storage.new(self.state)\n storage.new(self.city)\n storage.new(self.user)\n storage.new(self.place)\n storage.save()\n self.url = '{}/places/{}'.format(api_url, self.place.id)\n self.invalid_url = '{}/places/{}'.format(api_url, 'toto')", "def setUp(self) -> None:\n self.state = State(name='toto')\n self.state_id = self.state.id\n self.city = City(name='toto', state_id=self.state.id)\n self.city_id = self.city.id\n self.user = User(email='email', password='password')\n self.user_id = self.user.id\n self.place = Place(name='toto', city_id=self.city.id,\n user_id=self.user.id)\n self.place_id = self.place.id\n storage.new(self.state)\n storage.new(self.city)\n storage.new(self.user)\n storage.new(self.place)\n storage.save()\n self.url = '{}/places/{}'.format(api_url, self.place.id)\n self.invalid_url = '{}/places/{}'.format(api_url, 'toto')", "def setUp(self):\n\n self.caffe = Caffe.objects.create(\n name='kafo',\n city='Gliwice',\n street='Wieczorka',\n house_number='14',\n postal_code='44-100'\n )\n self.filtry = Caffe.objects.create(\n name='filtry',\n city='Warszawa',\n street='Filry',\n house_number='14',\n postal_code='44-100'\n )\n\n self.kate = Employee.objects.create(\n username='KateT',\n first_name='Kate',\n last_name='Tempest',\n telephone_number='12345678',\n email='kate@tempest.com',\n favorite_coffee='flat white',\n caffe=self.caffe\n )\n\n self.cash_report = CashReport.objects.create(\n creator=self.kate,\n caffe=self.caffe,\n cash_before_shift=2000,\n cash_after_shift=3000,\n card_payments=500,\n amount_due=1900\n )\n\n Company.objects.create(name='GoodCake', caffe=self.caffe)\n Company.objects.create(name='Tesco', caffe=self.caffe)\n\n Expense.objects.create(\n name='Cakes',\n company=Company.objects.get(name='GoodCake'),\n caffe=self.caffe\n )\n\n Expense.objects.create(\n name='Supply',\n company=Company.objects.get(name='Tesco'),\n caffe=self.caffe\n )\n\n FullExpense.objects.create(\n expense=Expense.objects.get(name='Cakes'),\n amount=50,\n cash_report=CashReport.objects.first(),\n caffe=self.caffe\n )\n\n FullExpense.objects.create(\n expense=Expense.objects.get(name='Supply'),\n amount=500,\n cash_report=CashReport.objects.first(),\n caffe=self.caffe\n )", "def test_create_restaurant_no_creator(self):\n headers = {'Content-Type': 'application/json'}\n headers.update(auth_header_cru_restaurants)\n info = {'name': 'Ping Yan', 'city': 'Chicago'}\n resp = self.test_client.post(self.API_BASE + '/create', headers=headers, data=json.dumps(info))\n self.assertEqual(resp.status_code, 400)", "def setUpTestData(cls):\n cls.board = Board.objects.create(name = DICT.get('board_name') )\n\n cls.task = Task.objects.create(head = DICT.get('task_head'),\n description = DICT.get('task_description'),\n board = cls.board )", "def setUp(self):\n self.factory = RequestFactory()\n with patch(\"bookwyrm.suggested_users.rerank_suggestions_task.delay\"), patch(\n \"bookwyrm.activitystreams.populate_stream_task.delay\"\n ):\n self.local_user = models.User.objects.create_user(\n \"mouse@local.com\",\n \"mouse@mouse.com\",\n \"mouseword\",\n local=True,\n 
localname=\"mouse\",\n remote_id=\"https://example.com/users/mouse\",\n )\n self.work = models.Work.objects.create(title=\"Test Work\")\n self.book = models.Edition.objects.create(\n title=\"Example Edition\",\n remote_id=\"https://example.com/book/1\",\n parent_work=self.work,\n )\n with patch(\"bookwyrm.models.activitypub_mixin.broadcast_task.delay\"):\n self.shelf = models.Shelf.objects.create(\n name=\"Test Shelf\", identifier=\"test-shelf\", user=self.local_user\n )\n models.SiteSettings.objects.create()", "def setUp(self) -> None:\n self.state = State(name='toto')\n self.state_id = self.state.id\n self.city = City(name='toto', state_id=self.state_id)\n self.city_id = self.city.id\n self.user = User(email='email', password='password')\n self.user_id = self.user.id\n self.place = Place(name='toto', city_id=self.city.id,\n user_id=self.user.id)\n self.place_id = self.place.id\n storage.new(self.state)\n storage.new(self.city)\n storage.new(self.user)\n storage.new(self.place)\n storage.save()\n self.url = '{}/places/{}'.format(api_url, self.place_id)\n self.invalid_url = '{}/places/{}'.format(api_url, 'toto')", "def locations_fixture(location):\n return [location, _create_location()]", "def test_create_restaurant_no_name(self):\n headers = {'Content-Type': 'application/json'}\n headers.update(auth_header_cru_restaurants)\n info = {'creator': 'nobody@gmail.com', 'city': 'Chicago'}\n resp = self.test_client.post(self.API_BASE + '/create', headers=headers, data=json.dumps(info))\n self.assertEqual(resp.status_code, 400)", "def __init__(self, restaurant_name, cuisine_type):\n self.name = restaurant_name\n self.type = cuisine_type", "def setUpTestData(cls):\n Product_type.objects.create(\n name='New_Product', display_name='New Product.')", "def test_create(self):\n pass", "def test_client_tax_information_create(self):\n pass", "def test_create_recipe_with_ingredients(self):\n ingredient1 = sample_ingredient(user=self.user, name = 'bla')\n ingredient2 = sample_ingredient(user=self.user, name = 'blaa')\n payload = {\n 'title': 'red curry',\n 'ingredients': [ingredient1.id, ingredient2.id],\n 'time_minutes': 30,\n 'price': 30.00\n }\n res = self.client.post(RECIPE_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n recipe = Recipe.objects.get(id=res.data['id'])\n ingredients = recipe.ingredients.all()\n\n self.assertEqual(ingredients.count(), 2)\n self.assertIn(ingredient1, ingredients)\n self.assertIn(ingredient2, ingredients)", "def setUp(self):\n self.factory = PhoneFactory()", "def setUp(self):\n server.Inventory.remove_all()\n server.Inventory(0, \"shampoo\", 2, 'new').save()\n server.Inventory(0, \"conditioner\", 5, 'new').save()\n self.app = server.app.test_client()", "def setUp(self):\n\n # Get the Flask test client.\n self.client = app.test_client()\n app.config[\"TESTING\"] = True\n app.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\n\n # Connect to the test database.\n connect_to_db(app, db_uri=\"postgresql:///testnourish\") \n\n # Create the tables and add the sample data.\n db.create_all()\n load_test_data()", "def __init__(self, restaurant_name, cuisine_type):\n\t\tself.name = restaurant_name\n\t\tself.type = cuisine_type", "def test_post_foods_list(self):\n pass", "def setUp(self):\n user = CustomUser(id=100, email='testuser@mail.com', is_active=True)\n user.set_password('testpassword')\n user.save()\n\n way_first = Way.objects.create(id=100, user=user)\n way_second = Way.objects.create(id=101, user=user)\n start_place = Place.objects.create(id=100, 
longitude=111.123456, latitude=222.123456)\n end_place = Place.objects.create(id=200, longitude=222.123456, latitude=111.123456)\n\n Route.objects.create(\n id=100,\n way=way_first,\n time='23:58:59',\n position=0,\n start_place=start_place,\n end_place=end_place\n )\n\n Route.objects.create(\n id=101,\n way=way_first,\n time='01:02:03',\n position=1,\n start_place=start_place,\n end_place=end_place\n )\n\n Route.objects.create(\n id=102,\n way=way_second,\n time='11:22:33',\n position=1,\n start_place=start_place,\n end_place=end_place\n )\n\n self.route = Route.objects.get(id=100)\n self.client = Client()\n self.client.login(email='testuser@mail.com', password='testpassword')", "def test_countries_regions_created(self):\n country_existing = CountryFactory(\n name=iso3166.countries.get('France').name,\n numeric=iso3166.countries.get('France').numeric,\n alpha_3=iso3166.countries.get('France').alpha3,\n )\n region_existing = RegionFactory(name='Existing Region')\n\n self.call_command(filename='power_plant_import/tests/data/six_rows.csv')\n\n # Get the PowerPlants that were created during the import\n (powerplant_ouessant, powerplant_ilarionas, powerplant_tonstad) = self.get_created_plants()\n # Get the Projects that were created during the import\n (project_ouessant1, project_ouessant2, project_liaoning) = self.get_created_projects()\n # The Countries and Regions have been assigned to the correct PowerPlants and Projects\n self.assertEqual(Country.objects.count(), 4)\n self.assertEqual(Region.objects.count(), 3)\n greece = Country.objects.get(name='Greece')\n china = Country.objects.get(name='China')\n norway = Country.objects.get(name='Norway')\n mediterranean = Region.objects.get(name='Gulf and Mediterranean')\n northeast_asia = Region.objects.get(name='Northeast Asia')\n self.assertEqual(set(powerplant_ouessant.countries.all()), set([country_existing]))\n self.assertEqual(set(powerplant_ouessant.regions.all()), set([region_existing]))\n self.assertEqual(set(project_ouessant1.countries.all()), set([country_existing]))\n self.assertEqual(set(project_ouessant1.regions.all()), set([region_existing]))\n self.assertEqual(set(project_ouessant1.countries.all()), set([country_existing]))\n self.assertEqual(set(project_ouessant1.regions.all()), set([region_existing]))\n self.assertEqual(set(powerplant_ilarionas.countries.all()), set([greece]))\n self.assertEqual(set(powerplant_ilarionas.regions.all()), set([mediterranean]))\n self.assertEqual(set(project_liaoning.countries.all()), set([china]))\n self.assertEqual(set(project_liaoning.regions.all()), set([northeast_asia]))\n self.assertEqual(set(powerplant_tonstad.countries.all()), set([norway]))\n self.assertEqual(set(powerplant_tonstad.regions.all()), set([region_existing]))", "def __init__(self, restaurant_name, cuisine_type):\n self.restaurant_name = restaurant_name\n self.cuisine_type = cuisine_type", "def __init__(self, restaurant_name, cuisine_type):\n self.restaurant_name = restaurant_name\n self.cuisine_type = cuisine_type", "def __init__(self, restaurant_name, cuisine_type):\n self.restaurant_name = restaurant_name\n self.cuisine_type = cuisine_type", "def setUp(self):\n self.app = create_app(config_name=\"testing\")\n self.client = self.app.test_client\n self.expense = {'name': 'snacks', 'amount': 12.23, 'date_of_expense': '01-01-2021'}\n\n\n # binds the app to the current context\n with self.app.app_context():\n # create all tables\n db.create_all()", "def data_manager_fixture():\n\n class DataManager:\n def __init__(self):\n self.gen 
= 1000\n self.cfg = get_cfg_defaults()\n mode = \"test_inference\"\n self.dataset = Dataset(None, self.cfg, mode)\n self.auto_anchors = AutoAnchors(self.dataset, self.cfg.model, self.gen)\n self.k_points = torch.ones((12, 2)) * 2.0\n self.wh = torch.ones((1000, 2)) * 2.0\n\n return DataManager()", "def test_create_boat(self):\n pass", "def setUp(self):\n\n # Get the Flask test client\n self.client = app.test_client()\n\n # Make mock of Google Flights API call\n def _mock_flight_results(parameter):\n return functions.flight_results_from_file('seed_data/testflights.txt')\n\n functions.flight_results = _mock_flight_results\n\n # Show Flask errors that happen during tests\n app.config['TESTING'] = True\n\n # Connect to test database\n connect_to_db(app, \"postgresql:///testdb\")\n\n # Create tables and add sample data\n db.create_all()\n example_data()", "def setUp(self):\n password = factory.Faker('pystr', min_chars=8, max_chars=16)\n self.user = UserFactory.create(password=password)\n self.token = Token.objects.create(user=self.user)\n self.factory = APIRequestFactory()\n\n # set up the data\n store = StoreFactory(user=self.user)\n material = MaterialFactory()\n self.material_stock = MaterialStockFactory(\n store=store, material=material, current_capacity=20, max_capacity=100\n )", "def test_retrieve_recipes(self):\n sample_recipe(user = self.user)\n sample_recipe(user = self.user)\n\n res = self.client.get(RECIPE_URL)\n\n recipes = Recipe.objects.all().order_by('-id')\n serializer = RecipeSerializer(recipes, many=True) # many=true returns the data as a list\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data, serializer.data)", "def setUp(self):\n\n # Get the Flask test client.\n self.client = app.test_client()\n app.config[\"TESTING\"] = True\n app.config[\"SECRET_KEY\"] = \"key\"\n app.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\n\n with self.client as c:\n with c.session_transaction() as sess:\n sess[\"patient_id\"] = 1\n\n # Connect to the test database.\n connect_to_db(app, db_uri=\"postgresql:///testnourish\") \n\n # Create the tables and add the sample data.\n db.create_all()\n load_test_data()", "def setUp(self):\n self.new_inv_item = ['1', 'Knife Set', 10, 'n', 'n']\n self.new_furn_item = ['2', 'Couch', 25, 'y', 'Cloth', 'L']\n self.new_elec_item = ['3', 'Dryer', 100, 'n', 'y', 'Samsung', 12]", "def test_seeded_data():\n assert Tweet.all()", "def test_create_run(self):\n pass", "def fixture_fixture_business_details_example():\n test_example = BusinessDetails(\n business_problem=BUSINESS_PROBLEM,\n business_stakeholders=BUSINESS_STAKEHOLDERS,\n line_of_business=LINE_OF_BUSINESS,\n )\n return test_example", "def __init__(self, restaurant_name, cuisine_type):\r\n\t\tself.restaurant_name = restaurant_name\r\n\t\tself.cuisine_type = cuisine_type" ]
[ "0.7482953", "0.6990066", "0.67108124", "0.66626084", "0.6656253", "0.6604087", "0.6598095", "0.64222395", "0.6398554", "0.6376477", "0.63677603", "0.6358689", "0.63136774", "0.6313415", "0.629336", "0.6270328", "0.62372196", "0.6197963", "0.61795425", "0.617826", "0.6164018", "0.6154073", "0.61468375", "0.61467373", "0.6113804", "0.61011964", "0.6096006", "0.6093015", "0.60906357", "0.6089874", "0.6068461", "0.60652584", "0.6061628", "0.60573107", "0.60572886", "0.6040638", "0.60366315", "0.6003661", "0.600275", "0.5999694", "0.598517", "0.59815747", "0.59801763", "0.59726995", "0.59659755", "0.596031", "0.59581155", "0.5956561", "0.59563285", "0.59531903", "0.5941598", "0.5919501", "0.5918626", "0.5907931", "0.5905893", "0.5892691", "0.5887646", "0.587699", "0.58728004", "0.5850505", "0.58504266", "0.5845613", "0.5841025", "0.5834763", "0.58298236", "0.58291066", "0.58291066", "0.5825895", "0.5824972", "0.58236295", "0.58194226", "0.581614", "0.5801598", "0.5800921", "0.5800599", "0.5797516", "0.57941496", "0.57896996", "0.5783688", "0.5780813", "0.57803565", "0.57791567", "0.57753617", "0.57728696", "0.577111", "0.5770348", "0.5768689", "0.5768689", "0.5768689", "0.57655245", "0.5763564", "0.5756723", "0.57547754", "0.5754228", "0.57534844", "0.5752819", "0.5752543", "0.5749164", "0.57456046", "0.5741183", "0.5738795" ]
0.0
-1
Kitchen fixture for future tests
def kitchen_only(restaurant_only):
    kitchen = Kitchen(restaurant_only)
    return kitchen
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fixtures():", "def setUpFixture(self):\n pass", "def _fixture_setup(self):\n pass", "def fixture_example_data():\n import_example_data()", "def fixture_runner():\n return CliRunner()", "def setUp(self):\n self.setup_beets()", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def start_fixture(self):\n pass", "def test_bed(self):\n #TODO write bed tests", "def setUp(self):\n super(PlayTests, self).setUp(\n \"tests/data/shakespeare/\", \"structure.json\", \"brief_example.xml\")", "def test_create_run(self):\n pass", "def setUp(self):\n self.fixtureFile = r\"v:\\workspace\\FileHandling\\src\\test-read-write.txt\"\n self.fixtureList = [\"my\", \"written\", \"text\"]\n self.fixtureListEmptyStrings = [\"my\", \"\", \"\", \"written\", \"text\"]\n self.fixtureListTrailingEmptyString = [\"my\", \"written\", \"text\", \"\", \"\"]", "def setUp(self):\n super(TranscriptionsTest, self).setUp()\n mommy.make_recipe('grunt.seed', _quantity=2)", "def setUp(self):\n self.fixtures_path = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"fixtures/\"\n )", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def tests():", "def setUp(self):\n self.fixture_file = r\"v:\\workspace\\FileHandling\\src\\test-read-write.txt\"\n self.fixture_list = [\"my\", \"written\", \"text\"]\n self.fixture_list_empty_strings = [\"my\", \"\", \"\", \"written\", \"text\"]\n self.fixture_list_trailing_empty_strings = [\"my\", \"written\", \"text\", \"\", \"\"]", "def setUp(self):\n self.epath = 'flyeye/tests/fixtures'\n self.dpath = join(self.epath, 'disc.silhouette')", "def tearDownFixture(self):\n pass", "def setUp(self):\n self", "def setUp(self):\n self", "def test_generate_all_testing(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n \n pass", "def fixture_candy():\n yield Person(name=\"Candy\", age=13, hobbies=[\"Gardening\"])", "def setUp(self):\n\n self.test_data_path = 'testing/test_data/'", "def setUp(self) :\n pass", "def fixture_andy():\n yield Person(name=\"Andy\", age=12, hobbies=[\"Star Wars\", \"Bicycles\"])", "def setUp(self):\n\n pass", "def setUp(self):\n\n pass", "def fixture_pandy():\n yield 
Person(name=\"Pandy\", age=12, hobbies=[\"Fortnite\"])", "def setUp(self):\r\n pass", "def setUp(self):\r\n\r\n \r\n self.client = app.test_client()\r\n # Show Flask errors that happen during tests\r\n app.config['TESTING'] = True\r\n \r\n connect_to_db(server.app)\r\n db.create_all()\r\n test_seed.create_test_data()", "def setUp(self):\n\n self.client = app.test_client()\n app.config['Testing'] = True\n app.config['SECRET_KEY'] = 'test'\n connect_to_db(app, db_uri='postgresql:///testdb', echo=False)\n db.create_all()\n\n example_data() # Need to expand!", "def setUp(self):\n super(TestCase, self).setUp()\n # Change the default directory that the tempfile\n # module places temporary files and directories in\n self.useFixture(fixtures.NestedTempfile())\n # Create a temporary directory and set it as $HOME in the environment.\n self.useFixture(fixtures.TempHomeDir())\n self.useFixture(tools.StandardLogging())\n self.addCleanup(self._clear_attrs)", "def setUp(self):\n super(TestCase, self).setUp()\n # Change the default directory that the tempfile\n # module places temporary files and directories in\n self.useFixture(fixtures.NestedTempfile())\n # Create a temporary directory and set it as $HOME in the environment.\n self.useFixture(fixtures.TempHomeDir())\n self.useFixture(tools.StandardLogging())\n self.addCleanup(self._clear_attrs)", "def setUp(self):\n\n #Get Flask test client\n self.client = app.test_client\n #Show errors from Flask than happen\n app.config['TESTING'] = True\n #Connect to test database\n connect_to_db(app, \"postgresql:///testdb\")\n #Create tables and add sample data to them\n db.create_all()\n example_data()", "def setUp(self):\n super().setUp()\n self.runner = CliRunner()", "def populate_fixtures():\n languages()\n words()", "def setUp(self):\n\n return", "def setUp(self):\n\n # Get the Flask test client.\n self.client = app.test_client()\n app.config[\"TESTING\"] = True\n app.config[\"SECRET_KEY\"] = \"key\"\n app.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\n\n with self.client as c:\n with c.session_transaction() as sess:\n sess[\"dietitian_id\"] = 1\n\n # Connect to the test database.\n connect_to_db(app, db_uri=\"postgresql:///testnourish\") \n\n # Create the tables and add the sample data.\n db.create_all()\n load_test_data()", "def setUp(self):\n self.tmp = TemporaryDirectory()", "def test_create_scenario1(self):\n pass" ]
[ "0.8008476", "0.7767613", "0.74596703", "0.74033487", "0.72472155", "0.6927232", "0.69233274", "0.69233274", "0.69233274", "0.69233274", "0.69233274", "0.69233274", "0.69233274", "0.69233274", "0.69233274", "0.68981403", "0.68428254", "0.67888683", "0.6758065", "0.67523795", "0.67267805", "0.67113614", "0.669169", "0.669169", "0.669169", "0.669169", "0.669169", "0.669169", "0.669169", "0.669169", "0.669169", "0.669169", "0.669169", "0.669169", "0.669169", "0.669169", "0.669169", "0.669169", "0.669169", "0.669169", "0.669169", "0.669169", "0.669169", "0.669169", "0.669169", "0.669169", "0.669169", "0.66879207", "0.6663914", "0.665168", "0.66417706", "0.6624423", "0.6624423", "0.66206145", "0.658738", "0.658738", "0.658738", "0.658738", "0.658738", "0.658738", "0.658738", "0.658738", "0.658738", "0.658738", "0.658738", "0.658738", "0.658738", "0.658738", "0.658738", "0.658738", "0.658738", "0.658738", "0.658738", "0.658738", "0.658738", "0.658738", "0.658738", "0.658738", "0.658738", "0.658738", "0.658738", "0.6585089", "0.658139", "0.65676624", "0.65632385", "0.65581834", "0.6540295", "0.6540295", "0.6539532", "0.6537139", "0.64945066", "0.64853734", "0.64810747", "0.64810747", "0.64804775", "0.6477674", "0.6477625", "0.64758545", "0.6462066", "0.6442167", "0.6440399" ]
0.0
-1
Delivery fixture for future tests
def delivery_only(restaurant_only):
    delivery = Delivery(restaurant_only)
    return delivery
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_confirm_delivery_details(self):\n pass", "def fixtures():", "def setUp(self):\n self.client = APIClient()\n\n self.speciality = models.Speciality.objects.create(\n name='Speciality'\n )\n\n self.payload = {\n 'name': \"Knee Replacement\",\n 'speciality': [self.speciality.pk],\n 'days_in_hospital': 2,\n 'days_in_destination': 2,\n 'duration_minutes': 120,\n 'overview': '<strong>Bla</strong> bla bla',\n }", "def setUpFixture(self):\n pass", "def test_create_shipment(self):\n pass", "def _fixture_setup(self):\n pass", "def setUp(self):\n self.factory = RequestFactory()\n with patch(\"bookwyrm.suggested_users.rerank_suggestions_task.delay\"), patch(\n \"bookwyrm.activitystreams.populate_stream_task.delay\"\n ):\n self.local_user = models.User.objects.create_user(\n \"mouse@local.com\",\n \"mouse@mouse.com\",\n \"mouseword\",\n local=True,\n localname=\"mouse\",\n remote_id=\"https://example.com/users/mouse\",\n )\n self.work = models.Work.objects.create(title=\"Test Work\")\n self.book = models.Edition.objects.create(\n title=\"Example Edition\",\n remote_id=\"https://example.com/book/1\",\n parent_work=self.work,\n )\n with patch(\"bookwyrm.models.activitypub_mixin.broadcast_task.delay\"):\n self.shelf = models.Shelf.objects.create(\n name=\"Test Shelf\", identifier=\"test-shelf\", user=self.local_user\n )\n models.SiteSettings.objects.create()", "def test_xfail_fixture(broken_fixture):\n pass", "def setUp(self):\n self.staff = get_user_model().objects.create_user(\n email='staff@curesio.com',\n password='staffpassword1234',\n username='staffusername'\n )\n self.staff.is_staff = True\n self.staff.save()\n self.staff.refresh_from_db()\n\n self.client = APIClient()\n self.client.force_authenticate(user=self.staff)\n\n self.speciality = models.Speciality.objects.create(\n name='Speciality'\n )\n\n self.payload = {\n 'name': \"Knee Replacement\",\n 'speciality': [self.speciality.id],\n 'overview': '<strong>Bla</strong> bla bla',\n }\n\n \"\"\"Test that list procedure is success\"\"\"\n p1 = models.Procedure.objects.create(\n name=\"procedure1\",\n overview='bla bla bla'\n )\n p1.speciality.set([self.speciality.pk])\n p1.save()\n\n p2 = models.Procedure.objects.create(\n name=\"procedure2\",\n overview='bla bla bla'\n )\n p2.speciality.set([self.speciality.pk])\n p2.save()\n\n res = self.client.get(PROCEDURE_URL)\n\n procedures = models.Procedure.objects.all().order_by(\"-name\")\n ser = serializer.ProcedureSerializer(procedures, many=True)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 2)\n self.assertEqual(res.data, ser.data)", "def test_delivery_factory_class():\n # __init__()\n factory = DeliveryFactory()\n order = Order(1)\n file = \"This is a file.\"\n\n expected_uber = UberEatsDelivery(order, file)\n expected_foodora = FoodoraDelivery(order, file)\n expected_delivery = Delivery(order, \"not uber or foodora\")\n\n assert factory.create_delivery(order, UBER_EATS, file).get_deliverer() == \\\n expected_uber.get_deliverer()\n assert factory.create_delivery(order, FOODORA, file).get_deliverer() == \\\n expected_foodora.get_deliverer()\n assert factory.create_delivery(order, \"not uber or foodora\", file).\\\n get_deliverer() == expected_delivery.get_deliverer()", "def afterSetUp(self):\n # Set some variables :\n\n TestERP5BankingCheckbookDeliveryMixin.afterSetUp(self)\n self.tic()\n self.createCheckbookDelivery()\n # the stop payment module\n self.stop_payment_module = self.getStopPaymentModule()", "def setUp(self):\n self.testUser 
= User.objects.get(username=\"c1e1\")\n self.client = APIClient()\n self.client.force_authenticate(user=self.testUser)\n self.data = {\n \"tracking\": 1234,\n \"mail_class\": \"12\",\n \"return_address\": {\n \"address1\": \"1234\",\n \"address2\": \"1234\",\n \"city\": \"1234\",\n \"state\": \"12\",\n \"zip\": 1234\n },\n \"rate\": 1234,\n \"address\": {\n \"address1\": \"1234\",\n \"address2\": \"1234\",\n \"city\": \"1234\",\n \"state\": \"12\",\n \"zip\": 1234\n },\n \"cost_center\": CostCenter.objects.filter(company=\n self.testUser.profile.company.pk)[0].pk\n }\n self.url = reverse('MailPiece-list')", "def test_delivery_subclasses():\n # Start with UberEatsDelivery class\n # __init__()\n order = Order(1)\n fries = MenuItem(\"fries\", \"Sides\", True, 5.00, 1)\n order.add_to_cart(fries)\n uber_eats = UberEatsDelivery(order, \"Test1.json\")\n deliver_error = UberEatsDelivery(order, \"FileDoesNotExist.jsdson\")\n uber_eats.set_address(\"some address\")\n\n # Getter methods\n assert uber_eats.get_file() == \"Test1.json\"\n assert uber_eats.get_deliverer() == UBER_EATS\n\n # deliver()\n assert uber_eats.deliver()\n assert not deliver_error.deliver()\n\n # FoodoraDelivery class\n # __init__()\n foodora = FoodoraDelivery(order, \"Test2.csv\")\n deliver_error = FoodoraDelivery(order, \"FileDoesNotExist.cdwasv\")\n foodora.set_address(\"some address\")\n\n # Getter methods\n assert foodora.get_file() == \"Test2.csv\"\n assert foodora.get_deliverer() == FOODORA\n\n # deliver()\n assert foodora.deliver()\n assert not deliver_error.deliver()", "def test_fixture(request):\n def finalizer():\n teardown()\n request.addfinalizer(finalizer)\n setup()", "def setUp(self):\n rand = ''.join(\n [random\n .choice(string.ascii_letters + string.digits) for n in range(16)])\n self.secret_key = 'sk_test_16c58271c29a007970de0353d8a47868df727cd0'\n self.random_ref = util.utf8(rand)\n self.test_email = 'bernard@disgui.se'\n self.test_amount = 5000\n self.plan = 'Basic'\n self.client = TransactionResource(self.secret_key, self.random_ref)\n # self.client.initialize(util.utf8(self.test_amount),\n # util.utf8(self.test_email),\n # util.utf8(self.plan))", "def prepare_for_delivery(self, by=None):", "def prepare_for_delivery(self, by=None):", "def prepare_for_delivery(self, by=None):", "def tearDownFixture(self):\n pass", "def setUp(self):\n super(PurchaseOrderTest, self).setUp()\n \n self.ct = ContentType(app_label=\"po\")\n self.ct.save()\n self.p = Permission(codename=\"add_purchaseorder\", content_type=self.ct)\n self.p.save()\n self.p2 = Permission(codename=\"change_purchaseorder\", content_type=self.ct)\n self.p2.save()\n #Create the user\n self.username = 'tester'\n self.password = 'pass'\n self.user = User.objects.create_user(self.username, 'charliep@dellarobbiathailand.com', self.password)\n self.user.save()\n self.user.user_permissions.add(self.p)\n self.user.user_permissions.add(self.p2)\n self.client.login(username=self.username, password=self.password)\n self.client.force_authenticate(self.user)\n \n self.supplier = Supplier(**base_supplier)\n self.supplier.save()\n self.address = Address(**base_address)\n self.address.contact = self.supplier\n self.address.save()\n self.contact = SupplierContact(name='test', email='test@yahoo.com', telephone=1234, primary=True)\n self.contact.supplier = self.supplier\n self.contact.save()\n\n # Create Custom Supply\n # not implemented\n\n # Create Fabric\n self.supply = Fabric.create(**base_fabric)\n \n #self.supply.units = \"m^2\"\n self.supply.save()\n self.supply1 = 
self.supply\n \n self.product = Product(supply=self.supply, supplier=self.supplier, cost=base_fabric['unit_cost'],\n purchasing_units='m')\n self.product.save()\n self.supply2 = Fabric.create(**base_fabric2)\n self.supply2.discount = 5\n self.supply2.save()\n self.product2 = Product(supply=self.supply2, supplier=self.supplier, cost=base_fabric['unit_cost'])\n self.product2.save()\n self.supply1.supplier = self.supplier\n self.supply2.supplier = self.supplier\n \n\n #Create supply with no target item\n self.supply3 = Supply.objects.create(description='test supply')\n self.supply3.id = 203\n self.supply3.save()\n\n #Create a project\n self.project = Project()\n self.project.codename = 'MC House'\n self.project.save()\n \n self.po = PurchaseOrder()\n self.po.employee = self.user\n self.po.supplier = self.supplier\n self.po.terms = self.supplier.terms\n self.po.vat = 7\n self.order_date = datetime.datetime(2017, 1, 15, 15, 30, 0, 0, tzinfo=timezone('Asia/Bangkok'))\n self.po.order_date = self.order_date\n self.po.receive_date = datetime.datetime.now()\n self.po.save()\n #self.po.create_and_upload_pdf()\n \n self.item = Item.create(supplier=self.supplier, id=1, **base_purchase_order['items'][0])\n self.item.purchase_order = self.po\n self.item.save()\n \n self.po.calculate_total()\n self.po.save()", "def setUp(self):\n self.factory = RequestFactory()\n with patch(\"bookwyrm.suggested_users.rerank_suggestions_task.delay\"), patch(\n \"bookwyrm.activitystreams.populate_stream_task.delay\"\n ), patch(\"bookwyrm.lists_stream.populate_lists_task.delay\"):\n self.local_user = models.User.objects.create_user(\n \"mouse@local.com\",\n \"mouse@mouse.com\",\n \"mouseword\",\n local=True,\n localname=\"mouse\",\n remote_id=\"https://example.com/users/mouse\",\n )\n work = models.Work.objects.create(title=\"Work\")\n self.book = models.Edition.objects.create(\n title=\"Example Edition\",\n remote_id=\"https://example.com/book/1\",\n parent_work=work,\n )\n\n with patch(\n \"bookwyrm.models.activitypub_mixin.broadcast_task.apply_async\"\n ), patch(\"bookwyrm.lists_stream.remove_list_task.delay\"):\n self.list = models.List.objects.create(\n name=\"Test List\", user=self.local_user\n )\n self.anonymous_user = AnonymousUser\n self.anonymous_user.is_authenticated = False\n\n models.SiteSettings.objects.create()", "def test_alien_data(self):", "def test_make_delivery_report_default(self):\n msg_helper = MessageHelper()\n event = msg_helper.make_delivery_report()\n self.assert_message_fields(event, {\n 'event_type': 'delivery_report',\n 'delivery_status': 'delivered',\n })", "def test_trade(self):\n pass", "def setUp(self):\n self.factory = RequestFactory()\n with patch(\"bookwyrm.suggested_users.rerank_suggestions_task.delay\"), patch(\n \"bookwyrm.activitystreams.populate_stream_task.delay\"\n ), patch(\"bookwyrm.lists_stream.populate_lists_task.delay\"):\n self.local_user = models.User.objects.create_user(\n \"mouse@example.com\",\n \"mouse@mouse.mouse\",\n \"mouseword\",\n local=True,\n localname=\"mouse\",\n )\n with patch(\"bookwyrm.models.user.set_remote_server.delay\"):\n self.remote_user = models.User.objects.create_user(\n \"rat\",\n \"rat@rat.rat\",\n \"ratword\",\n remote_id=\"http://example.com/rat\",\n local=False,\n )\n self.book = models.Edition.objects.create(\n title=\"Test Book\",\n parent_work=models.Work.objects.create(title=\"Test work\"),\n )", "def start_fixture(self):\n pass", "def fixture_example_data():\n import_example_data()", "def setUp(self):\n super(TranscriptionsTest, 
self).setUp()\n mommy.make_recipe('grunt.seed', _quantity=2)", "async def test_dependent_fixture(dependent_fixture):\n await asyncio.sleep(0.1)", "def setUp(self):\n super(ItemTest, self).setUp()\n \n self.ct = ContentType(app_label=\"po\")\n self.ct.save()\n self.p = Permission(codename=\"add_purchaseorder\", content_type=self.ct)\n self.p.save()\n self.p2 = Permission(codename=\"change_purchaseorder\", content_type=self.ct)\n self.p2.save()\n \n #Create the user\n self.username = 'tester'\n self.password = 'pass'\n self.user = User.objects.create_user(self.username, 'charliep@dellarobbiathailand.com', self.password)\n self.user.save()\n self.user.user_permissions.add(self.p)\n self.user.user_permissions.add(self.p2)\n self.client.login(username=self.username, password=self.password)\n \n \n self.supplier = Supplier(**base_supplier)\n self.supplier.save()\n self.address = Address(**base_address)\n self.address.contact = self.supplier\n self.address.save()\n self.contact = SupplierContact(name='test', email='test@yahoo.com', telephone=1234, primary=True)\n self.contact.supplier = self.supplier\n self.contact.save()\n \n \n self.supply = Fabric.create(**base_fabric)\n \n #self.supply.units = \"m^2\"\n self.supply.save()\n \n self.po = PurchaseOrder()\n self.po.employee = self.user\n self.po.supplier = self.supplier\n self.po.terms = self.supplier.terms\n self.po.vat = 7\n self.po.order_date = datetime.datetime(2014, 3, 2)\n self.po.save()\n \n self.item = Item(unit_cost=Decimal('13.55'), quantity=Decimal('10'), supply=self.supply)\n self.item.description = self.supply.description\n self.item.purchase_order = self.po\n self.item.save()", "def setUp(self):\n server.Inventory.remove_all()\n server.Inventory(0, \"shampoo\", 2, 'new').save()\n server.Inventory(0, \"conditioner\", 5, 'new').save()\n self.app = server.app.test_client()", "def fixture_microbial_order_id():\n return \"microbial_order_test\"", "def test_list_delivery_usage(self):\n pass", "def setUp(self):\n password = factory.Faker('pystr', min_chars=8, max_chars=16)\n self.user = UserFactory.create(password=password)\n self.token = Token.objects.create(user=self.user)\n self.factory = APIRequestFactory()\n\n # set up the data\n store = StoreFactory(user=self.user)\n material = MaterialFactory()\n self.material_stock = MaterialStockFactory(\n store=store, material=material, current_capacity=20, max_capacity=100\n )", "def setUp(self):\n super(Orders, self).setUp()", "def test_create_confirm_order_details(self):\n pass", "def fixture_pulseaudio_server():\n\n yield pulseaudio_server", "def setUp(self):\n self.client = APIClient()\n self.order_data = {\n \"customer\": {\n \"first_name\": \"Larosh\",\n \"last_name\": \"Tanbari\",\n \"address\": \"Coppistr\"\n },\n \"size\": \"BIG\"\n }\n self.response = self.client.post(\n reverse(\"get_all_or_create\"),\n data=self.order_data,\n format=\"json\"\n )", "def setUp(self):\n self.factory = RequestFactory()\n with patch(\"bookwyrm.suggested_users.rerank_suggestions_task.delay\"), patch(\n \"bookwyrm.activitystreams.populate_stream_task.delay\"\n ), patch(\"bookwyrm.lists_stream.populate_lists_task.delay\"):\n self.local_user = models.User.objects.create_user(\n \"mouse@your.domain.here\",\n \"mouse@mouse.com\",\n \"password\",\n local=True,\n localname=\"mouse\",\n two_factor_auth=False,\n )\n self.rat = models.User.objects.create_user(\n \"rat@your.domain.here\",\n \"rat@rat.com\",\n \"password\",\n local=True,\n localname=\"rat\",\n )\n self.badger = models.User.objects.create_user(\n 
\"badger@your.domain.here\",\n \"badger@badger.com\",\n \"password\",\n local=True,\n localname=\"badger\",\n two_factor_auth=True,\n )\n self.anonymous_user = AnonymousUser\n self.anonymous_user.is_authenticated = False\n models.SiteSettings.objects.create(id=1, require_confirm_email=False)", "def test_subscribe_offer(self):\n pass", "def test_create(self):\n pass", "def test_ship_orders(self):\n pass", "def test_get_shipment(self):\n pass", "def test_make_delivery_report_with_delivery_statuss(self):\n msg_helper = MessageHelper()\n event = msg_helper.make_delivery_report(delivery_status='pending')\n self.assert_message_fields(event, {\n 'event_type': 'delivery_report',\n 'delivery_status': 'pending',\n })", "def setUpTestData(cls):\n cls.post = PostFactory()", "def setUp(self):\n self.valid_data={'subject': 'Test subject',\n 'sender_name': 'Steve Tester',\n 'sender_email': 'test@example.com',\n 'message': 'This is my test message',\n 'cc_myself': 'True'}", "def test_tenant_setup_celery(self):\n\n class interceptor(mock.Mock):\n tenant = None\n\n def send(self, *kw, **args):\n self.tenant = properties.tenant\n\n msg = interceptor()\n tenant = mock.Mock()\n tenant.client_name = 'mock-tenant'\n\n _send_celery_mail(msg, tenant, send=True)\n\n self.assertTrue(msg.tenant is tenant)", "def test_client_create(self):\n pass", "def setUp(self):\n self.store = Store()", "def test_create_run(self):\n pass", "def test_download(self):\n pass", "def test_tenant_setup_celery_reset(self):\n msg = mock.Mock()\n tenant = mock.Mock()\n tenant.client_name = 'mock-tenant'\n\n _send_celery_mail(msg, tenant, send=False)\n\n self.assertFalse(hasattr(properties, 'tenant'))\n self.assertEqual(properties.tenant_properties, {})", "async def test_worker_alteration_court_order(app, session, mocker):\n identifier = 'BC1234567'\n business = create_business(identifier, legal_type='BC')\n\n file_number: Final = '#1234-5678/90'\n order_date: Final = '2021-01-30T09:56:01+08:00'\n effect_of_order: Final = 'hasPlan'\n\n filing = copy.deepcopy(FILING_HEADER)\n filing['filing']['alteration'] = {}\n filing['filing']['alteration']['business'] = BUSINESS\n filing['filing']['alteration']['contactPoint'] = CONTACT_POINT\n\n filing['filing']['alteration']['courtOrder'] = COURT_ORDER\n filing['filing']['alteration']['courtOrder']['effectOfOrder'] = effect_of_order\n\n payment_id = str(random.SystemRandom().getrandbits(0x58))\n filing_id = (create_filing(payment_id, filing, business_id=business.id)).id\n\n filing_msg = {'filing': {'id': filing_id}}\n\n # mock out the email sender and event publishing\n mocker.patch('entity_filer.worker.publish_email_message', return_value=None)\n mocker.patch('entity_filer.worker.publish_event', return_value=None)\n mocker.patch('entity_filer.filing_processors.filing_components.name_request.consume_nr', return_value=None)\n mocker.patch('entity_filer.filing_processors.filing_components.business_profile.update_business_profile',\n return_value=None)\n mocker.patch('legal_api.services.bootstrap.AccountService.update_entity', return_value=None)\n\n # Test\n await process_filing(filing_msg, app)\n\n # Check outcome\n final_filing = Filing.find_by_id(filing_id)\n assert file_number == final_filing.court_order_file_number\n assert datetime.fromisoformat(order_date) == final_filing.court_order_date\n assert effect_of_order == final_filing.court_order_effect_of_order", "def setUp(self):\n self.cashFlowDate=Date(1,October,2018)\n self.fixingDate=Date(1,November,2018)\n self.foreignAmount=1000.0\n 
self.familyName=\"ECB\"\n self.fixingDays=2\n self.sourceCurrency=USDCurrency()\n self.targetCurrency=EURCurrency()\n self.fixingCalendar=UnitedStates()\n self.todayDate=Date(11, November, 2018)\n self.tsDayCounter=Actual360()\n self.flatForwardUSD=FlatForward(self.todayDate, 0.005, self.tsDayCounter)\n self.sourceYts=RelinkableYieldTermStructureHandle(self.flatForwardUSD)\n self.flatForwardEUR=FlatForward(self.todayDate, 0.03, self.tsDayCounter);\n self.targetYts=RelinkableYieldTermStructureHandle(self.flatForwardEUR)\n self.fxindex=FxIndex(self.familyName,self.fixingDays,self.sourceCurrency,self.targetCurrency,self.fixingCalendar,self.sourceYts,self.targetYts)\n self.fxlinkedcashflow=FXLinkedCashFlow(self.cashFlowDate,self.fixingDate,self.foreignAmount,self.fxindex)", "def setUp(self):\n # Setup dummy custmers\n Customer.objects.create(name=\"Mike Zinyoni\", phone=\"+263784528370\", email=\"mzinyoni7@outlook.com\", address=\"Stand #11 Lorraine Drive, Bluffhill Harare Zimbabwe\")\n Customer.objects.create(name=\"Josh Nyamulomo\", phone=\"+26356839021\", email=\"jnyamulomo@gmail.com\", address=\"Stand #5 Lorraine Drive, Bluffhill Harare Zimbabwe\")\n Customer.objects.create(name=\"Brian Mpofu\", phone=\"+26390839021\", email=\"brianmpofu@gmail.com\", address=\"Stand #25 Lorraine Drive, Bluffhill Harare Zimbabwe\")\n # Setup dummy items\n Item.objects.create(name=\"Chicken thighs\", description=\"Chunky big chicken thighs from Irvines chickens\", price=4.99, unit=\"Kg\")\n Item.objects.create(name=\"Beef steak\", description=\"Premium quality beef steak from Caswell meats\", price=6.99, unit=\"Kg\")\n Item.objects.create(name=\"Kefalos Youghgut\", description=\"Healthy and tasty youghgut available in strawberry, banana and butter milk flavour\", price=5.21, unit=\"litre\")\n Item.objects.create(name=\"Eversharp pen\", description=\"Pens available in: blue , red, green and black ink\", price=0.99, unit=\"dozen\")\n Item.objects.create(name=\"Proton Bread\", description=\"Fresh 700g bread\", price=0.9, unit=\"loaf\")\n # Setup dummy Invoice along side the invoice line\n invoice_1 = Invoice(customer=Customer.objects.get(id=1),total=0)\n invoice_1.save()\n InvoiceLine.objects.create(invoice=invoice_1,item=Item.objects.get(id=1), quantity=2, amount=(Item.objects.get(id=1).price*2))\n InvoiceLine.objects.create(invoice=invoice_1,item=Item.objects.get(id=4), quantity=1, amount=(Item.objects.get(id=4).price*1))\n InvoiceLine.objects.create(invoice=invoice_1,item=Item.objects.get(id=3), quantity=6, amount=(Item.objects.get(id=3).price*6))\n invoice_1.total = sum(invoiceLine.amount for invoiceLine in invoice_1.invoiceLines.all())\n invoice_1.save()\n \n invoice_2 = Invoice(customer=Customer.objects.get(id=3),total=0)\n invoice_2.save()\n InvoiceLine.objects.create(invoice=invoice_2,item=Item.objects.get(id=5), quantity=12, amount=(Item.objects.get(id=5).price*12))\n InvoiceLine.objects.create(invoice=invoice_2,item=Item.objects.get(id=4), quantity=2, amount=(Item.objects.get(id=4).price*2))\n invoice_2.total = sum(invoiceLine.amount for invoiceLine in invoice_2.invoiceLines.all())\n invoice_2.save()\n \n invoice_3 = Invoice(customer=Customer.objects.get(id=2),total=0)\n invoice_3.save()\n InvoiceLine.objects.create(invoice=invoice_3,item=Item.objects.get(id=5), quantity=12, amount=(Item.objects.get(id=5).price*12))\n InvoiceLine.objects.create(invoice=invoice_3,item=Item.objects.get(id=4), quantity=2, amount=(Item.objects.get(id=4).price*2))\n 
InvoiceLine.objects.create(invoice=invoice_3,item=Item.objects.get(id=1), quantity=2, amount=(Item.objects.get(id=1).price*2))\n InvoiceLine.objects.create(invoice=invoice_3,item=Item.objects.get(id=4), quantity=1, amount=(Item.objects.get(id=4).price*1))\n InvoiceLine.objects.create(invoice=invoice_3,item=Item.objects.get(id=3), quantity=6, amount=(Item.objects.get(id=3).price*6))\n invoice_3.total = sum(invoiceLine.amount for invoiceLine in invoice_3.invoiceLines.all())\n invoice_3.save()\n\n invoice_4 = Invoice(customer=Customer.objects.get(id=1),total=0)\n invoice_4.save()\n InvoiceLine.objects.create(invoice=invoice_4,item=Item.objects.get(id=1), quantity=6, amount=(Item.objects.get(id=1).price*6))\n invoice_4.total = sum(invoiceLine.amount for invoiceLine in invoice_4.invoiceLines.all())\n invoice_4.save()", "def test_create_subscription(self):\n pass", "def setUp(self):\n self.store = yield buildStore(self, None)\n\n @inlineCallbacks\n def doit(txn):\n for statement in splitSQLString(schemaText):\n yield txn.execSQL(statement)\n\n yield inTransaction(\n self.store.newTransaction,\n doit,\n label=\"bonus schema\"\n )\n\n def indirectedTransactionFactory(*a, **b):\n \"\"\"\n Allow tests to replace \"self.store.newTransaction\" to provide\n fixtures with extra methods on a test-by-test basis.\n \"\"\"\n return self.store.newTransaction(*a, **b)\n\n def deschema():\n @inlineCallbacks\n def deletestuff(txn):\n for stmt in dropSQL:\n yield txn.execSQL(stmt)\n return inTransaction(\n lambda *a, **b: self.store.newTransaction(*a, **b), deletestuff\n )\n self.addCleanup(deschema)\n\n self.node1 = ControllerQueue(\n reactor, indirectedTransactionFactory, useWorkerPool=False)\n self.node2 = ControllerQueue(\n reactor, indirectedTransactionFactory, useWorkerPool=False)\n\n class FireMeService(Service, object):\n def __init__(self, d):\n super(FireMeService, self).__init__()\n self.d = d\n\n def startService(self):\n self.d.callback(None)\n\n d1 = Deferred()\n d2 = Deferred()\n FireMeService(d1).setServiceParent(self.node1)\n FireMeService(d2).setServiceParent(self.node2)\n ms = MultiService()\n self.node1.setServiceParent(ms)\n self.node2.setServiceParent(ms)\n ms.startService()\n @inlineCallbacks\n def _clean():\n yield ms.stopService()\n self.flushLoggedErrors(CancelledError)\n\n self.addCleanup(_clean)\n yield gatherResults([d1, d2])\n self.store.queuer = self.node1\n\n DummyWorkItem.results = {}", "def setUp(self):\n self.app = create_app(config_class=config.TestingConfig)\n self.app.app_context().push()\n\n self.mail = mail\n\n self.db = db\n self.db.create_all()", "def test_post_foods(self):\n pass", "def setUp(self):\n self.factory = RequestFactory()", "def setUp(self):\n self.factory = RequestFactory()", "def setUp(self):\n self.factory = RequestFactory()", "def prepare(self):\n # Create a purchase order from a supplier\n Company = self.old_state.apps.get_model('company', 'company')\n PurchaseOrder = self.old_state.apps.get_model('order', 'purchaseorder')\n Part = self.old_state.apps.get_model('part', 'part')\n Supplierpart = self.old_state.apps.get_model('company', 'supplierpart')\n # TODO @matmair fix this test!!!\n # SalesOrder = self.old_state.apps.get_model('order', 'salesorder')\n\n supplier = Company.objects.create(\n name='Supplier A',\n description='A great supplier!',\n is_supplier=True,\n is_customer=True,\n )\n\n part = Part.objects.create(\n name='Bob',\n description='Can we build it?',\n assembly=True,\n salable=True,\n purchaseable=False,\n tree_id=0,\n level=0,\n 
lft=0,\n rght=0,\n )\n supplierpart = Supplierpart.objects.create(\n part=part,\n supplier=supplier\n )\n\n # Create some orders\n for ii in range(10):\n\n order = PurchaseOrder.objects.create(\n supplier=supplier,\n reference=f\"{ii}-abcde\",\n description=\"Just a test order\"\n )\n order.lines.create(\n part=supplierpart,\n quantity=12,\n received=1\n )\n order.lines.create(\n quantity=12,\n received=1\n )\n\n # TODO @matmair fix this test!!!\n # sales_order = SalesOrder.objects.create(\n # customer=supplier,\n # reference=f\"{ii}-xyz\",\n # description=\"A test sales order\",\n # )\n # sales_order.lines.create(\n # part=part,\n # quantity=12,\n # received=1\n # )", "def setUp(self):\n self.store = yield buildStore(self, None)\n\n def doit(txn):\n return txn.execSQL(schemaText)\n\n yield inTransaction(\n lambda: self.store.newTransaction(\"bonus schema\"), doit\n )\n\n def indirectedTransactionFactory(*a):\n \"\"\"\n Allow tests to replace \"self.store.newTransaction\" to provide\n fixtures with extra methods on a test-by-test basis.\n \"\"\"\n return self.store.newTransaction(*a)\n\n def deschema():\n @inlineCallbacks\n def deletestuff(txn):\n for stmt in dropSQL:\n yield txn.execSQL(stmt)\n return inTransaction(\n lambda *a: self.store.newTransaction(*a), deletestuff\n )\n self.addCleanup(deschema)\n\n from twisted.internet import reactor\n self.node1 = PeerConnectionPool(\n reactor, indirectedTransactionFactory, 0, schema)\n self.node2 = PeerConnectionPool(\n reactor, indirectedTransactionFactory, 0, schema)\n\n class FireMeService(Service, object):\n def __init__(self, d):\n super(FireMeService, self).__init__()\n self.d = d\n\n def startService(self):\n self.d.callback(None)\n\n d1 = Deferred()\n d2 = Deferred()\n FireMeService(d1).setServiceParent(self.node1)\n FireMeService(d2).setServiceParent(self.node2)\n ms = MultiService()\n self.node1.setServiceParent(ms)\n self.node2.setServiceParent(ms)\n ms.startService()\n self.addCleanup(ms.stopService)\n yield gatherResults([d1, d2])\n self.store.queuer = self.node1", "def setUp(self):\n self.store = Store(FilePath(self.mktemp()))", "def test_make_event_defaults_dr(self):\n msg_helper = MessageHelper()\n event = msg_helper.make_event(\n 'delivery_report', 'abc123', delivery_status='pending')\n expected_event = TransportEvent(\n event_type='delivery_report', user_message_id='abc123',\n delivery_status='pending',\n transport_type=msg_helper.transport_type,\n transport_name=msg_helper.transport_name,\n transport_metadata={}, helper_metadata={},\n # These fields are generated in both messages, so copy them.\n event_id=event['event_id'], timestamp=event['timestamp'])\n self.assertEqual(expected_event, event)", "def test_archive_run(self):\n pass", "def test_bed(self):\n #TODO write bed tests", "def test_create_confirm_service_details(self):\n pass", "def setUp(self):\n self.client = APIClient()\n self.apple = Food.objects.create(name=\"apple\", calories=50)\n self.oatmeal = Food.objects.create(name=\"oatmeal\", calories=400)\n self.breakfast = Meal.objects.create(name=\"breakfast\")\n self.snack = Meal.objects.create(name=\"snack\")\n self.lunch = Meal.objects.create(name=\"lunch\")\n self.dinner = Meal.objects.create(name=\"dinner\")\n self.breakfast.foods.add(self.apple)", "def setUp(self):\n self.factory = RequestFactory()\n with patch(\"bookwyrm.suggested_users.rerank_suggestions_task.delay\"), patch(\n \"bookwyrm.activitystreams.populate_stream_task.delay\"\n ):\n self.local_user = models.User.objects.create_user(\n 
\"mouse@local.com\",\n \"mouse@mouse.mouse\",\n \"password\",\n local=True,\n localname=\"mouse\",\n )\n models.SiteSettings.objects.create()", "def setUp(self):\n self.app = create_app()\n self.client = self.app.test_client()\n self.app_context = self.app\n\n self.order = {\n \"order_id\":\"100\",\n \"pickup_location\":\"nakuru\",\n \"destination\":\"nairobi\",\n \"price\":\"1400\",\n \"user_id\":\"5\"\n\n }\n self.user = {\n \"user_id\":\"25\",\n \"firstname\":\"James\",\n \"lastname\":\"Martin\",\n \"username\":\"senge\",\n \"email\":\"senge@yahoo.com\",\n \"password\":\"andela\"\n }", "def setUp(self):\n storage = FileStorage()\n self.amenity = Amenity()\n self.amenity.name = 'test'\n self.amenity.save()\n self.state = State()\n self.state.name = 'California'\n self.state.save()\n self.city = City()\n self.city.name = 'San_Mateo'\n self.city.state_id = self.state.id\n self.city.save()\n self.user = User()\n self.user.first_name = 'test'\n self.user.last_name = 'test'\n self.user.email = 'test'\n self.user.password = 'test'\n self.user.save()\n self.place = Place()\n self.place.city_id = self.city.id\n self.place.user_id = self.user.id\n self.place.name = 'test_place'\n self.place.description = 'test_description'\n self.place.number_rooms = 2\n self.place.number_bathrooms = 1\n self.place.max_guest = 4\n self.place.price_by_night = 100\n self.place.latitude = 120.12\n self.place.longitude = 101.4\n self.place.save()\n self.review = Review()\n self.review.place_id = self.city.id\n self.review.user_id = self.user.id\n self.review.save()", "def setUp(self):\n super().setUp()\n create_board()\n create_repo()\n create_list()\n create_subscription()\n create_subscribed_list()\n db.session.commit()", "def retrieve_fixture():\n j = json.load(open(\"./tests/fixtures/crond_event.json\"))\n return j", "def test_peers_post(self):\n pass", "def test_client_retrieve(self):\n pass", "def test_process_data(self):\n pass", "def test_datatransformationsetups_post(self):\n pass", "def setUp(self):\n self.test_sig = Signature(agreement=self.test_agreement,\n signatory=self.test_user,\n username=self.test_user.username,\n first_name=self.test_user.first_name,\n last_name=self.test_user.last_name,\n email=self.test_user.email,\n department=self.test_department)\n self.test_sig.full_clean()\n self.test_sig.save()", "def setUp(self):\n self.tenant_root_domain = Tenant.objects.get_tenant_root_domain()\n self.site = Site.objects.create(\n name=f\"a.{self.tenant_root_domain}\",\n domain=f\"a.{self.tenant_root_domain}\")\n self.tenant = Tenant.objects.create(name=\"A\", site=self.site)\n self.domain = Domain.objects.create(domain=\"a.com\", tenant=self.tenant)\n\n self.other_site = Site.objects.create(\n name=f\"other.{self.tenant_root_domain}\",\n domain=f\"other.{self.tenant_root_domain}\"\n )\n self.other_tenant = Tenant.objects.create(\n name=\"Other\", site=self.other_site)\n self.other_domain = Domain.objects.create(\n domain=\"other.com\", tenant=self.other_tenant)\n\n self.marketing_page = Site.objects.create(\n name=\"Marketingpage\", domain=\"landingpage.com\")\n\n self.site_not_linked = Site.objects.create(\n name=f\"notlinked.{self.tenant_root_domain}\",\n domain=f\"notlinked.{self.tenant_root_domain}\")\n\n self.home_url = reverse(\"home\")\n self.secret_url = reverse(\"tenants:dashboard\")", "def setUp(self):\n self.client = Client()\n\n self.user = User.objects.get_or_create(\n username='max', first_name=\"Max\", last_name=\"Mustermann\")[0]\n self.someone = User.objects.get_or_create(\n 
username='peter', first_name=\"Peter\", last_name=\"Müller\")[0]\n\n self.client.force_login(self.user)\n self.facility = 'g'\n\n self.current_week = BookingPeriod().weeks[0]\n self.first_day = self.current_week.start\n\n now = datetime.now()\n self.today = datetime(\n year=now.year, month=now.month, day=now.day, hour=8)", "def test_client_verification_create(self):\n pass", "def test_create_subscription_template(self):\n pass", "def setUp(self):\n\n self.ach_model = FundingSources.get_ach_model()\n self.verify = self.get_basic_verification()", "def setUp(self):\n self.setup_beets()", "def setUp(self):\n APP.config.from_object(CONFIGS['testing_config'])\n self.api = APP\n self.api_context = self.api.app_context()\n self.api_context.push()\n self.api_test_client = APP.test_client()\n\n # Base url common to all endpoints\n self.BASE_URL = '/api/v1'\n # Sample data for POST requests\n self.ORDER = {\n 'item_name': 'Big Samosa',\n 'item_price': 200,\n 'quantity': 1\n }\n\n self.ORDER_2 = {\n 'item_name': 'Pork Ribs',\n 'item_price': 1080,\n 'quantity': 1\n }", "def setUp(self):\n UsuarioFactory.create()\n self.user = Usuario.objects.get(username='admin')\n ProyectoFactory.lider_proyecto = self.user\n ProyectoFactory.create()\n FaseFactory.proyecto = Proyecto.objects.get(nombre='Proyecto01')\n FaseFactory.create()\n TipoItemFactory.fase = Fase.objects.get(nombre='Fase01')\n TipoItemFactory.create()\n self.factory = RequestFactory()", "def setUp(self):\n self.portal = self.layer['portal']\n self.workflow = api.portal.get_tool('portal_workflow')\n\n # in tests we have to manually map content types to workflows\n self.workflow.setChainForPortalTypes(\n ['eestec.portal.event'],\n 'event_workflow'\n )\n\n # add test item\n self.lc = api.content.create(\n type='eestec.portal.lc',\n title=u'lc',\n container=self.portal,\n )\n\n self.event = api.content.create(\n type='eestec.portal.event',\n title=u'Tést event',\n container=self.lc\n )\n\n self.event = self.portal.lc['test-event']", "def tearDown(self):\n for name in ('Config', 'FlatPage'):\n model = getattr(shavida.models, name)\n model.objects.all().delete()\n\n # def test_is_first_refill_with_member_who_never_refilled(self):\n # customer = Member.objects.create_user(account_type=Member.CUSTOMER, username='77777777', password='123456',\n # email='roddy@red.com', postpaid_plan=None,\n # storage_provider='CVB')\n # self.assertTrue(customer.is_first_refill)\n\n # def test_is_first_refill_with_member_who_refilled_once(self):\n # prepaid_plan = PrepaidPlan(id=2, name='plan1', cost=5000)\n # storage = Storage(name='storage', size=32000, size_label=32000, type=Storage.FLASH_DISK)\n # prepaid_plan.save()\n # storage.save()\n # customer = Member.objects.create_user(account_type=Member.CUSTOMER, username='77777777', password='123456',\n # email='roddy@red.com', postpaid_plan=None, prepaid_plan=prepaid_plan,\n # storage_provider='CVB', storage_status=Storage.ACQUIRING)\n # when = datetime.now() - timedelta(days=12)\n # latest_prepayment = RetailPrepayment(member=customer, when=when, amount=5000, storage=storage, duration=30, balance=20000)\n # latest_prepayment.save()\n # self.assertFalse(customer.can_order_adult)\n\n # def test_can_order_adult_with_member_having_prepaid_plan_and_max_orders_reached(self):\n # customer = Member.objects.create_user(account_type=Member.CUSTOMER, username='77777777', password='123456',\n # email='roddy@red.com', postpaid_plan=None,\n # storage_provider='CVB' )\n # latest_prepayment = RetailPrepayment(member=customer, amount=5000, 
duration=30, balance=20000)\n # latest_prepayment.save()\n # for i in range(4):\n # order = CVBOrder(member=customer, cost=5000, status=CVBOrder.PENDING,\n # storage_amount=0, movies_amount=0, delivery_amount=0,copy_amount=0)\n # order.save()\n # self.assertTrue(customer.can_order_adult)\n #\n # def test_can_order_adult_with_member_having_prepaid_plan_and_max_orders_not_reached(self):\n # customer = Member.objects.create_user(account_type=Member.CUSTOMER, username='77777777', password='123456',\n # email='roddy@red.com', postpaid_plan=None,\n # storage_provider='CVB')\n # latest_prepayment = RetailPrepayment(member=customer, amount=5000, duration=30, balance=20000)\n # latest_prepayment.save()\n # for i in range(2):\n # order = CVBOrder(member=customer, cost=5000, status=CVBOrder.PENDING,\n # storage_amount=0, movies_amount=0, delivery_amount=0,copy_amount=0)\n # order.save()\n # self.assertFalse(customer.can_order_adult)\n #\n # def test_can_order_adult_with_member_having_prepaid_plan_and_prepayment_expired(self):\n # customer = Member.objects.create_user(account_type=Member.CUSTOMER, username='77777777', password='123456',\n # email='roddy@red.com', postpaid_plan=None,\n # storage_provider='CVB')\n # when = datetime.now() - timedelta(days=40)\n # latest_prepayment = RetailPrepayment(member=customer, when=when, amount=5000, duration=30, balance=20000)\n # latest_prepayment.save()\n # self.assertTrue(customer.can_order_adult)\n #\n # def test_can_order_adult_with_member_having_prepaid_plan_and_prepayment_not_expired(self):\n # customer = Member.objects.create_user(account_type=Member.CUSTOMER, username='77777777', password='123456',\n # email='roddy@red.com', postpaid_plan=None,\n # storage_provider='CVB')\n # when = datetime.now() - timedelta(days=12)\n # latest_prepayment = RetailPrepayment(member=customer, when=when, amount=5000, duration=30, balance=20000)\n # latest_prepayment.save()\n # self.assertFalse(customer.can_order_adult)", "def setUp(self):\n self.dummyFile = File('/tmp/dummyfile', 9999, 0, 0, 0)\n self.dummySet = set()\n self.dummySet.add(self.dummyFile)\n self.dummyFileSet = Fileset(name='SubscriptionTestFileset',\n files=self.dummySet)\n self.dummyWorkFlow = Workflow()\n self.dummySubscription = Subscription(fileset=self.dummyFileSet,\n workflow=self.dummyWorkFlow)\n return", "def test_download1(self):\n pass", "def setUp(self):\n self.client = Client()\n self.user = User.objects.create_user('testuser', 'test@user.com', 'q2w3E$R%')\n self.meter = Meter.objects.create(meter_name='testmeter', meter_unit='X')\n self.meter.save()\n reading = Reading.objects.create(meter=self.meter,\n reading=100,\n date=datetime.strptime('2001-01-01', '%Y-%m-%d').date(),\n remark='test reading')\n reading.save()\n usage = Usage.objects.create(month=1,\n year=2018,\n meter=self.meter,\n usage=1234)\n usage.save()", "def setUp(self) -> None:\n sqlite_db = setup_sqlite_in_memory_db()\n sqlite_db.create_tables([\n Schedule,\n Destination\n ])\n self.schedule_factory = ScheduleFactory()", "def test_transportzone_create(self):\n self.assertTrue(True)", "def test_order_fulfilled(client, mocker, application, bootcamp_run, user, has_paid):\n payment = 123\n order = create_test_order(application, payment, fulfilled=False)\n order.application = BootcampApplicationFactory.create(\n bootcamp_run=bootcamp_run, user=user, state=AppStates.AWAITING_PAYMENT.value\n )\n order.save()\n data_before = order.to_dict()\n\n data = {}\n for _ in range(5):\n data[FAKE.text()] = FAKE.text()\n\n 
data[\"req_reference_number\"] = make_reference_id(order)\n data[\"decision\"] = \"ACCEPT\"\n mocker.patch(\n \"ecommerce.views.IsSignedByCyberSource.has_permission\", return_value=True\n )\n send_email = mocker.patch(\"ecommerce.api.MailgunClient.send_individual_email\")\n mock_tasks = mocker.patch(\"ecommerce.api.tasks\")\n paid_in_full_mock = mocker.patch(\n \"applications.models.BootcampApplication.is_paid_in_full\",\n new_callable=PropertyMock,\n )\n paid_in_full_mock.return_value = has_paid\n\n resp = client.post(reverse(\"order-fulfillment\"), data=data)\n\n assert len(resp.content) == 0\n assert resp.status_code == statuses.HTTP_200_OK\n order.refresh_from_db()\n assert order.status == Order.FULFILLED\n assert order.receipt_set.count() == 1\n assert order.receipt_set.first().data == data\n\n assert send_email.call_count == 0\n assert OrderAudit.objects.count() == 2\n order_audit = OrderAudit.objects.last()\n assert order_audit.order == order\n assert order_audit.data_before == data_before\n assert order_audit.data_after == order.to_dict()\n\n order.application.refresh_from_db()\n assert order.application.state == (\n AppStates.COMPLETE.value if has_paid else AppStates.AWAITING_PAYMENT.value\n )\n assert (\n BootcampRunEnrollment.objects.filter(\n bootcamp_run=order.application.bootcamp_run, user=order.application.user\n ).exists()\n is has_paid\n )\n\n mock_tasks.send_receipt_email.delay.assert_called_once_with(order.application.id)", "def setUp(self):\r\n self.client = Client()\r\n self.ping_url = reverse('status.service.celery.ping')", "def setUp(self):\n signals.post_save.disconnect(create_notification_task, sender=Notification)\n signals.post_delete.disconnect(revoke_notification_task, sender=Notification)\n\n user = CustomUser.objects.create(id=100, email='testuser@mail.com', is_active=True)\n user.set_password('testpassword')\n user.save()\n\n self.client = Client()\n self.client.login(email='testuser@mail.com', password='testpassword')\n\n way_first = Way.objects.create(id=100, user=user)\n way_second = Way.objects.create(id=101, user=user)\n\n Notification.objects.create(\n id=100,\n way=way_first,\n start_time=datetime.date(2019, 10, 29),\n end_time=datetime.date(2019, 12, 29),\n week_day=6,\n time=datetime.time(23, 58, 59)\n )\n\n Notification.objects.create(\n id=101,\n way=way_first,\n start_time=datetime.date(2019, 11, 27),\n end_time=datetime.date(2020, 12, 27),\n week_day=1,\n time=datetime.time(1, 12, 38)\n )\n\n Notification.objects.create(\n id=102,\n way=way_second,\n start_time=datetime.date(2019, 3, 11),\n end_time=datetime.date(2019, 7, 31),\n week_day=2,\n time=datetime.time(11, 28, 25)\n )\n\n self.notification = Notification.objects.get(id=100)\n self.client = Client()\n self.client.login(email='testuser@mail.com', password='testpassword')", "def test_create_from_pear(self):\n pass", "def test_06_test_via_endpoint(self):\n\n # set up all the bits we need\n dataset = []\n for i in range(10):\n data = ArticleFixtureFactory.make_incoming_api_article(doi=\"10.123/test/\" + str(i),\n fulltext=\"http://example.com/\" + str(i))\n dataset.append(data)\n\n # create the main account we're going to work as\n article_owner = models.Account()\n article_owner.set_id(\"test\")\n article_owner.set_name(\"Tester\")\n article_owner.set_email(\"test@test.com\")\n article_owner.generate_api_key()\n article_owner.add_role('publisher')\n article_owner.add_role('api')\n article_owner.save(blocking=True)\n\n # Add another user who doesn't own these articles\n somebody_else = 
models.Account()\n somebody_else.set_id(\"somebody_else\")\n somebody_else.set_name(\"Somebody Else\")\n somebody_else.set_email(\"somebodyelse@test.com\")\n somebody_else.generate_api_key()\n somebody_else.add_role('publisher')\n somebody_else.add_role('api')\n somebody_else.save(blocking=True)\n\n assert article_owner.api_key != somebody_else.api_key\n\n # add a journal to the article owner account to create that link between account and articles\n journal = models.Journal(**JournalFixtureFactory.make_journal_source(in_doaj=True))\n journal.set_owner(article_owner.id)\n journal.save(blocking=True)\n\n with self.app_test.test_request_context():\n with self.app_test.test_client() as t_client:\n\n # Bulk create\n # The wrong owner can't create articles\n resp = t_client.post(url_for('api_v3.bulk_article_create', api_key=somebody_else.api_key),\n data=json.dumps(dataset))\n assert resp.status_code == 400, resp.status_code\n\n # Bulk create\n # redirected from v1\n # resp = t_client.post(url_for('api_v1.bulk_article_create', api_key=somebody_else.api_key),\n # data=json.dumps(dataset))\n # assert resp.status_code == 301, resp.status_code\n\n # But the correct owner can create articles\n resp = t_client.post(url_for('api_v3.bulk_article_create', api_key=article_owner.api_key),\n data=json.dumps(dataset))\n assert resp.status_code == 201\n reply = json.loads(resp.data.decode(\"utf-8\"))\n assert len(reply) == len(dataset)\n first_art = reply.pop()\n assert first_art['status'] == 'created'\n # Check we actually created new records\n time.sleep(1)\n assert len(models.Article.all()) == len(dataset)\n\n # Bulk delete\n all_but_one = [new_art['id'] for new_art in reply]\n resp = t_client.delete(url_for('api_v3.bulk_article_delete', api_key=article_owner.api_key),\n data=json.dumps(all_but_one))\n assert resp.status_code == 204\n time.sleep(1)\n # we should have deleted all but one of the articles.\n assert len(models.Article.all()) == 1\n # And our other user isn't allowed to delete the remaining one.\n resp = t_client.delete(url_for('api_v3.bulk_article_delete', api_key=somebody_else.api_key),\n data=json.dumps([first_art['id']]))\n assert resp.status_code == 400", "def test_advertiser_recipient(self):\n self.prep_advertiser()\n UnqualifiedConsumerEmailTask().run(test_mode=self.consumer)\n self.common_asserts()" ]
[ "0.71651965", "0.68431896", "0.6447273", "0.6385813", "0.6308756", "0.6252802", "0.62355095", "0.62030375", "0.61696184", "0.6162334", "0.61370283", "0.6062642", "0.6026275", "0.60202163", "0.60082394", "0.6001781", "0.6001781", "0.6001781", "0.5990583", "0.59405696", "0.59339184", "0.5930063", "0.5917501", "0.5894709", "0.58668435", "0.584991", "0.58490497", "0.58445495", "0.58304745", "0.5809943", "0.580766", "0.57880104", "0.5781204", "0.5758196", "0.5756376", "0.57528275", "0.5745092", "0.57235426", "0.5712831", "0.5701564", "0.56999075", "0.5697122", "0.5695387", "0.5694053", "0.56772065", "0.56757265", "0.5672983", "0.56724995", "0.5668554", "0.5666577", "0.565597", "0.5651629", "0.56488496", "0.5633151", "0.5632509", "0.5629188", "0.56179166", "0.56087685", "0.5606163", "0.5604561", "0.5604561", "0.5604561", "0.55974984", "0.5597314", "0.55965275", "0.5583981", "0.557943", "0.5576544", "0.55744684", "0.5571771", "0.5570298", "0.5554009", "0.55517906", "0.5551027", "0.55487067", "0.5548493", "0.55462605", "0.554574", "0.55416965", "0.5540341", "0.55384713", "0.5537247", "0.553333", "0.5531505", "0.5531009", "0.55262554", "0.55261374", "0.55147433", "0.55144733", "0.55114764", "0.5508298", "0.55073357", "0.5505876", "0.5503439", "0.550292", "0.5502757", "0.54997313", "0.54987174", "0.5497954", "0.54974586", "0.54973996" ]
0.0
-1
Hall fixture for future tests
def hall_only(restaurant_only):
    hall = Hall(restaurant_only, max=50)
    return hall
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fixtures():", "def setUpFixture(self):\n pass", "def _fixture_setup(self):\n pass", "def fixture_example_data():\n import_example_data()", "def start_fixture(self):\n pass", "def tearDownFixture(self):\n pass", "def data_manager_fixture():\n\n class DataManager:\n def __init__(self):\n self.gen = 1000\n self.cfg = get_cfg_defaults()\n mode = \"test_inference\"\n self.dataset = Dataset(None, self.cfg, mode)\n self.auto_anchors = AutoAnchors(self.dataset, self.cfg.model, self.gen)\n self.k_points = torch.ones((12, 2)) * 2.0\n self.wh = torch.ones((1000, 2)) * 2.0\n\n return DataManager()", "def test_fixture(request):\n def finalizer():\n teardown()\n request.addfinalizer(finalizer)\n setup()", "def fixture_andy():\n yield Person(name=\"Andy\", age=12, hobbies=[\"Star Wars\", \"Bicycles\"])", "def fixture_pandy():\n yield Person(name=\"Pandy\", age=12, hobbies=[\"Fortnite\"])", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n super(PlayTests, self).setUp(\n \"tests/data/shakespeare/\", \"structure.json\", \"brief_example.xml\")", "def test_alien_data(self):", "def test_bed(self):\n #TODO write bed tests", "def setUp(self):\n self.fixtureFile = r\"v:\\workspace\\FileHandling\\src\\test-read-write.txt\"\n self.fixtureList = [\"my\", \"written\", \"text\"]\n self.fixtureListEmptyStrings = [\"my\", \"\", \"\", \"written\", \"text\"]\n self.fixtureListTrailingEmptyString = [\"my\", \"written\", \"text\", \"\", \"\"]", "def test_create_run(self):\n pass", "def populate_fixtures():\n languages()\n words()", "def setUp(self):\n self.setup_beets()", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def fixture_candy():\n yield Person(name=\"Candy\", age=13, hobbies=[\"Gardening\"])", "def setUpTestData(cls):\n data_gen.run()", "def setUpTestData(cls):\n data_gen.run()", "def setUpTestData(cls):\n data_gen.run()", "def setUpTestData(cls):\n data_gen.run()", "def setUpTestData(cls):\n cls.post = PostFactory()", "def setUp(self):\n self.fixture_file = r\"v:\\workspace\\FileHandling\\src\\test-read-write.txt\"\n self.fixture_list = [\"my\", \"written\", \"text\"]\n self.fixture_list_empty_strings = [\"my\", \"\", \"\", \"written\", \"text\"]\n self.fixture_list_trailing_empty_strings = [\"my\", \"written\", \"text\", \"\", \"\"]", "def setUp(self):\n self", "def setUp(self):\n self", "def orlov_fixture(request, workspace, minicap):\n logger.info('Orlov Fixture : setup minicap service and other.')\n request.cls.workspace = workspace\n request.cls.minicap = minicap\n request.cls.evidence_dir = request.cls.workspace.mkdir('tmp\\\\evidence')\n request.cls.video_dir = request.cls.workspace.mkdir('tmp\\\\video')\n yield\n logger.info('Olorv Fixture : teardown minicap service and other.')", "def fixture_runner():\n return CliRunner()", "def setUp(self):\n self.epath = 'flyeye/tests/fixtures'\n 
self.dpath = join(self.epath, 'disc.silhouette')", "def setUp(self):\n self.data = DatabaseIntermediary()", "def test_generate_all_testing(self):\n pass", "def setUpTestData(cls):\n call_command('loaddata', 'db.json', verbosity=0)", "def setUp(self):\n self.fixtures_path = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"fixtures/\"\n )", "def tests():", "def setup_class(self):\n self.data_type = 'pytest'", "def setUp(self) :\n pass", "def setUp(self):\n self.tmp = TemporaryDirectory()", "def setUp(self):\r\n\r\n \r\n self.client = app.test_client()\r\n # Show Flask errors that happen during tests\r\n app.config['TESTING'] = True\r\n \r\n connect_to_db(server.app)\r\n db.create_all()\r\n test_seed.create_test_data()", "def test_xfail_fixture(broken_fixture):\n pass", "def test_let(self):", "def setUpTestData(cls):\n # volunteer user\n common.initialize_empty_volunteer()", "def test_0():\n sync.gen_multi_fake_data()#default is only one randomly selected data set\n sync.main(testing=True)", "def fixture_input_block():\n return Mock()", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass" ]
[ "0.8068017", "0.78472394", "0.7669949", "0.725615", "0.7145169", "0.70357", "0.70156425", "0.6989071", "0.678195", "0.67077214", "0.6646608", "0.6646608", "0.6646608", "0.6646608", "0.6646608", "0.6646608", "0.6646608", "0.6646608", "0.6646608", "0.6626644", "0.662342", "0.65937614", "0.6580239", "0.65557355", "0.6532227", "0.65278465", "0.65171695", "0.65171695", "0.65171695", "0.65171695", "0.65171695", "0.65171695", "0.65171695", "0.65171695", "0.65171695", "0.65171695", "0.65171695", "0.65171695", "0.65171695", "0.65171695", "0.65171695", "0.65171695", "0.65171695", "0.65171695", "0.65171695", "0.65171695", "0.65171695", "0.65171695", "0.65171695", "0.65171695", "0.65171695", "0.65099525", "0.64981246", "0.64981246", "0.64981246", "0.64981246", "0.64936316", "0.64888835", "0.6474406", "0.6474406", "0.64691925", "0.64354", "0.64182264", "0.64113456", "0.6402909", "0.63730294", "0.636636", "0.6347356", "0.63456506", "0.6336366", "0.6325282", "0.63230914", "0.63213664", "0.6320287", "0.632013", "0.63192064", "0.63188255", "0.63179874", "0.63179874", "0.63179874", "0.63179874", "0.63179874", "0.63179874", "0.63179874", "0.63179874", "0.63179874", "0.63179874", "0.63179874", "0.63179874", "0.63179874", "0.63179874", "0.63179874", "0.63179874", "0.63179874", "0.63179874", "0.63179874", "0.63179874", "0.63179874", "0.63179874", "0.63179874", "0.63179874" ]
0.0
-1
Simple restaurant instance creating test
def test_simple_restaurant(restaurant_only):
    assert restaurant_only
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setUp(self):\n valid_name = \"Tungalo\"\n valid_location = \"Rivne\"\n valid_status = 0\n valid_tables_count = 10\n valid_description = \"description\"\n\n self.restaurant = Restaurant()\n self.restaurant.name = valid_name\n self.restaurant.location = valid_location\n self.restaurant.status = valid_status\n self.restaurant.tables_count = valid_tables_count\n self.restaurant.description = valid_description", "def __init__(self, restaurant_name, cuisine_type):\n self.name = restaurant_name\n self.food = cuisine_type", "def test_create(self):\n retreat = Retreat.objects.create(\n name=\"random_retreat\",\n details=\"This is a description of the retreat.\",\n seats=40,\n address_line1=\"123 random street\",\n postal_code=\"123 456\",\n state_province=\"Random state\",\n country=\"Random country\",\n timezone=\"America/Montreal\",\n price=3,\n start_time=LOCAL_TIMEZONE.localize(datetime(2130, 1, 15, 8)),\n end_time=LOCAL_TIMEZONE.localize(datetime(2130, 1, 17, 12)),\n min_day_refund=7,\n min_day_exchange=7,\n refund_rate=100,\n is_active=True,\n accessibility=True,\n form_url=\"example.com\",\n carpool_url='example2.com',\n review_url='example3.com',\n has_shared_rooms=True,\n room_type=Retreat.DOUBLE_OCCUPATION,\n toilet_gendered=True,\n )\n\n self.assertEqual(retreat.__str__(), \"random_retreat\")", "def test_create_restaurant_with_all_fields(self):\n headers = {'Content-Type': 'application/json'}\n headers.update(auth_header_cru_restaurants)\n name = 'Restaurant Chinois'\n street = '999 Sutter St'\n suite = '510'\n city = 'Wood-Ridge'\n state = 'NJ'\n zip_code = '07075'\n phone_num = '201-555-7777'\n website = 'www.chinois-nj.com'\n email = 'chinois-nj@gmail.com'\n date_established = '2014'\n creator = 'some-user@gmail.com'\n info = {'name': name, 'street': street, 'suite': suite,\n 'city': city, 'state': state, 'zip_code': zip_code,\n 'phone_num': phone_num, 'website': website, 'email': email,\n 'date_established': date_established, 'creator': creator\n }\n resp = self.test_client.post(self.API_BASE + '/create', headers=headers, data=json.dumps(info))\n\n self.assertEqual(resp.status_code, 200)\n resp_dict = json.loads(resp.data)\n self.assertEqual(resp_dict['id'], 1)\n self.assertTrue(name in resp_dict['message'])\n\n # Check that all restaurant fields are returned.\n self.assertTrue('restaurant' in resp_dict)\n self.assertEqual(resp_dict['restaurant']['id'], 1)\n self.assertEqual(resp_dict['restaurant']['name'], name)\n self.assertEqual(resp_dict['restaurant']['street'], street)\n self.assertEqual(resp_dict['restaurant']['suite'], suite)\n self.assertEqual(resp_dict['restaurant']['city'], city)\n self.assertEqual(resp_dict['restaurant']['state'], state)\n self.assertEqual(resp_dict['restaurant']['zip_code'], zip_code)\n self.assertEqual(resp_dict['restaurant']['phone_num'], phone_num)\n self.assertEqual(resp_dict['restaurant']['website'], website)\n self.assertEqual(resp_dict['restaurant']['email'], email)\n self.assertEqual(resp_dict['restaurant']['date_established'], date_established)\n self.assertEqual(resp_dict['restaurant']['creator'], creator)\n\n # -----------------------------\n # Make a separate request to retrieve the restaurant and assert that all fields are as created\n resp = self.test_client.get(self.API_BASE + '/1', headers=auth_header_cru_restaurants)\n\n self.assertEqual(resp.status_code, 200)\n resp_dict = json.loads(resp.data)\n self.assertEqual(resp_dict['restaurant']['id'], 1)\n self.assertEqual(resp_dict['restaurant']['name'], name)\n 
self.assertEqual(resp_dict['restaurant']['street'], street)\n self.assertEqual(resp_dict['restaurant']['suite'], suite)\n self.assertEqual(resp_dict['restaurant']['city'], city)\n self.assertEqual(resp_dict['restaurant']['state'], state)\n self.assertEqual(resp_dict['restaurant']['zip_code'], zip_code)\n self.assertEqual(resp_dict['restaurant']['phone_num'], phone_num)\n self.assertEqual(resp_dict['restaurant']['website'], website)\n self.assertEqual(resp_dict['restaurant']['email'], email)\n self.assertEqual(resp_dict['restaurant']['date_established'], date_established)\n self.assertEqual(resp_dict['restaurant']['creator'], creator)", "def test_get_restaurant_by_id(self):\n from espresso import db\n from espresso import Restaurant\n\n name = 'Restaurant Greco'\n db.session.add(Restaurant(name=name, creator='test-user@gmail.com'))\n db.session.commit()\n\n # Since this is a freshly created table, the first id should be 1\n resp = self.test_client.get(self.API_BASE + '/1', headers=auth_header_cru_restaurants)\n self.assertEqual(resp.status_code, 200)\n resp_dict = json.loads(resp.data)\n self.assertEqual(resp_dict['restaurant']['name'], name)", "def __init__(self, restaurant_name, cuisine_type):\n self.name = restaurant_name\n self.type = cuisine_type", "def setUp(self):\n self.client = APIClient()\n self.apple = Food.objects.create(name=\"apple\", calories=50)\n self.oatmeal = Food.objects.create(name=\"oatmeal\", calories=400)\n self.breakfast = Meal.objects.create(name=\"breakfast\")\n self.snack = Meal.objects.create(name=\"snack\")\n self.lunch = Meal.objects.create(name=\"lunch\")\n self.dinner = Meal.objects.create(name=\"dinner\")\n self.breakfast.foods.add(self.apple)", "def __init__(self, restaurant_name, cuisine_type):\n\t\tself.name = restaurant_name\n\t\tself.type = cuisine_type", "def __init__(self, restaurant_name, cuisine_type):\n self.restaurant_name = restaurant_name\n self.cuisine_type = cuisine_type", "def __init__(self, restaurant_name, cuisine_type):\n self.restaurant_name = restaurant_name\n self.cuisine_type = cuisine_type", "def __init__(self, restaurant_name, cuisine_type):\n self.restaurant_name = restaurant_name\n self.cuisine_type = cuisine_type", "def __init__(self, restaurant_name, cuisine_type):\r\n\t\tself.restaurant_name = restaurant_name\r\n\t\tself.cuisine_type = cuisine_type", "def test_get_restaurants(self):\n address = {'number': '375',\n 'street': 'Noe St',\n 'city': 'San Francisco',\n 'zip': '94114'}\n\n with self.app.app_context():\n restaurants = ordrin.get_restaurants(address)\n\n # Ordr.in returns a test entry as the first item in the list when\n # when hitting their testing servers.\n entry = restaurants[0]\n self.assertEquals(entry['na'], 'Test Merchant 20130315')\n self.assertEquals(entry['id'], 23917)", "def test_full_restaurant(restaurant_full):\n assert restaurant_full", "def test_foodtrucks_create(self):\n\t\tprint 'API Test: create a new foodtruck'\n\t\turl = reverse('foodtruck_list')\n\t\tdata = {\"status\" : \"APPROVED\",\\\n\t\t \"expirationdate\" : \"2015-03-15T00:00:00\",\\\n\t\t \"permit\" : \"14MFF-0107\",\\\n\t\t \"block\" : \"3794\",\\\n\t\t \"received\" : \"Jun 24 2014 1:49PM\",\\\n\t\t \"facilitytype\" : \"Truck\",\\\n\t\t \"blocklot\" : \"3794002A\",\\\n\t\t \"locationdescription\" : \"02ND ST: TOWNSEND ST to KING ST (700 - 799)\",\\\n\t\t \"cnn\" : 148000,\\\n\t\t \"priorpermit\" : 1,\\\n\t\t \"approved\" : \"2014-06-24T13:55:30\",\\\n\t\t \"noisent\" : \"2013-07-25T00:00:00\",\\\n\t\t \"schedule\" : 
\"http://bsm.sfdpw.org/PermitsTracker/reports/report.aspx?title=schedule&report=rptSchedule&params=permit=14MFF-0107&ExportPDF=1&Filename=14MFF-0107_schedule.pdf\",\\\n\t\t \"address\" : \"750 02ND ST\",\\\n\t\t \"applicant\" : \"Steve's Mobile Deli\",\\\n\t\t \"lot\" : \"002A\",\\\n\t\t \"fooditems\" : \"Cold Truck: Pre-packaged sandwiches: Burgers: Hot Dogs: Muffin Sandwiches: Enchiladas: Bagels: Burritos: Salads: Snacks: Beverages\",\\\n\t\t \"longitude\" : -122.402978526686,\\\n\t\t \"latitude\" : 37.7302216813049, \\\n\t\t \"y\" : 2093947.369,\\\n\t\t \"x\" : 6011371.493,\\\n\t\t \"objectid\" : 554527}\n\t\t\n\t\tresponse = self.client.post(url, data, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\t\t\n\t\tquant = '1.000000'\n\t\tfor k, v in data.iteritems():\n\t\t\tif v is not None and (k is \"y\" or k is \"x\" or k is \"latitude\" or k is \"longitude\"):\n\t\t\t\tself.assertEqual(response.data[k].quantize(Decimal(quant)), Decimal(v).quantize(Decimal(quant)))\n\t\t\telif v is not None and (k is \"approved\" or k is \"received\" or k is \"expirationdate\" or k is \"noisent\"):\n\t\t\t\tself.assertEqual(response.data[k], parse(v))\n\t\t\telse:\n\t\t\t\tself.assertEqual(response.data[k], v)\n\t\t\n\t\tresponse = self.client.get(url, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_200_OK)\n\n\t\tfor k, v in data.iteritems():\n\t\t\tif v is not None and (k is \"y\" or k is \"x\" or k is \"latitude\" or k is \"longitude\"):\n\t\t\t\tself.assertEqual(response.data[0][k].quantize(Decimal(quant)), Decimal(v).quantize(Decimal(quant)))\n\t\t\telif v is not None and (k is \"approved\" or k is \"received\" or k is \"expirationdate\" or k is \"noisent\"):\n\t\t\t\tself.assertEqual(response.data[0][k], parse(v))\n\t\t\telse:\n\t\t\t\tself.assertEqual(response.data[0][k], v)\n\t\tprint 'pass'", "def __init__(self, restaurant_name,cuisine_type):\r\n self.restaurant = restaurant_name\r\n self.cuisine = cuisine_type", "def test_get_food(self):\n pass", "def __init__(self, restaurant_name, cuisine_type):\n\t\tself.restaurant_name = restaurant_name.title()\n\t\tself.cuisine_type = cuisine_type", "def test_basic_instance_creation(self):\n first = self.constituencies[0]\n self.assertEqual(first.slug, 'my-place')\n self.assertEqual(first.get_absolute_url(),\n u\"/constituency/%s/\" % first.slug)\n count = 0\n for user in self.users:\n self.assertEqual(user.postcode, USERS[count]['postcode'])\n count += 1", "def __init__(self, restaurant_name, cuisine_type):\n self.name = restaurant_name\n self.type = cuisine_type\n self.number_served = 0", "def __init__(self, restaurant_name, cuisine_type):\n self.name = restaurant_name\n self.type = cuisine_type\n self.number_served = 0", "def test_get_two_restaurants(self):\n from espresso import db\n from espresso import Restaurant\n\n name_1 = 'Restaurant Italiano'\n db.session.add(Restaurant(name=name_1, creator='test-user@gmail.com'))\n name_2 = 'Restaurant Français'\n db.session.add(Restaurant(name=name_2, creator='test-user@gmail.com'))\n db.session.commit()\n\n resp = self.test_client.get(self.API_BASE, headers=auth_header_cru_restaurants)\n self.assertEqual(resp.status_code, 200)\n resp_dict = json.loads(resp.data)\n self.assertEqual(len(resp_dict['restaurants']), 2)\n self.assertEqual(resp_dict['restaurants'][0]['name'], name_1)\n self.assertEqual(resp_dict['restaurants'][1]['name'], name_2)", "def test_create_restaurant_no_creator(self):\n headers = {'Content-Type': 'application/json'}\n 
headers.update(auth_header_cru_restaurants)\n info = {'name': 'Ping Yan', 'city': 'Chicago'}\n resp = self.test_client.post(self.API_BASE + '/create', headers=headers, data=json.dumps(info))\n self.assertEqual(resp.status_code, 400)", "def main():\n\n restaurant = Restaurant(\n name=\"Table By Basant\",\n description=\"North India, Italian, Chinese\",\n address=\"Redwood Shores\",\n operating_hours=\"11AM to 11PM\",\n ratings=4.8,\n menu=Menu(\n title=\"Delights\",\n num_of_dishes=5,\n dishes=[\n Dish(name=\"Gajrela\", price=300, discount=0.50, description=\"indian sweet\", ratings=4.5),\n Dish(name=\"Noodles\", price=200, discount=0.20, description=\"chinese\", ratings=4.3),\n Dish(name=\"Burger\", price=100, discount=0.40, description=\"western\", ratings=5.0),\n Dish(name=\"Pizza\", price=500, discount=0.10, description=\"italian\", ratings=4.8),\n Dish(name=\"Samosa\", price=30, discount=0.0, description=\"indian snacks\", ratings=5.0)\n ]\n )\n\n )\n\n restaurant.show_restaurant()", "def test_create_restaurant_with_token(self):\n url = '/api/places/'\n client = APIClient()\n\n client.credentials(HTTP_AUTHORIZATION=self.test_user1_token)\n response = client.post(url, self.restaurant_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)", "def test_create_restaurant_no_name(self):\n headers = {'Content-Type': 'application/json'}\n headers.update(auth_header_cru_restaurants)\n info = {'creator': 'nobody@gmail.com', 'city': 'Chicago'}\n resp = self.test_client.post(self.API_BASE + '/create', headers=headers, data=json.dumps(info))\n self.assertEqual(resp.status_code, 400)", "def __init__(self, restaurant_name, cuisine_type):\n\t\tself.restaurant_name = restaurant_name\n\t\tself.cuisine_type = cuisine_type\n\t\tself.number_served = 0", "def __init__(self, restaurant_name, cuisine_type):\n\t\tself.restaurant_name = restaurant_name\n\t\tself.cuisine_type = cuisine_type\n\t\tself.number_served = 0", "def test_create_hotel(self):\n amsterdam = City.objects.get(name=\"Amsterdam\")\n ibis = Hotel.objects.get(name=\"Ibis\")\n\n self.assertEqual(ibis.city, amsterdam)\n self.assertEqual(ibis.code, \"AMS01\")\n self.assertEqual(ibis.name, \"Ibis\")", "def __init__(self, restaurant_name, cuisine_type, number_served=0):\n self.restaurant_name = restaurant_name\n self.cuisine_type = cuisine_type\n self.number_served = number_served", "def __init__(self, restaurant_name, cuisine_type):\n self.restaurant_name = restaurant_name\n self.cuisine_type = cuisine_type\n self.number_served = 0", "def __init__(self, restaurant_name, cuisine_type):\n self.restaurant_name = restaurant_name\n self.cuisine_type = cuisine_type\n self.number_served = 0", "def test_delete_restaurant(self):\n from espresso import db\n from espresso import Restaurant\n\n name = 'Restaurant Greco'\n db.session.add(Restaurant(name=name, creator='test-user@gmail.com'))\n db.session.commit()\n\n # Since this is a freshly created table, the first id should be 1\n resp = self.test_client.delete(self.API_BASE + '/1', headers=auth_header_all_permissions)\n self.assertEqual(resp.status_code, 200)\n resp_dict = json.loads(resp.data)\n self.assertEqual(resp_dict['success'], True)\n\n resp = self.test_client.get(self.API_BASE + '/1', headers=auth_header_all_permissions)\n self.assertEqual(resp.status_code, 404)\n resp_dict = json.loads(resp.data)\n self.assertEqual(resp_dict['success'], False)", "def __init__(self, restaurant_name, cuisine_type):\n self.restaurant_name = restaurant_name\n self.cuisine_type = 
cuisine_type\n self.number_served = 0", "def test_add_to_fav_(self):\n result = self.client.post(\"/add_to_fav\", data={\"yelp_biz_id\":\"JA_V9TqDCrkgknqrcUndIQ\", \n \"yelp_rest_name\":\"Siam\", \"yelp_rating\":\"4\", \n \"yelp_category\":\"Thai\", \"yelp_price\":\"$$\", \n \"yelp_image_url\":\"https://s3-media2.fl.yelpcdn.com/bphoto/1SkZwZrRZkQSzRMn_Trs3w/o.jpg\" })\n\n DB_result = Restaurant_details.query.filter_by(biz_id = \"JA_V9TqDCrkgknqrcUndIQ\").first()\n self.assertIsNotNone(DB_result) #testing that the returned result is not NONE\n self.assertEqual(DB_result.restaurant_name, 'Siam') #testing restaurant name is what it should be\n \n self.assertIn(b\"Your Favourite has been saved\", result.data)", "def test_create(self):\n pass", "def __init__(self, restaurant_name, cuisine_type):\n self.restaurant_name = restaurant_name\n self.cuisine_type = cuisine_type\n self.numbers_served = 0", "def describe_restaurant(self):\n print(f\"{self.restaurant_name} is a new restaurant opening on Main Street!\")\n print(f\"The restaurant specializes in {self.cuisine_type}.\")", "def test_creating_recipe_with_ingredients(self):\n ingredient1 = sample_ingredients(user=self.user, name='Prawns')\n ingredient2 = sample_ingredients(user=self.user, name='Garlic')\n\n payload = {\n 'title': 'Avocado lime cheesecake',\n 'time_minutes': 20,\n 'price': 500.00,\n 'currency': 'NGN',\n 'ingredients': [ingredient1.id, ingredient2.id]\n }\n self.evaluate_recipe(ingredient1, ingredient2, payload, 'ingredient')", "def describe_restaurant(self):\n print(f\"{self.restaurant_name} is a new restaurant opening on Main Street!\")\n print(f\"The restaurant specializes in {self.cuisine_type}-style food.\")", "def test_create_new_recipe(self):\n payload = {\n 'title': 'Cheescake',\n 'time_taken': 35,\n 'price': 5\n }\n\n res = self.client.post(RECIPE_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n\n recipe = Recipe.objects.get(id=res.data['id'])\n for key in payload.keys():\n self.assertEqual((payload)[key], getattr(recipe, key))\n\n # recipe = get_sample_recipe(self.sample_user)\n # db_recipe =\n\n # self.assertEqual(recipe.title, )", "def test_create_a_recommendation(self):\n recommendation = Recommendation(product_id=1, recommendation_product_id=2, relationship=Type.UP_SELL)\n self.assertTrue(recommendation != None)\n self.assertEquals(recommendation.relationship, Type.UP_SELL)\n self.assertEquals(recommendation.product_id, 1)\n self.assertEquals(recommendation.recommendation_product_id, 2)", "def __init__(self, nome, tipo):\n self.restaurant_name = nome\n self.cuisine_type = tipo", "def test_user_get_restaurants_list(self):\n response = self.client.get('/api/places/', format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_create_pizza(self):\n url = reverse('pizzas-list')\n data = {'name': 'Quattro Formaggio'}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Pizza.objects.count(), 1)\n self.assertEqual(Pizza.objects.get().name, 'Quattro Formaggio')", "def test_get_restaurant_by_id_none(self):\n from espresso import db\n from espresso import Restaurant\n\n name = 'Restaurant Greco'\n db.session.add(Restaurant(name=name, creator='test-user@gmail.com'))\n db.session.commit()\n\n # Since this is a freshly created table, the only id should be 1.\n # id 2 does not exist.\n resp = self.test_client.get(self.API_BASE + '/2', headers=auth_header_cru_restaurants)\n 
self.assertEqual(resp.status_code, 404)", "def test_gear_create(self):\n gear_cam = Gear.objects.get(name='Cam')\n gear_nut = Gear.objects.get(name='Nut')\n self.assertEquals(\n gear_cam,\n Gear(id=1, name='Cam', desc='A cam', brand='OnlyCams', weight_grams='1',\n length_mm='1', width_mm='1', depth_mm='1', locking=False, owner=None)\n )\n self.assertEquals(\n gear_nut,\n Gear(id=2, name='Nut', desc='A Nut', brand='OnlyNuts', weight_grams='1',\n length_mm='1', width_mm='1', depth_mm='1', locking=False, owner=None)\n )", "def Restaurant_get_info() -> Restaurant:\r\n name = input(\"Please enter the restaurant's name: \")\r\n cuisine = input(\"Please enter the kind of food served: \")\r\n phone = input(\"Please enter the phone number: \")\r\n menu = menu_enter()\r\n return Restaurant(name, cuisine, phone, menu)", "def test_create_recipe_with_ingredient(self):\n ingredient1 = sample_ingredient(user=self.user, name='Prawns')\n ingrident2 = sample_ingredient(user=self.user, name ='Ginger')\n\n payload = {\n 'title': 'Thai prawn and curry',\n 'ingredient': [ingredient1.id,ingrident2.id],\n 'time_minuts':60,\n 'price': 250\n }\n res = self.client.post(RECIPE_URL,payload)\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n\n recipe = Recipe.objects.get(id=res.data['id'])\n ingredients = recipe.ingredient.all()\n self.assertEqual(ingredients.count(),2)\n self.assertIn(ingredient1, ingredients)\n self.assertIn(ingrident2,ingredients)", "def test_shoppingitems_creation(self):\n # register and login a user\n self.app.post('/register', data=self.user_reg_details)\n self.app.post('/login', data=self.user_login_details)\n # create a shopping list\n self.shopping_class_obj.create_list(\n 'Easter', 'maina@gmail.com')\n # make a post request to add an item\n res = self.app.post(\n '/shoppingitems/Easter', data={'item-name': 'Bread'})\n self.assertEqual(res.status_code, 200)\n response = self.item_class_obj.add_item(\n 'Easter', 'Bread', 'maina@gmail.com')\n self.assertIsInstance(response, list)\n # check if item was successfully created\n self.assertIn(\"Bread\", str(res.data))", "def test_create_basic_recipe(self):\n\n payload = {'name': 'Focaccia', 'description': 'Detailed description'}\n\n res = self.client.post(RECIPES_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n\n recipe = Recipe.objects.get(id=res.data['id'])\n\n self.assertEqual(payload['name'], recipe.name)\n self.assertEqual(payload['description'], recipe.description)", "def setUp(self):\n response = self.client.post('/trainer/create/',\n {\"name\": \"Blue\",\n \"las_name\": \"Oak\"})\n self.trainer_id = response.json()[\"id\"]", "def test_update_restaurant(self):\n from espresso import db\n from espresso import Restaurant\n\n name = 'Restaurant Mexicano'\n zip_code = \"94110\"\n db.session.add(Restaurant(name=name, creator='test-user@gmail.com', zip_code=zip_code))\n db.session.commit()\n\n headers = {'Content-Type': 'application/json'}\n headers.update(auth_header_cru_restaurants)\n website = 'www.mexicano-nj.com'\n email = 'mexicano-nj@gmail.com'\n info = {'website': website, 'email': email}\n resp = self.test_client.put(self.API_BASE + '/1', headers=headers, data=json.dumps(info))\n\n self.assertEqual(resp.status_code, 200)\n resp_dict = json.loads(resp.data)\n self.assertEqual(resp_dict['id'], 1)\n self.assertTrue(name in resp_dict['message'])\n\n # Check that all restaurant fields are returned.\n self.assertTrue('restaurant' in resp_dict)\n self.assertEqual(resp_dict['restaurant']['id'], 1)\n 
self.assertEqual(resp_dict['restaurant']['website'], website)\n self.assertEqual(resp_dict['restaurant']['email'], email)\n self.assertEqual(resp_dict['restaurant']['zip_code'], zip_code)\n\n self.assertTrue('name' in resp_dict['restaurant'])\n self.assertTrue('street' in resp_dict['restaurant'])\n self.assertTrue('suite' in resp_dict['restaurant'])\n self.assertTrue('city' in resp_dict['restaurant'])\n self.assertTrue('state' in resp_dict['restaurant'])\n self.assertTrue('phone_num' in resp_dict['restaurant'])\n self.assertTrue('date_established' in resp_dict['restaurant'])\n self.assertTrue('creator' in resp_dict['restaurant'])\n\n # -----------------------------\n # Make a separate request to retrieve the restaurant and assert that updated fields are as intended\n resp = self.test_client.get(self.API_BASE + '/1', headers=auth_header_cru_restaurants)\n\n self.assertEqual(resp.status_code, 200)\n resp_dict = json.loads(resp.data)\n self.assertEqual(resp_dict['restaurant']['id'], 1)\n self.assertEqual(resp_dict['restaurant']['website'], website)\n self.assertEqual(resp_dict['restaurant']['email'], email)\n self.assertEqual(resp_dict['restaurant']['zip_code'], zip_code) # Make sure this has not changed", "def test_create(self):\n self.app\n pass", "def test_create_goal(self):\n pass", "def describe_restaurant(self):\n\t\tprint(\"name of the restaurant is \" + self.restaurant_name)\n\t\tprint(\"cuisine type is \" + self.cuisine_type)", "def test_create_boat(self):\n pass", "def test_create_restaurant_with_name_creator_only(self):\n headers = {'Content-Type': 'application/json'}\n headers.update(auth_header_cru_restaurants)\n name = 'Restaurant Chinois'\n info = {'name': name, 'creator': 'nobody@gmail.com'}\n resp = self.test_client.post(self.API_BASE + '/create', headers=headers, data=json.dumps(info))\n\n self.assertEqual(resp.status_code, 200)\n resp_dict = json.loads(resp.data)\n self.assertTrue(name in resp_dict['message'])", "def create_meal():", "def setUp(self):\n self.prod_1 = Product.objects.create(\n pk=1,\n ean='3350033118072',\n name='test 1',\n category='cat 1',\n image='product_default.png',\n nutriscore='u'\n )\n\n self.user_1 = User.objects.create_user(\n pk=1,\n username='Fav Models Unit Test 1',\n email='boggusmail@boggusmail.net'\n )\n\n self.fav_1 = Favourite.objects.create(\n pk=1,\n date_added='2019-12-20 09:00:00',\n user=self.user_1,\n product=self.prod_1\n )", "def test_create_rating(self):\n rating = RatingFactory()\n # There should be 1 rating\n self.assertEqual(1, Rating.objects.all().count())", "def setUp(cls):\n cls.place = Place()\n cls.place.city_id = \"hawaii808\"\n cls.place.user_id = \"modern123\"\n cls.place.name = \"The Modern Honolulu\"\n cls.place.description = \"The heart of Waikiki\"\n cls.place.number_rooms = 375\n cls.place.number_bathrooms = 1\n cls.place.max_guest = 10000\n cls.place.price_by_night = 300\n cls.place.latitude = 21.306944\n cls.place.longitude = -157.858337\n cls.place.amenity_ids = [\"amenity321\"]", "def test_create_basic_recipe(self):\n payload = {\n 'title': 'Cake',\n 'time_minutes': 40,\n 'price': 20,\n }\n res = self.client.post(RECIPE_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n recipe = Recipe.objects.get(id=res.data['id'])\n for key in payload:\n self.assertEqual(payload[key], getattr(recipe, key))\n serializer = RecipeDetailSerializer(recipe)\n self.assertEqual(res.data, serializer.data)", "def test_get_restaurant(self):\n url = \"/get_restaurants\"\n response = 
app.test_client().get(url)\n response_json = response.json\n with open('expected_responses/restaurants.json', 'r') as f:\n datastore = json.load(f)\n\n assert datastore == response_json, logging.error(\n \"GET Restaurants Failed!\")\n logging.info(\"GET Restaurants API Tested\")", "def test_ingredients_create(self):\n app = self.create_app()\n c = app.test_client()\n\n # test if authorization is required to create an ingredient\n rv = c.get('/ingredients/create')\n self.assertRedirects(rv, \"/auth/login\")\n\n register(c, app.config[\"USERNAME\"], app.config[\"PASSWORD\"])\n login(c, app.config[\"USERNAME\"], app.config[\"PASSWORD\"])\n c.get('/ingredients/create')\n self.assert_template_used(\"ingredients/create.html\")\n\n # tests if ingredient already in database\n create_ingredient(c, {'id': 1, 'name': \"ing_unittest1_liquid\", 'portion_size': 4, 'portion_size_unit': \"cup\",\n 'protein': 5.5, 'fat': 7.1, 'carbs': 20.5, 'calories': 98, 'price': 0,\n 'price_size': 0.01, 'price_size_unit': \"gal\", 'tag': \"dairy\", 'notes': \"no notes\"})\n self.assert_message_flashed(\"Ingredient already in the database.\")\n\n # tests inserting new ingredient\n create_ingredient(c, {'id': 1, 'name': \"XXXXX\", 'portion_size': 4, 'portion_size_unit': \"cup\",\n 'protein': 5.5, 'fat': 7.1, 'carbs': 20.5, 'calories': 98, 'price': 0,\n 'price_size': 0.01, 'price_size_unit': \"gal\", 'tag': \"dairy\", 'notes': \"no notes\"})\n self.assert_template_used(\"ingredients/index.html\")", "def test_recommendation(self):\n john_starks = Athlete(first_name=\"John\", last_name=\"Starks\", sport=\"NBA\", recommendation=\"a\")\n self.assertEqual(john_starks.recommendation, \"a\")", "def test_create_run(self):\n pass", "def test_create_recipe_with_ingredients(self):\n\n payload = {\n 'name': 'Gnocchi',\n 'description': 'A detailed description of a yummy recipe!',\n 'ingredients': [\n {'name': 'Potatoes'},\n {'name': 'Flour'},\n {'name': 'Nutmeg'}\n ]\n }\n\n res = self.client.post(RECIPES_URL, payload, format='json')\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n\n recipe = Recipe.objects.get(id=res.data['id'])\n\n self.assertEqual(payload['name'], recipe.name)\n self.assertEqual(payload['description'], recipe.description)\n self.assertEqual(recipe.ingredients.count(), 3)\n self.assertEqual(recipe.ingredients.first().name, 'Potatoes')", "def test_create_customer_rental(self):\n create_rental_url = reverse(\n \"customer_rental_list\", kwargs={\"pk\": self.user1.pk}\n )\n\n data = {\"book\": self.book1.pk}\n response = self.client.post(create_rental_url, data=data, format=\"json\")\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)", "def test_get_details(self):\n restaurant_id = 23917\n with self.app.app_context():\n details = ordrin.get_details(restaurant_id)\n\n self.assertEquals(details['name'], 'Test Merchant 20130315',\n 'Check restaurant name on test details.')\n self.assertEquals(details['id'], restaurant_id,\n 'Check restaurant id on test details.')\n self.assertTrue(details['delivers'], 'Check delivery flag on test entry.')\n self.assertTrue(details['allows_asap'],\n 'Check asap flag on test details.')\n self.assertAlmostEqual(details['location'][0], 42.825685,\n 'Check latitude on test details.')\n self.assertAlmostEqual(details['location'][1], -73.879458,\n 'Check longitude on test details.')\n self.assertEquals(details['partner'], 'delivery.com',\n 'Check delivery partner on test details.')\n self.assertEquals(details['address'], '123 FAKE ST',\n 'Check address on test 
details.')\n self.assertTrue(False)", "def test_create_view(self):\n supplement = SupplementFactory(user=self.user_1)\n time = get_utc_now()\n\n post_data = {\n \"supplement_uuid\": str(supplement.uuid),\n \"time\": time.isoformat(),\n \"quantity\": 5,\n }\n\n response = self.client_1.post(self.url, data=post_data)\n self.assertEqual(response.status_code, 200, response.data)\n\n data = response.data\n supplement_name = data[\"supplement\"][\"name\"]\n self.assertEqual(supplement.name, supplement_name)\n self.assertIsNotNone(data[\"display_name\"])", "def test_create_recipe_with_ingredients(self):\n ing1 = sample_ingredient(user=self.user,name=\"ginger\")\n ing2 = sample_ingredient(user=self.user, name=\"Prawn\")\n payload = {\n 'title':'Prawn curry',\n 'ingredient':[ing1.id,ing2.id],\n 'time_minutes':60,\n 'price':10.00,\n }\n res = self.client.post(RECIPE_URL,payload)\n self.assertEqual(res.status_code,status.HTTP_201_CREATED)\n recipe = Recipe.objects.get(id=res.data['id'])\n ingredients = recipe.ingredient.all()\n self.assertEqual(ingredients.count(),2)\n self.assertIn(ing1,ingredients)\n self.assertIn(ing2,ingredients)", "def __init__(self, restaurant_name, cuisine_type):\n super().__init__(restaurant_name, cuisine_type)\n self.flavors = ['chocolate', 'peanut', 'strawberry']", "def __init__(self,restaurant_name, cuisine_type):\r\n\t\tsuper().__init__(restaurant_name,cuisine_type)\r\n\t\tself.flavors = ['chocolate', 'pistachio','mint','vanilla']", "def test_sport(self):\n john_starks = Athlete(first_name=\"John\", last_name=\"Starks\", sport=\"NBA\", recommendation=\"a\")\n self.assertEqual(john_starks.sport, \"NBA\")", "def test_create_from_pear(self):\n pass", "def test_create_recipe_with_ingredients(self):\n ingredient1 = sample_ingredient(user=self.user, name = 'bla')\n ingredient2 = sample_ingredient(user=self.user, name = 'blaa')\n payload = {\n 'title': 'red curry',\n 'ingredients': [ingredient1.id, ingredient2.id],\n 'time_minutes': 30,\n 'price': 30.00\n }\n res = self.client.post(RECIPE_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n recipe = Recipe.objects.get(id=res.data['id'])\n ingredients = recipe.ingredients.all()\n\n self.assertEqual(ingredients.count(), 2)\n self.assertIn(ingredient1, ingredients)\n self.assertIn(ingredient2, ingredients)", "def test_create_ingredient(self):\n\n ingredient_payload = {'name': 'Test Ingredient'}\n self.client.post(URL_INGREDIENTS, ingredient_payload)\n\n is_ingredient_created = Ingredient.objects.filter(\n user=self.user,\n name=ingredient_payload['name']\n ).exists()\n\n self.assertTrue(is_ingredient_created)", "def test_create_location(self):\n location = self.location\n\n self.assertTrue(isinstance(location, Location))\n self.assertEqual(location.name, \"Test Location\")", "def restaurant_only():\n work_time = {\n \"Понедельник\": \"8:00-23:00\",\n \"Вторник\": \"8:00-23:00\",\n \"Среда\": \"8:00-23:00\",\n \"Четверг\": \"8:00-23:00\",\n \"Пятница\": \"8:00-23:00\",\n \"Суббота\": \"8:00-23:00\",\n \"Воскресенье\": \"Выходной\",\n }\n restaurant = Restaurant(\"Снежинка\", work_time, False)\n return restaurant", "def test_init(self):\n self.assertEqual(self.ing_mgr.ingredient_list, [])\n self.assertEqual(self.ing_mgr.user_input, True)", "def test_instance(self):\n self.assertIsInstance(self.new_review, Review)", "def test_item_factory_class():\n # __init__()\n factory = ItemFactory()\n pizza_menuitem = MenuItem(\"cheese\", \"Pizzas\", True, 10.0, 1)\n drink_menuitem = MenuItem(\"fanta\", \"Drinks\", True, 
10.0, 1)\n side_menuitem = MenuItem(\"fries\", \"Sides\", True, 10.0, 1)\n none_menuitem = MenuItem(\"oreo\", \"oreo\", True, 10.0, 1)\n medium = MenuItem(\"medium\", \"size\", False, 4.0, 1)\n\n # create_item()\n expected_pizza = Pizza(pizza_menuitem, medium)\n expected_drink = Drink(drink_menuitem, medium)\n expected_side = Side(side_menuitem)\n pizza = factory.create_item(pizza_menuitem, medium)\n assert pizza == expected_pizza\n assert factory.create_item(drink_menuitem, medium) == expected_drink\n assert factory.create_item(side_menuitem) == expected_side\n assert not factory.create_item(none_menuitem, medium)", "def test_create_shelf(self, *_):\n form = forms.ShelfForm()\n form.data[\"user\"] = self.local_user.id\n form.data[\"name\"] = \"new shelf name\"\n form.data[\"description\"] = \"desc\"\n form.data[\"privacy\"] = \"unlisted\"\n request = self.factory.post(\"\", form.data)\n request.user = self.local_user\n\n views.create_shelf(request)\n\n shelf = models.Shelf.objects.get(name=\"new shelf name\")\n self.assertEqual(shelf.privacy, \"unlisted\")\n self.assertEqual(shelf.description, \"desc\")\n self.assertEqual(shelf.user, self.local_user)", "def test_create_item(self):\n item = self.item\n\n self.assertTrue(isinstance(item, Item))\n self.assertEqual(item.name, \"Test Item\")", "def __init__(self, restaurant_name, cuisine_type):\n super().__init__(restaurant_name, cuisine_type)\n self.flavors = [\"vanilla\", \"chocolate\", \"strawberry\", \"raspberry\",\n \"cream cheese\", \"blueberry\", \"snickers\", \"chocolate chip\"]", "def test_new(self):", "def test_new(self):", "def test_creating_recipe_with_ingredients(self):\n ingredient1 = create_sample_ingredient(user=self.user, name=\"Paprika\")\n ingredient2 = create_sample_ingredient(user=self.user, name=\"Salad\")\n\n payload = {\n \"title\": \"Green Salad\",\n \"time_minutes\": 34,\n \"price\": 4.66,\n \"ingredients\": [ingredient1.id, ingredient2.id]\n }\n res = self.client.post(RECIPE_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n recipe = Recipe.objects.get(id=res.data['id'])\n ingredients = recipe.ingredients.all()\n self.assertEqual(ingredients.count(), 2)\n self.assertIn(ingredient1, ingredients)\n self.assertIn(ingredient2, ingredients)", "def describe_restaurant(self):\n print(f\"\\nRestaurant name: {self.restaurant_name}\")\n print(f\"Cuisine type: {self.cuisine_type}\")", "def test_client_tax_information_create(self):\n pass", "def test_todo_create(client):\n # creates Model.Todo object\n with db.atomic():\n todo = models.Todo.create(name='Shopping')\n\n assert isinstance(todo, models.Todo)\n assert todo.name == 'Shopping'\n assert hasattr(todo, 'id')\n assert todo.id is 1", "def create(self, validated_data):\n user_data = validated_data.pop('user')\n user = User.object.get(id=user_data.id)\n restaurant, created = Restaurant.objects.update_or_create(user=user, data=validated_data)\n return restaurant", "def test_create(self):\n filter = Bleach()\n self.assertIsInstance(filter, Bleach)", "def __init__(self, restaurant_name, cuisine_type):\n super().__init__(restaurant_name, cuisine_type)\n self.flavors = [\"Chocolate\", \"Vanilla\", \"Strawberryes\"]", "def setUp(self):\n self.place = Place()", "def test_create(self):\n self.assertTrue(WayPoint.objects.exists())", "def describe_restaurant(self):\r\n\t\tprint(\"Our restaurant is \" + self.restaurant_name.title() + \".\")\r\n\t\tprint(\"We are known for our \" + self.cuisine_type.title())", "def test_customer_creation():\n agent = 
AgentFactory()\n customer = CustomerFactory(agent=agent)\n assert agent == customer.agent\n\n customer.name = 'customer test name 1'\n customer.customer_type = 'hom'\n customer.save()\n assert customer.name == 'customer test name 1'\n\n customer.name = 'customer test name 2'\n customer.customer_type = 'oth'\n customer.save()\n assert customer.name == 'customer test name 2'", "def setUp(self):\n super(RestaurantTest, self).setUp()\n qsa = {'username': 'davedash.livejournal.com',\n 'password': 'sexy'}\n response = self.client.post('/login', qsa, follow=True)\n # print dir(response) ['__class__', '__contains__', '__delattr__',\n # '__delitem__', '__dict__', '__doc__', '__getattribute__',\n # '__getitem__', '__hash__', '__init__', '__iter__', '__module__',\n # '__new__', '__reduce__', '__reduce_ex__', '__repr__',\n # '__setattr__', '__setitem__', '__str__', '__weakref__',\n # '_charset', '_container', '_convert_to_ascii', '_get_content',\n # '_headers', '_is_string', '_set_content', 'client', 'close',\n # 'content', 'context', 'cookies', 'delete_cookie', 'flush', 'get',\n # 'has_header', 'items', 'next', 'request', 'set_cookie',\n # 'status_code', 'tell', 'template', 'write']" ]
[ "0.80320793", "0.7007531", "0.6976299", "0.69665337", "0.68096566", "0.67881805", "0.67846525", "0.67737657", "0.6733063", "0.6733063", "0.6733063", "0.67071205", "0.6684821", "0.6684315", "0.6668093", "0.6642441", "0.6629448", "0.6628427", "0.6583304", "0.6565177", "0.6565177", "0.6562474", "0.65474206", "0.6518443", "0.64945996", "0.6494415", "0.649158", "0.649158", "0.6480835", "0.6467143", "0.6444726", "0.6444726", "0.64298654", "0.6423784", "0.64222455", "0.6405405", "0.63803804", "0.6351496", "0.6344309", "0.63426805", "0.63406307", "0.63232106", "0.6284499", "0.62831444", "0.62758106", "0.6267643", "0.6267458", "0.6266938", "0.62290573", "0.6226554", "0.62040365", "0.61926293", "0.6165841", "0.6165765", "0.6156342", "0.61474997", "0.61378276", "0.61167794", "0.6108232", "0.61071366", "0.6104978", "0.609561", "0.6095392", "0.60943484", "0.60846037", "0.60830504", "0.60796195", "0.60789263", "0.6076168", "0.6073066", "0.60580957", "0.60549265", "0.6053773", "0.6052345", "0.60494053", "0.60451144", "0.6042889", "0.6041034", "0.6033946", "0.6031268", "0.6016455", "0.60129815", "0.6011785", "0.60104454", "0.6000951", "0.59947795", "0.5992423", "0.5992423", "0.59633046", "0.59567475", "0.59531534", "0.59479696", "0.5940478", "0.5939602", "0.59342754", "0.59311223", "0.5924969", "0.5916948", "0.5916328", "0.59156334" ]
0.6356295
37
Simple restaurant instance creating test with correct Kitchen, Hall and Delivery
def test_full_restaurant(restaurant_full):
    assert restaurant_full
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setUp(self):\n valid_name = \"Tungalo\"\n valid_location = \"Rivne\"\n valid_status = 0\n valid_tables_count = 10\n valid_description = \"description\"\n\n self.restaurant = Restaurant()\n self.restaurant.name = valid_name\n self.restaurant.location = valid_location\n self.restaurant.status = valid_status\n self.restaurant.tables_count = valid_tables_count\n self.restaurant.description = valid_description", "def test_create_restaurant_with_all_fields(self):\n headers = {'Content-Type': 'application/json'}\n headers.update(auth_header_cru_restaurants)\n name = 'Restaurant Chinois'\n street = '999 Sutter St'\n suite = '510'\n city = 'Wood-Ridge'\n state = 'NJ'\n zip_code = '07075'\n phone_num = '201-555-7777'\n website = 'www.chinois-nj.com'\n email = 'chinois-nj@gmail.com'\n date_established = '2014'\n creator = 'some-user@gmail.com'\n info = {'name': name, 'street': street, 'suite': suite,\n 'city': city, 'state': state, 'zip_code': zip_code,\n 'phone_num': phone_num, 'website': website, 'email': email,\n 'date_established': date_established, 'creator': creator\n }\n resp = self.test_client.post(self.API_BASE + '/create', headers=headers, data=json.dumps(info))\n\n self.assertEqual(resp.status_code, 200)\n resp_dict = json.loads(resp.data)\n self.assertEqual(resp_dict['id'], 1)\n self.assertTrue(name in resp_dict['message'])\n\n # Check that all restaurant fields are returned.\n self.assertTrue('restaurant' in resp_dict)\n self.assertEqual(resp_dict['restaurant']['id'], 1)\n self.assertEqual(resp_dict['restaurant']['name'], name)\n self.assertEqual(resp_dict['restaurant']['street'], street)\n self.assertEqual(resp_dict['restaurant']['suite'], suite)\n self.assertEqual(resp_dict['restaurant']['city'], city)\n self.assertEqual(resp_dict['restaurant']['state'], state)\n self.assertEqual(resp_dict['restaurant']['zip_code'], zip_code)\n self.assertEqual(resp_dict['restaurant']['phone_num'], phone_num)\n self.assertEqual(resp_dict['restaurant']['website'], website)\n self.assertEqual(resp_dict['restaurant']['email'], email)\n self.assertEqual(resp_dict['restaurant']['date_established'], date_established)\n self.assertEqual(resp_dict['restaurant']['creator'], creator)\n\n # -----------------------------\n # Make a separate request to retrieve the restaurant and assert that all fields are as created\n resp = self.test_client.get(self.API_BASE + '/1', headers=auth_header_cru_restaurants)\n\n self.assertEqual(resp.status_code, 200)\n resp_dict = json.loads(resp.data)\n self.assertEqual(resp_dict['restaurant']['id'], 1)\n self.assertEqual(resp_dict['restaurant']['name'], name)\n self.assertEqual(resp_dict['restaurant']['street'], street)\n self.assertEqual(resp_dict['restaurant']['suite'], suite)\n self.assertEqual(resp_dict['restaurant']['city'], city)\n self.assertEqual(resp_dict['restaurant']['state'], state)\n self.assertEqual(resp_dict['restaurant']['zip_code'], zip_code)\n self.assertEqual(resp_dict['restaurant']['phone_num'], phone_num)\n self.assertEqual(resp_dict['restaurant']['website'], website)\n self.assertEqual(resp_dict['restaurant']['email'], email)\n self.assertEqual(resp_dict['restaurant']['date_established'], date_established)\n self.assertEqual(resp_dict['restaurant']['creator'], creator)", "def test_foodtrucks_create(self):\n\t\tprint 'API Test: create a new foodtruck'\n\t\turl = reverse('foodtruck_list')\n\t\tdata = {\"status\" : \"APPROVED\",\\\n\t\t \"expirationdate\" : \"2015-03-15T00:00:00\",\\\n\t\t \"permit\" : \"14MFF-0107\",\\\n\t\t \"block\" : \"3794\",\\\n\t\t 
\"received\" : \"Jun 24 2014 1:49PM\",\\\n\t\t \"facilitytype\" : \"Truck\",\\\n\t\t \"blocklot\" : \"3794002A\",\\\n\t\t \"locationdescription\" : \"02ND ST: TOWNSEND ST to KING ST (700 - 799)\",\\\n\t\t \"cnn\" : 148000,\\\n\t\t \"priorpermit\" : 1,\\\n\t\t \"approved\" : \"2014-06-24T13:55:30\",\\\n\t\t \"noisent\" : \"2013-07-25T00:00:00\",\\\n\t\t \"schedule\" : \"http://bsm.sfdpw.org/PermitsTracker/reports/report.aspx?title=schedule&report=rptSchedule&params=permit=14MFF-0107&ExportPDF=1&Filename=14MFF-0107_schedule.pdf\",\\\n\t\t \"address\" : \"750 02ND ST\",\\\n\t\t \"applicant\" : \"Steve's Mobile Deli\",\\\n\t\t \"lot\" : \"002A\",\\\n\t\t \"fooditems\" : \"Cold Truck: Pre-packaged sandwiches: Burgers: Hot Dogs: Muffin Sandwiches: Enchiladas: Bagels: Burritos: Salads: Snacks: Beverages\",\\\n\t\t \"longitude\" : -122.402978526686,\\\n\t\t \"latitude\" : 37.7302216813049, \\\n\t\t \"y\" : 2093947.369,\\\n\t\t \"x\" : 6011371.493,\\\n\t\t \"objectid\" : 554527}\n\t\t\n\t\tresponse = self.client.post(url, data, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\t\t\n\t\tquant = '1.000000'\n\t\tfor k, v in data.iteritems():\n\t\t\tif v is not None and (k is \"y\" or k is \"x\" or k is \"latitude\" or k is \"longitude\"):\n\t\t\t\tself.assertEqual(response.data[k].quantize(Decimal(quant)), Decimal(v).quantize(Decimal(quant)))\n\t\t\telif v is not None and (k is \"approved\" or k is \"received\" or k is \"expirationdate\" or k is \"noisent\"):\n\t\t\t\tself.assertEqual(response.data[k], parse(v))\n\t\t\telse:\n\t\t\t\tself.assertEqual(response.data[k], v)\n\t\t\n\t\tresponse = self.client.get(url, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_200_OK)\n\n\t\tfor k, v in data.iteritems():\n\t\t\tif v is not None and (k is \"y\" or k is \"x\" or k is \"latitude\" or k is \"longitude\"):\n\t\t\t\tself.assertEqual(response.data[0][k].quantize(Decimal(quant)), Decimal(v).quantize(Decimal(quant)))\n\t\t\telif v is not None and (k is \"approved\" or k is \"received\" or k is \"expirationdate\" or k is \"noisent\"):\n\t\t\t\tself.assertEqual(response.data[0][k], parse(v))\n\t\t\telse:\n\t\t\t\tself.assertEqual(response.data[0][k], v)\n\t\tprint 'pass'", "def test_create(self):\n retreat = Retreat.objects.create(\n name=\"random_retreat\",\n details=\"This is a description of the retreat.\",\n seats=40,\n address_line1=\"123 random street\",\n postal_code=\"123 456\",\n state_province=\"Random state\",\n country=\"Random country\",\n timezone=\"America/Montreal\",\n price=3,\n start_time=LOCAL_TIMEZONE.localize(datetime(2130, 1, 15, 8)),\n end_time=LOCAL_TIMEZONE.localize(datetime(2130, 1, 17, 12)),\n min_day_refund=7,\n min_day_exchange=7,\n refund_rate=100,\n is_active=True,\n accessibility=True,\n form_url=\"example.com\",\n carpool_url='example2.com',\n review_url='example3.com',\n has_shared_rooms=True,\n room_type=Retreat.DOUBLE_OCCUPATION,\n toilet_gendered=True,\n )\n\n self.assertEqual(retreat.__str__(), \"random_retreat\")", "def setUp(self):\n self.client = APIClient()\n self.apple = Food.objects.create(name=\"apple\", calories=50)\n self.oatmeal = Food.objects.create(name=\"oatmeal\", calories=400)\n self.breakfast = Meal.objects.create(name=\"breakfast\")\n self.snack = Meal.objects.create(name=\"snack\")\n self.lunch = Meal.objects.create(name=\"lunch\")\n self.dinner = Meal.objects.create(name=\"dinner\")\n self.breakfast.foods.add(self.apple)", "def __init__(self, restaurant_name, cuisine_type):\n self.name = 
restaurant_name\n self.food = cuisine_type", "def test_create_hotel(self):\n amsterdam = City.objects.get(name=\"Amsterdam\")\n ibis = Hotel.objects.get(name=\"Ibis\")\n\n self.assertEqual(ibis.city, amsterdam)\n self.assertEqual(ibis.code, \"AMS01\")\n self.assertEqual(ibis.name, \"Ibis\")", "def test_create_restaurant_no_creator(self):\n headers = {'Content-Type': 'application/json'}\n headers.update(auth_header_cru_restaurants)\n info = {'name': 'Ping Yan', 'city': 'Chicago'}\n resp = self.test_client.post(self.API_BASE + '/create', headers=headers, data=json.dumps(info))\n self.assertEqual(resp.status_code, 400)", "def test_create_restaurant_no_name(self):\n headers = {'Content-Type': 'application/json'}\n headers.update(auth_header_cru_restaurants)\n info = {'creator': 'nobody@gmail.com', 'city': 'Chicago'}\n resp = self.test_client.post(self.API_BASE + '/create', headers=headers, data=json.dumps(info))\n self.assertEqual(resp.status_code, 400)", "def test_get_two_restaurants(self):\n from espresso import db\n from espresso import Restaurant\n\n name_1 = 'Restaurant Italiano'\n db.session.add(Restaurant(name=name_1, creator='test-user@gmail.com'))\n name_2 = 'Restaurant Français'\n db.session.add(Restaurant(name=name_2, creator='test-user@gmail.com'))\n db.session.commit()\n\n resp = self.test_client.get(self.API_BASE, headers=auth_header_cru_restaurants)\n self.assertEqual(resp.status_code, 200)\n resp_dict = json.loads(resp.data)\n self.assertEqual(len(resp_dict['restaurants']), 2)\n self.assertEqual(resp_dict['restaurants'][0]['name'], name_1)\n self.assertEqual(resp_dict['restaurants'][1]['name'], name_2)", "def test_get_restaurant_by_id(self):\n from espresso import db\n from espresso import Restaurant\n\n name = 'Restaurant Greco'\n db.session.add(Restaurant(name=name, creator='test-user@gmail.com'))\n db.session.commit()\n\n # Since this is a freshly created table, the first id should be 1\n resp = self.test_client.get(self.API_BASE + '/1', headers=auth_header_cru_restaurants)\n self.assertEqual(resp.status_code, 200)\n resp_dict = json.loads(resp.data)\n self.assertEqual(resp_dict['restaurant']['name'], name)", "def __init__(self, restaurant_name, cuisine_type):\n self.restaurant_name = restaurant_name\n self.cuisine_type = cuisine_type", "def __init__(self, restaurant_name, cuisine_type):\n self.restaurant_name = restaurant_name\n self.cuisine_type = cuisine_type", "def __init__(self, restaurant_name, cuisine_type):\n self.restaurant_name = restaurant_name\n self.cuisine_type = cuisine_type", "def test_get_food(self):\n pass", "def __init__(self, restaurant_name, cuisine_type):\r\n\t\tself.restaurant_name = restaurant_name\r\n\t\tself.cuisine_type = cuisine_type", "def test_get_restaurants(self):\n address = {'number': '375',\n 'street': 'Noe St',\n 'city': 'San Francisco',\n 'zip': '94114'}\n\n with self.app.app_context():\n restaurants = ordrin.get_restaurants(address)\n\n # Ordr.in returns a test entry as the first item in the list when\n # when hitting their testing servers.\n entry = restaurants[0]\n self.assertEquals(entry['na'], 'Test Merchant 20130315')\n self.assertEquals(entry['id'], 23917)", "def __init__(self, restaurant_name, cuisine_type):\n self.name = restaurant_name\n self.type = cuisine_type", "def __init__(self, restaurant_name,cuisine_type):\r\n self.restaurant = restaurant_name\r\n self.cuisine = cuisine_type", "def test_basic_instance_creation(self):\n first = self.constituencies[0]\n self.assertEqual(first.slug, 'my-place')\n 
self.assertEqual(first.get_absolute_url(),\n u\"/constituency/%s/\" % first.slug)\n count = 0\n for user in self.users:\n self.assertEqual(user.postcode, USERS[count]['postcode'])\n count += 1", "def test_add_to_fav_(self):\n result = self.client.post(\"/add_to_fav\", data={\"yelp_biz_id\":\"JA_V9TqDCrkgknqrcUndIQ\", \n \"yelp_rest_name\":\"Siam\", \"yelp_rating\":\"4\", \n \"yelp_category\":\"Thai\", \"yelp_price\":\"$$\", \n \"yelp_image_url\":\"https://s3-media2.fl.yelpcdn.com/bphoto/1SkZwZrRZkQSzRMn_Trs3w/o.jpg\" })\n\n DB_result = Restaurant_details.query.filter_by(biz_id = \"JA_V9TqDCrkgknqrcUndIQ\").first()\n self.assertIsNotNone(DB_result) #testing that the returned result is not NONE\n self.assertEqual(DB_result.restaurant_name, 'Siam') #testing restaurant name is what it should be\n \n self.assertIn(b\"Your Favourite has been saved\", result.data)", "def __init__(self, restaurant_name, cuisine_type):\n\t\tself.name = restaurant_name\n\t\tself.type = cuisine_type", "def test_update_restaurant(self):\n from espresso import db\n from espresso import Restaurant\n\n name = 'Restaurant Mexicano'\n zip_code = \"94110\"\n db.session.add(Restaurant(name=name, creator='test-user@gmail.com', zip_code=zip_code))\n db.session.commit()\n\n headers = {'Content-Type': 'application/json'}\n headers.update(auth_header_cru_restaurants)\n website = 'www.mexicano-nj.com'\n email = 'mexicano-nj@gmail.com'\n info = {'website': website, 'email': email}\n resp = self.test_client.put(self.API_BASE + '/1', headers=headers, data=json.dumps(info))\n\n self.assertEqual(resp.status_code, 200)\n resp_dict = json.loads(resp.data)\n self.assertEqual(resp_dict['id'], 1)\n self.assertTrue(name in resp_dict['message'])\n\n # Check that all restaurant fields are returned.\n self.assertTrue('restaurant' in resp_dict)\n self.assertEqual(resp_dict['restaurant']['id'], 1)\n self.assertEqual(resp_dict['restaurant']['website'], website)\n self.assertEqual(resp_dict['restaurant']['email'], email)\n self.assertEqual(resp_dict['restaurant']['zip_code'], zip_code)\n\n self.assertTrue('name' in resp_dict['restaurant'])\n self.assertTrue('street' in resp_dict['restaurant'])\n self.assertTrue('suite' in resp_dict['restaurant'])\n self.assertTrue('city' in resp_dict['restaurant'])\n self.assertTrue('state' in resp_dict['restaurant'])\n self.assertTrue('phone_num' in resp_dict['restaurant'])\n self.assertTrue('date_established' in resp_dict['restaurant'])\n self.assertTrue('creator' in resp_dict['restaurant'])\n\n # -----------------------------\n # Make a separate request to retrieve the restaurant and assert that updated fields are as intended\n resp = self.test_client.get(self.API_BASE + '/1', headers=auth_header_cru_restaurants)\n\n self.assertEqual(resp.status_code, 200)\n resp_dict = json.loads(resp.data)\n self.assertEqual(resp_dict['restaurant']['id'], 1)\n self.assertEqual(resp_dict['restaurant']['website'], website)\n self.assertEqual(resp_dict['restaurant']['email'], email)\n self.assertEqual(resp_dict['restaurant']['zip_code'], zip_code) # Make sure this has not changed", "def test_create_restaurant_with_token(self):\n url = '/api/places/'\n client = APIClient()\n\n client.credentials(HTTP_AUTHORIZATION=self.test_user1_token)\n response = client.post(url, self.restaurant_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)", "def test_customer_creation():\n agent = AgentFactory()\n customer = CustomerFactory(agent=agent)\n assert agent == customer.agent\n\n customer.name = 'customer 
test name 1'\n customer.customer_type = 'hom'\n customer.save()\n assert customer.name == 'customer test name 1'\n\n customer.name = 'customer test name 2'\n customer.customer_type = 'oth'\n customer.save()\n assert customer.name == 'customer test name 2'", "def __init__(self, restaurant_name, cuisine_type):\n\t\tself.restaurant_name = restaurant_name.title()\n\t\tself.cuisine_type = cuisine_type", "def test_create_shelf(self, *_):\n form = forms.ShelfForm()\n form.data[\"user\"] = self.local_user.id\n form.data[\"name\"] = \"new shelf name\"\n form.data[\"description\"] = \"desc\"\n form.data[\"privacy\"] = \"unlisted\"\n request = self.factory.post(\"\", form.data)\n request.user = self.local_user\n\n views.create_shelf(request)\n\n shelf = models.Shelf.objects.get(name=\"new shelf name\")\n self.assertEqual(shelf.privacy, \"unlisted\")\n self.assertEqual(shelf.description, \"desc\")\n self.assertEqual(shelf.user, self.local_user)", "def __init__(self, restaurant_name, cuisine_type, number_served=0):\n self.restaurant_name = restaurant_name\n self.cuisine_type = cuisine_type\n self.number_served = number_served", "def test_create(self):\n pass", "def test_simple_restaurant(restaurant_only):\n assert restaurant_only", "def __init__(self, restaurant_name, cuisine_type):\n self.name = restaurant_name\n self.type = cuisine_type\n self.number_served = 0", "def __init__(self, restaurant_name, cuisine_type):\n self.name = restaurant_name\n self.type = cuisine_type\n self.number_served = 0", "def test_create_restaurant_with_name_creator_only(self):\n headers = {'Content-Type': 'application/json'}\n headers.update(auth_header_cru_restaurants)\n name = 'Restaurant Chinois'\n info = {'name': name, 'creator': 'nobody@gmail.com'}\n resp = self.test_client.post(self.API_BASE + '/create', headers=headers, data=json.dumps(info))\n\n self.assertEqual(resp.status_code, 200)\n resp_dict = json.loads(resp.data)\n self.assertTrue(name in resp_dict['message'])", "def test_create_recipe_with_ingredient(self):\n ingredient1 = sample_ingredient(user=self.user, name='Prawns')\n ingrident2 = sample_ingredient(user=self.user, name ='Ginger')\n\n payload = {\n 'title': 'Thai prawn and curry',\n 'ingredient': [ingredient1.id,ingrident2.id],\n 'time_minuts':60,\n 'price': 250\n }\n res = self.client.post(RECIPE_URL,payload)\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n\n recipe = Recipe.objects.get(id=res.data['id'])\n ingredients = recipe.ingredient.all()\n self.assertEqual(ingredients.count(),2)\n self.assertIn(ingredient1, ingredients)\n self.assertIn(ingrident2,ingredients)", "def __init__(self, restaurant_name, cuisine_type):\n self.restaurant_name = restaurant_name\n self.cuisine_type = cuisine_type\n self.number_served = 0", "def __init__(self, restaurant_name, cuisine_type):\n self.restaurant_name = restaurant_name\n self.cuisine_type = cuisine_type\n self.number_served = 0", "def test_create_customer_rental(self):\n create_rental_url = reverse(\n \"customer_rental_list\", kwargs={\"pk\": self.user1.pk}\n )\n\n data = {\"book\": self.book1.pk}\n response = self.client.post(create_rental_url, data=data, format=\"json\")\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)", "def test_get_restaurant_by_id_none(self):\n from espresso import db\n from espresso import Restaurant\n\n name = 'Restaurant Greco'\n db.session.add(Restaurant(name=name, creator='test-user@gmail.com'))\n db.session.commit()\n\n # Since this is a freshly created table, the only id should be 1.\n # id 2 does 
not exist.\n resp = self.test_client.get(self.API_BASE + '/2', headers=auth_header_cru_restaurants)\n self.assertEqual(resp.status_code, 404)", "def __init__(self, restaurant_name, cuisine_type):\n self.restaurant_name = restaurant_name\n self.cuisine_type = cuisine_type\n self.number_served = 0", "def test_delivery_factory_class():\n # __init__()\n factory = DeliveryFactory()\n order = Order(1)\n file = \"This is a file.\"\n\n expected_uber = UberEatsDelivery(order, file)\n expected_foodora = FoodoraDelivery(order, file)\n expected_delivery = Delivery(order, \"not uber or foodora\")\n\n assert factory.create_delivery(order, UBER_EATS, file).get_deliverer() == \\\n expected_uber.get_deliverer()\n assert factory.create_delivery(order, FOODORA, file).get_deliverer() == \\\n expected_foodora.get_deliverer()\n assert factory.create_delivery(order, \"not uber or foodora\", file).\\\n get_deliverer() == expected_delivery.get_deliverer()", "def test_create_a_recommendation(self):\n recommendation = Recommendation(product_id=1, recommendation_product_id=2, relationship=Type.UP_SELL)\n self.assertTrue(recommendation != None)\n self.assertEquals(recommendation.relationship, Type.UP_SELL)\n self.assertEquals(recommendation.product_id, 1)\n self.assertEquals(recommendation.recommendation_product_id, 2)", "def test_delete_restaurant(self):\n from espresso import db\n from espresso import Restaurant\n\n name = 'Restaurant Greco'\n db.session.add(Restaurant(name=name, creator='test-user@gmail.com'))\n db.session.commit()\n\n # Since this is a freshly created table, the first id should be 1\n resp = self.test_client.delete(self.API_BASE + '/1', headers=auth_header_all_permissions)\n self.assertEqual(resp.status_code, 200)\n resp_dict = json.loads(resp.data)\n self.assertEqual(resp_dict['success'], True)\n\n resp = self.test_client.get(self.API_BASE + '/1', headers=auth_header_all_permissions)\n self.assertEqual(resp.status_code, 404)\n resp_dict = json.loads(resp.data)\n self.assertEqual(resp_dict['success'], False)", "def __init__(self, restaurant_name, cuisine_type):\n\t\tself.restaurant_name = restaurant_name\n\t\tself.cuisine_type = cuisine_type\n\t\tself.number_served = 0", "def __init__(self, restaurant_name, cuisine_type):\n\t\tself.restaurant_name = restaurant_name\n\t\tself.cuisine_type = cuisine_type\n\t\tself.number_served = 0", "def setUp(self):\n self.prod_1 = Product.objects.create(\n pk=1,\n ean='3350033118072',\n name='test 1',\n category='cat 1',\n image='product_default.png',\n nutriscore='u'\n )\n\n self.user_1 = User.objects.create_user(\n pk=1,\n username='Fav Models Unit Test 1',\n email='boggusmail@boggusmail.net'\n )\n\n self.fav_1 = Favourite.objects.create(\n pk=1,\n date_added='2019-12-20 09:00:00',\n user=self.user_1,\n product=self.prod_1\n )", "def test_creating_recipe_with_ingredients(self):\n ingredient1 = sample_ingredients(user=self.user, name='Prawns')\n ingredient2 = sample_ingredients(user=self.user, name='Garlic')\n\n payload = {\n 'title': 'Avocado lime cheesecake',\n 'time_minutes': 20,\n 'price': 500.00,\n 'currency': 'NGN',\n 'ingredients': [ingredient1.id, ingredient2.id]\n }\n self.evaluate_recipe(ingredient1, ingredient2, payload, 'ingredient')", "def test_get_details(self):\n restaurant_id = 23917\n with self.app.app_context():\n details = ordrin.get_details(restaurant_id)\n\n self.assertEquals(details['name'], 'Test Merchant 20130315',\n 'Check restaurant name on test details.')\n self.assertEquals(details['id'], restaurant_id,\n 'Check restaurant id on test 
details.')\n self.assertTrue(details['delivers'], 'Check delivery flag on test entry.')\n self.assertTrue(details['allows_asap'],\n 'Check asap flag on test details.')\n self.assertAlmostEqual(details['location'][0], 42.825685,\n 'Check latitude on test details.')\n self.assertAlmostEqual(details['location'][1], -73.879458,\n 'Check longitude on test details.')\n self.assertEquals(details['partner'], 'delivery.com',\n 'Check delivery partner on test details.')\n self.assertEquals(details['address'], '123 FAKE ST',\n 'Check address on test details.')\n self.assertTrue(False)", "def create(self, validated_data):\n user_data = validated_data.pop('user')\n user = User.object.get(id=user_data.id)\n restaurant, created = Restaurant.objects.update_or_create(user=user, data=validated_data)\n return restaurant", "def __init__(self, restaurant_name, cuisine_type):\n self.restaurant_name = restaurant_name\n self.cuisine_type = cuisine_type\n self.numbers_served = 0", "def test_create_new_recipe(self):\n payload = {\n 'title': 'Cheescake',\n 'time_taken': 35,\n 'price': 5\n }\n\n res = self.client.post(RECIPE_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n\n recipe = Recipe.objects.get(id=res.data['id'])\n for key in payload.keys():\n self.assertEqual((payload)[key], getattr(recipe, key))\n\n # recipe = get_sample_recipe(self.sample_user)\n # db_recipe =\n\n # self.assertEqual(recipe.title, )", "def describe_restaurant(self):\n print(f\"{self.restaurant_name} is a new restaurant opening on Main Street!\")\n print(f\"The restaurant specializes in {self.cuisine_type}.\")", "def test_create_home(self):\n factory = APIRequestFactory()\n request = factory.post('/homes/', {\n 'area_unit': 'SqFt',\n 'bathrooms': 2,\n 'bedrooms': 4,\n 'home_size': 1372,\n 'home_type': 'SingleFamily',\n 'last_sold_date': '',\n 'last_sold_price': '',\n 'link': 'https://www.zillow.com/homedetails/7417-Quimby-Ave-West-Hills-CA-91307/19866015_zpid/',\n 'price': '$739K',\n 'property_size': 10611,\n 'rent_price': '',\n 'rentzestimate_amount': 2850,\n 'rentzestimate_last_updated': '08/07/2018',\n 'tax_value': 215083,\n 'tax_year': 2017,\n 'year_built': 1956,\n 'zestimate_amount': 709630,\n 'zestimate_last_updated': '08/07/2018',\n 'zillow_id': 19866015,\n 'address': '7417 Quimby Ave',\n 'city': 'West Hills',\n 'state': 'CA',\n 'zipcode': 91307})\n\n self.assertEqual(Home.objects.count(), 0) # Bogus test", "def test_api_can_add_food_to_a_meal(self):\n response = self.client.post(f'/api/v1/meals/{self.breakfast.id}/foods/{self.oatmeal.id}')\n # import code; code.interact(local=dict(globals(), **locals()))\n\n self.assertEqual(response.data['message'], \"Successfully added oatmeal to breakfast\")", "def test_gear_create(self):\n gear_cam = Gear.objects.get(name='Cam')\n gear_nut = Gear.objects.get(name='Nut')\n self.assertEquals(\n gear_cam,\n Gear(id=1, name='Cam', desc='A cam', brand='OnlyCams', weight_grams='1',\n length_mm='1', width_mm='1', depth_mm='1', locking=False, owner=None)\n )\n self.assertEquals(\n gear_nut,\n Gear(id=2, name='Nut', desc='A Nut', brand='OnlyNuts', weight_grams='1',\n length_mm='1', width_mm='1', depth_mm='1', locking=False, owner=None)\n )", "def describe_restaurant(self):\n print(f\"{self.restaurant_name} is a new restaurant opening on Main Street!\")\n print(f\"The restaurant specializes in {self.cuisine_type}-style food.\")", "def test_client_tax_information_create(self):\n pass", "def test_create_pizza(self):\n url = reverse('pizzas-list')\n data = {'name': 'Quattro 
Formaggio'}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Pizza.objects.count(), 1)\n self.assertEqual(Pizza.objects.get().name, 'Quattro Formaggio')", "def test_create_boat(self):\n pass", "def test_create_confirm_delivery_details(self):\n pass", "def test_create_shipment(self):\n pass", "def main():\n\n restaurant = Restaurant(\n name=\"Table By Basant\",\n description=\"North India, Italian, Chinese\",\n address=\"Redwood Shores\",\n operating_hours=\"11AM to 11PM\",\n ratings=4.8,\n menu=Menu(\n title=\"Delights\",\n num_of_dishes=5,\n dishes=[\n Dish(name=\"Gajrela\", price=300, discount=0.50, description=\"indian sweet\", ratings=4.5),\n Dish(name=\"Noodles\", price=200, discount=0.20, description=\"chinese\", ratings=4.3),\n Dish(name=\"Burger\", price=100, discount=0.40, description=\"western\", ratings=5.0),\n Dish(name=\"Pizza\", price=500, discount=0.10, description=\"italian\", ratings=4.8),\n Dish(name=\"Samosa\", price=30, discount=0.0, description=\"indian snacks\", ratings=5.0)\n ]\n )\n\n )\n\n restaurant.show_restaurant()", "def test_shoppingitems_creation(self):\n # register and login a user\n self.app.post('/register', data=self.user_reg_details)\n self.app.post('/login', data=self.user_login_details)\n # create a shopping list\n self.shopping_class_obj.create_list(\n 'Easter', 'maina@gmail.com')\n # make a post request to add an item\n res = self.app.post(\n '/shoppingitems/Easter', data={'item-name': 'Bread'})\n self.assertEqual(res.status_code, 200)\n response = self.item_class_obj.add_item(\n 'Easter', 'Bread', 'maina@gmail.com')\n self.assertIsInstance(response, list)\n # check if item was successfully created\n self.assertIn(\"Bread\", str(res.data))", "def test_user_get_restaurants_list(self):\n response = self.client.get('/api/places/', format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def setUp(self):\n response = self.client.post('/trainer/create/',\n {\"name\": \"Blue\",\n \"las_name\": \"Oak\"})\n self.trainer_id = response.json()[\"id\"]", "def test_create(self):\n self.assertEqual(Product.objects.count(), 2)\n payload = {\n 'name': 'New product',\n 'category': self.category_1.id,\n 'sku': '11111111',\n 'description': 'New product description',\n 'price': 39.99\n }\n\n headers = {\n 'HTTP_AUTHORIZATION': 'Token ' + str(self.token_admin)\n }\n response = self.client.post(\n '/api/products/', data=payload, content_type='application/json', **headers)\n self.assertEqual(response.status_code, 201)\n self.assertEqual(response['Content-Type'], 'application/json')\n self.assertEqual(Product.objects.count(), 3)\n\n product = Product.objects.get(name='New product')\n self.assertEqual(product.name, 'New product')\n self.assertEqual(product.category, self.category_1)\n self.assertEqual(product.sku, '11111111')\n self.assertEqual(product.description, 'New product description')\n self.assertEqual(float(product.price), 39.99)", "def test_create_recipe_with_ingredients(self):\n ing1 = sample_ingredient(user=self.user,name=\"ginger\")\n ing2 = sample_ingredient(user=self.user, name=\"Prawn\")\n payload = {\n 'title':'Prawn curry',\n 'ingredient':[ing1.id,ing2.id],\n 'time_minutes':60,\n 'price':10.00,\n }\n res = self.client.post(RECIPE_URL,payload)\n self.assertEqual(res.status_code,status.HTTP_201_CREATED)\n recipe = Recipe.objects.get(id=res.data['id'])\n ingredients = recipe.ingredient.all()\n self.assertEqual(ingredients.count(),2)\n 
self.assertIn(ing1,ingredients)\n self.assertIn(ing2,ingredients)", "def setUp(self):\n\n self.caffe = Caffe.objects.create(\n name='kafo',\n city='Gliwice',\n street='Wieczorka',\n house_number='14',\n postal_code='44-100'\n )\n self.filtry = Caffe.objects.create(\n name='filtry',\n city='Warszawa',\n street='Filry',\n house_number='14',\n postal_code='44-100'\n )\n\n self.kate = Employee.objects.create(\n username='KateT',\n first_name='Kate',\n last_name='Tempest',\n telephone_number='12345678',\n email='kate@tempest.com',\n favorite_coffee='flat white',\n caffe=self.caffe\n )\n\n self.cash_report = CashReport.objects.create(\n creator=self.kate,\n caffe=self.caffe,\n cash_before_shift=2000,\n cash_after_shift=3000,\n card_payments=500,\n amount_due=1900\n )\n\n Company.objects.create(name='GoodCake', caffe=self.caffe)\n Company.objects.create(name='Tesco', caffe=self.caffe)\n\n Expense.objects.create(\n name='Cakes',\n company=Company.objects.get(name='GoodCake'),\n caffe=self.caffe\n )\n\n Expense.objects.create(\n name='Supply',\n company=Company.objects.get(name='Tesco'),\n caffe=self.caffe\n )\n\n FullExpense.objects.create(\n expense=Expense.objects.get(name='Cakes'),\n amount=50,\n cash_report=CashReport.objects.first(),\n caffe=self.caffe\n )\n\n FullExpense.objects.create(\n expense=Expense.objects.get(name='Supply'),\n amount=500,\n cash_report=CashReport.objects.first(),\n caffe=self.caffe\n )", "def test_create_view(self):\n supplement = SupplementFactory(user=self.user_1)\n time = get_utc_now()\n\n post_data = {\n \"supplement_uuid\": str(supplement.uuid),\n \"time\": time.isoformat(),\n \"quantity\": 5,\n }\n\n response = self.client_1.post(self.url, data=post_data)\n self.assertEqual(response.status_code, 200, response.data)\n\n data = response.data\n supplement_name = data[\"supplement\"][\"name\"]\n self.assertEqual(supplement.name, supplement_name)\n self.assertIsNotNone(data[\"display_name\"])", "def setUp(self):\n # Create table\n db.create_all()\n\n #Create test registree\n mcdonalds = Store(name='mcdonalds', shop_address='63 Northbrook st', shop_postcode='rg14 1ae', takeaway=True)\n tesco = Store(name='tesco', shop_address='London rd, Newbury', shop_postcode='rg14 2bp', takeaway=False)\n coop = Store(name='coop', shop_address='Andover rd', shop_postcode='rg19 3bp', takeaway=False)\n \n #adding test receipts to db\n receipt1 = Receipts(most_expensive=5.09, cost_of_alcohol=0, receipt_total=11.36, takeaway=True, delivery_fee=1.99, delivery_time_mins=28, store_id=1, shop=mcdonalds)\n receipt2 = Receipts(most_expensive=2.80, cost_of_alcohol=16, receipt_total=11.90, store_id=2, shop=tesco)\n receipt3 = Receipts(most_expensive=3.00, cost_of_alcohol=0, receipt_total=18.76, store_id=2, shop=tesco)\n receipt4 = Receipts(most_expensive=2.00, cost_of_alcohol=0, receipt_total=20.91, store_id=2, shop=tesco)\n \n #Add and save to database\n store_list = [mcdonalds, tesco, coop]\n receipt_list = [receipt1, receipt2, receipt3, receipt4]\n for i in store_list:\n db.session.add(i)\n for i in receipt_list:\n db.session.add(i)\n db.session.commit()", "def test_create_goal(self):\n pass", "def create_meal():", "def test_attendant_make_a_sale(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n 
headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)", "def test_create_ingredient(self):\n\n ingredient_payload = {'name': 'Test Ingredient'}\n self.client.post(URL_INGREDIENTS, ingredient_payload)\n\n is_ingredient_created = Ingredient.objects.filter(\n user=self.user,\n name=ingredient_payload['name']\n ).exists()\n\n self.assertTrue(is_ingredient_created)", "def Restaurant_get_info() -> Restaurant:\r\n name = input(\"Please enter the restaurant's name: \")\r\n cuisine = input(\"Please enter the kind of food served: \")\r\n phone = input(\"Please enter the phone number: \")\r\n menu = menu_enter()\r\n return Restaurant(name, cuisine, phone, menu)", "def test_create_basic_recipe(self):\n payload = {\n 'title': 'Cake',\n 'time_minutes': 40,\n 'price': 20,\n }\n res = self.client.post(RECIPE_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n recipe = Recipe.objects.get(id=res.data['id'])\n for key in payload:\n self.assertEqual(payload[key], getattr(recipe, key))\n serializer = RecipeDetailSerializer(recipe)\n self.assertEqual(res.data, serializer.data)", "def test_create_recipe_with_ingredients(self):\n ingredient1 = sample_ingredient(user=self.user, name = 'bla')\n ingredient2 = sample_ingredient(user=self.user, name = 'blaa')\n payload = {\n 'title': 'red curry',\n 'ingredients': [ingredient1.id, ingredient2.id],\n 'time_minutes': 30,\n 'price': 30.00\n }\n res = self.client.post(RECIPE_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n recipe = Recipe.objects.get(id=res.data['id'])\n ingredients = recipe.ingredients.all()\n\n self.assertEqual(ingredients.count(), 2)\n self.assertIn(ingredient1, ingredients)\n self.assertIn(ingredient2, ingredients)", "def setUp(self):\n Beneficiary.objects.create(id=1, lastname='Doe', lastname2='', middlename='', firstname='Jane', nativename='',\n nationality_country_iso_code='FRA', code='', date_of_birth='1970-07-01',\n country_of_birth_iso_code='FRA', gender='Male', address='42 Rue des fleurs',\n postal_code='75000', city='Paris', country_iso_code='FRA', msisdn='1123131413',\n email='kzhang@microfocus.com', id_type='PASSPORT', id_country_iso_code='',\n id_number='1123131413', occupation='Teacher', bank_accout_holder_name='',\n province_state='')\n self.client = Client()", "def test_set_new_objects(self):\n\n name = 'Juan'\n\n partner = create_partner(name)\n\n amount = 5000000\n interest_rate = 1.5\n\n loan_capital_data = {\n 'amount': amount,\n 'interest_rate':interest_rate,\n 'partner_id': partner.id\n }\n\n loan_capital = create_loan_capital(loan_capital_data)\n\n client = APIClient()\n\n response = self.client.get('/loan_quotation/4000000')\n\n # Este request al endpoint cumplirá con lo solicitado y mostrará\n # el siguiente mensaje:\n self.assertEqual(json.loads(response.content), {\n 'Socio': 'Juan',\n 'Cuota_mensual': '171111.11',\n 'Pago_total_credito': '6160000.00',\n 'Tasa_interes_mensual': '1.50'\n })\n\n # Este request al endpoint no cumplirá con lo solicitado y mostrará\n # el siguiente mensaje:\n response = self.client.get('/loan_quotation/8000000')\n self.assertEqual(json.loads(response.content), {\n 'message': 'No hay socio disponible'\n })", "def test_ingredients_create(self):\n app = self.create_app()\n c = app.test_client()\n\n # test if authorization is required to create an ingredient\n rv = c.get('/ingredients/create')\n 
self.assertRedirects(rv, \"/auth/login\")\n\n register(c, app.config[\"USERNAME\"], app.config[\"PASSWORD\"])\n login(c, app.config[\"USERNAME\"], app.config[\"PASSWORD\"])\n c.get('/ingredients/create')\n self.assert_template_used(\"ingredients/create.html\")\n\n # tests if ingredient already in database\n create_ingredient(c, {'id': 1, 'name': \"ing_unittest1_liquid\", 'portion_size': 4, 'portion_size_unit': \"cup\",\n 'protein': 5.5, 'fat': 7.1, 'carbs': 20.5, 'calories': 98, 'price': 0,\n 'price_size': 0.01, 'price_size_unit': \"gal\", 'tag': \"dairy\", 'notes': \"no notes\"})\n self.assert_message_flashed(\"Ingredient already in the database.\")\n\n # tests inserting new ingredient\n create_ingredient(c, {'id': 1, 'name': \"XXXXX\", 'portion_size': 4, 'portion_size_unit': \"cup\",\n 'protein': 5.5, 'fat': 7.1, 'carbs': 20.5, 'calories': 98, 'price': 0,\n 'price_size': 0.01, 'price_size_unit': \"gal\", 'tag': \"dairy\", 'notes': \"no notes\"})\n self.assert_template_used(\"ingredients/index.html\")", "def test_create_bookings(self):\n baker.make_recipe('booking.user', _quantity=3)\n baker.make_recipe('booking.future_EV', _quantity=2)\n self.assertEqual(Booking.objects.all().count(), 0)\n management.call_command('create_bookings')\n self.assertEqual(Booking.objects.all().count(), 6)", "def __init__(self, restaurant_name, cuisine_type):\n super().__init__(restaurant_name, cuisine_type)\n self.flavors = ['chocolate', 'peanut', 'strawberry']", "def test_update_restaurant_blank_name(self):\n from espresso import db\n from espresso import Restaurant\n\n name = 'Restaurant Mexicano'\n db.session.add(Restaurant(name=name, creator='test-user@gmail.com'))\n db.session.commit()\n\n headers = {'Content-Type': 'application/json'}\n headers.update(auth_header_cru_restaurants)\n info = {'name': ''}\n resp = self.test_client.put(self.API_BASE + '/1', headers=headers, data=json.dumps(info))\n self.assertEqual(resp.status_code, 400)", "def test_get_restaurant(self):\n url = \"/get_restaurants\"\n response = app.test_client().get(url)\n response_json = response.json\n with open('expected_responses/restaurants.json', 'r') as f:\n datastore = json.load(f)\n\n assert datastore == response_json, logging.error(\n \"GET Restaurants Failed!\")\n logging.info(\"GET Restaurants API Tested\")", "def test_create(self):\n self.assertTrue(WayPoint.objects.exists())", "def setUp(cls):\n cls.place = Place()\n cls.place.city_id = \"hawaii808\"\n cls.place.user_id = \"modern123\"\n cls.place.name = \"The Modern Honolulu\"\n cls.place.description = \"The heart of Waikiki\"\n cls.place.number_rooms = 375\n cls.place.number_bathrooms = 1\n cls.place.max_guest = 10000\n cls.place.price_by_night = 300\n cls.place.latitude = 21.306944\n cls.place.longitude = -157.858337\n cls.place.amenity_ids = [\"amenity321\"]", "def describe_restaurant(self):\n\t\tprint(\"name of the restaurant is \" + self.restaurant_name)\n\t\tprint(\"cuisine type is \" + self.cuisine_type)", "def test_item_factory_class():\n # __init__()\n factory = ItemFactory()\n pizza_menuitem = MenuItem(\"cheese\", \"Pizzas\", True, 10.0, 1)\n drink_menuitem = MenuItem(\"fanta\", \"Drinks\", True, 10.0, 1)\n side_menuitem = MenuItem(\"fries\", \"Sides\", True, 10.0, 1)\n none_menuitem = MenuItem(\"oreo\", \"oreo\", True, 10.0, 1)\n medium = MenuItem(\"medium\", \"size\", False, 4.0, 1)\n\n # create_item()\n expected_pizza = Pizza(pizza_menuitem, medium)\n expected_drink = Drink(drink_menuitem, medium)\n expected_side = Side(side_menuitem)\n pizza = 
factory.create_item(pizza_menuitem, medium)\n assert pizza == expected_pizza\n assert factory.create_item(drink_menuitem, medium) == expected_drink\n assert factory.create_item(side_menuitem) == expected_side\n assert not factory.create_item(none_menuitem, medium)", "def test_create_basic_recipe(self):\n payload = {\"title\": \"Vietnamese Cake\",\n \"time_minutes\": 45,\n \"price\": 5.55}\n res = self.client.post(RECIPE_URL, payload)\n recipe = Recipe.objects.get(id=res.data['id'])\n for key in payload.keys():\n if key == \"price\":\n self.assertEqual(round(Decimal(payload[key]), 2), getattr(recipe, key))\n else:\n self.assertEqual(payload[key], getattr(recipe, key))\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)", "def create_test_data(self):\n fake = Faker(['en_US', 'ja_JP', 'el_GR', 'de_DE'])\n\n self.actor_request = {\n 'name': fake.name(),\n 'age': random.randint(22, 88),\n 'gender': random.choice(['M', 'F'])\n }\n\n self.movie_request = {\n 'title': fake.color_name() + ' ' + fake.street_suffix(),\n 'releaseDate': str(fake.date_between())\n }\n\n self.actor_update_request = {\n 'name': fake.name(),\n }\n\n self.movie_update_request = {\n 'title': fake.color_name() + ' ' + fake.street_suffix(),\n }\n\n for _ in range(30):\n actor_name = fake.name()\n actor_age = random.randint(22, 88)\n actor_gender = random.choice(['M', 'F'])\n\n movie_title = fake.color_name() + ' ' + fake.street_suffix()\n movie_release_date = str(fake.date_between())\n\n actor = Actor(actor_name, actor_age, actor_gender)\n actor.insert()\n\n movie = Movie(movie_title, movie_release_date)\n movie.insert()\n\n for _ in range(20):\n actors = Actor.query.all()\n movies = Movie.query.all()\n\n actor_to_update = random.choice(actors)\n movie_to_update = random.choice(movies)\n actor_to_update.movies.append(movie_to_update)", "def test_create_basic_recipe(self):\n\n payload = {'name': 'Focaccia', 'description': 'Detailed description'}\n\n res = self.client.post(RECIPES_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n\n recipe = Recipe.objects.get(id=res.data['id'])\n\n self.assertEqual(payload['name'], recipe.name)\n self.assertEqual(payload['description'], recipe.description)", "def setUp(self):\n self.client = APIClient()\n\n self.speciality = models.Speciality.objects.create(\n name='Speciality'\n )\n\n self.payload = {\n 'name': \"Knee Replacement\",\n 'speciality': [self.speciality.pk],\n 'days_in_hospital': 2,\n 'days_in_destination': 2,\n 'duration_minutes': 120,\n 'overview': '<strong>Bla</strong> bla bla',\n }", "def test_creating_recipe_with_ingredients(self):\n ingredient1 = create_sample_ingredient(user=self.user, name=\"Paprika\")\n ingredient2 = create_sample_ingredient(user=self.user, name=\"Salad\")\n\n payload = {\n \"title\": \"Green Salad\",\n \"time_minutes\": 34,\n \"price\": 4.66,\n \"ingredients\": [ingredient1.id, ingredient2.id]\n }\n res = self.client.post(RECIPE_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n recipe = Recipe.objects.get(id=res.data['id'])\n ingredients = recipe.ingredients.all()\n self.assertEqual(ingredients.count(), 2)\n self.assertIn(ingredient1, ingredients)\n self.assertIn(ingredient2, ingredients)", "def test_fuels_created(self):\n # Currently, there are no Fuels or FuelCategory objects in the database\n self.assertEqual(Fuel.objects.count(), 0)\n self.assertEqual(FuelCategory.objects.count(), 0)\n\n self.call_command(filename='power_plant_import/tests/data/six_rows.csv')\n\n # The file has the 
following fuels in the following fuel categories:\n # 'Ocean' category: 'Tidal', 'Wave'\n # 'Hydro' category: 'Hydro',\n # 'Wind' category: 'Wind'\n self.assertEqual(FuelCategory.objects.count(), 3)\n self.assertEqual(Fuel.objects.count(), 4)\n fuel_cat_ocean = FuelCategory.objects.get(name='Ocean')\n fuel_cat_hydro = FuelCategory.objects.get(name='Hydro')\n fuel_cat_wind = FuelCategory.objects.get(name='Wind')\n fuel_tidal = Fuel.objects.get(name='Tidal')\n fuel_wave = Fuel.objects.get(name='Wave')\n fuel_hydro = Fuel.objects.get(name='Hydro')\n fuel_wind = Fuel.objects.get(name='Wind')\n self.assertEqual(set(fuel_cat_ocean.fuel_set.all()), set([fuel_tidal, fuel_wave]))\n self.assertEqual(set(fuel_cat_hydro.fuel_set.all()), set([fuel_hydro]))\n self.assertEqual(set(fuel_cat_wind.fuel_set.all()), set([fuel_wind]))\n # Get the PowerPlants that were created during the import\n (powerplant_ouessant, powerplant_ilarionas, powerplant_tonstad) = self.get_created_plants()\n # Get the Projects that were created during the import\n (project_ouessant1, project_ouessant2, project_liaoning) = self.get_created_projects()\n # The Fuels have been assigned to the correct PowerPlants and Projects\n self.assertEqual(set(powerplant_ouessant.fuels.all()), set([fuel_tidal]))\n self.assertEqual(set(project_ouessant1.fuels.all()), set([fuel_tidal]))\n self.assertEqual(set(project_ouessant2.fuels.all()), set([fuel_wave]))\n self.assertEqual(set(powerplant_ilarionas.fuels.all()), set([fuel_hydro]))\n self.assertEqual(set(project_liaoning.fuels.all()), set([fuel_wind]))\n self.assertEqual(set(powerplant_tonstad.fuels.all()), set([fuel_wind, fuel_hydro]))", "def __init__(self, restaurant_name, cuisine_type):\n super().__init__(restaurant_name, cuisine_type)\n self.flavors = [\"vanilla\", \"chocolate\", \"strawberry\", \"raspberry\",\n \"cream cheese\", \"blueberry\", \"snickers\", \"chocolate chip\"]", "def test_create_from_pear(self):\n pass", "def test_init(self):\n test_order = Order(\"1\", \"Large\", \"Thin\", \"Cheese\")\n self.assertEqual(test_order.quantity, \"1\")\n self.assertEqual(test_order.size, \"Large\")\n self.assertEqual(test_order.crust, \"Thin\")\n self.assertEqual(test_order.toppings, \"Cheese\")", "def test_shoppingcart_create(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we create a product\n id_prod = self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])\n if id_prod:\n # then we can create the shoppingcart\n data = self.shoppingcart_data\n data[\"customer_id\"] = id\n data[\"product_id\"] = id\n self._create_model(\"shoppingcart\", data, [ \"quantity\", \"discount_value\", \"is_closed\" ])\n self.assertIsNotNone(id_prod)\n self.assertIsNotNone(id)", "def __init__(self, restaurant_name, cuisine_type):\n super().__init__(restaurant_name, cuisine_type)\n self.flavors = [\"Chocolate\", \"Vanilla\", \"Strawberryes\"]", "def test_creating_new_post(self):\n\n form_data = {\"meal-time\": \"2020-02-25 08:00:00\", \n \"meal-setting\": \"At home!\", \"TEB\": \"Some thoughts..\",\n \"hunger\": 2, \"fullness\": 8, \"satisfaction\": 5,\n \"meal-notes\": \"Some notes.\"}\n \n create_new_post(1, \"/static/images/uploads/2.jpg\", form_data)\n\n post = Post.query.get(3)\n\n self.assertIsInstance(post, Post)\n self.assertEqual(post.meal_setting, \"At home!\")", "def __init__(self,restaurant_name, 
cuisine_type):\r\n\t\tsuper().__init__(restaurant_name,cuisine_type)\r\n\t\tself.flavors = ['chocolate', 'pistachio','mint','vanilla']" ]
[ "0.8021308", "0.7417864", "0.7310501", "0.7144182", "0.70938045", "0.6831627", "0.6762305", "0.67300665", "0.669786", "0.66929805", "0.6612574", "0.65892005", "0.65892005", "0.65892005", "0.65802336", "0.65279776", "0.6520043", "0.6517237", "0.648618", "0.6472757", "0.6470774", "0.6464405", "0.64380455", "0.6421872", "0.6375169", "0.6358613", "0.6352045", "0.6347374", "0.63401073", "0.63373995", "0.63264835", "0.63264835", "0.6325288", "0.6308818", "0.6303401", "0.6303401", "0.6300795", "0.62981546", "0.62979335", "0.62894505", "0.62816715", "0.6278688", "0.6275892", "0.6275892", "0.62704957", "0.62622666", "0.624095", "0.6235468", "0.62346864", "0.6234505", "0.62180024", "0.621563", "0.62092113", "0.62045103", "0.6203058", "0.6195203", "0.6190841", "0.6190113", "0.6157362", "0.61555225", "0.61447376", "0.6142901", "0.6138504", "0.612906", "0.6116623", "0.61141056", "0.6102581", "0.6078692", "0.6072345", "0.6056387", "0.6045407", "0.6041312", "0.60407704", "0.6039851", "0.60329485", "0.6029186", "0.6023715", "0.6012597", "0.6005975", "0.600296", "0.5990297", "0.59853387", "0.5979899", "0.5974672", "0.59707665", "0.5966657", "0.59660566", "0.5963235", "0.5962023", "0.596113", "0.59599036", "0.5959158", "0.5956236", "0.59374404", "0.5935339", "0.5934657", "0.5932225", "0.5927658", "0.591988", "0.591741" ]
0.6852933
5
Test Restaurant.__check_conditions decorator. The test must pass if functions with this decorator raise an error because Hall, Delivery or Kitchen was not set.
def test_open_no_setup(restaurant_only, hall_only, kitchen_only, delivery_only):
    # Not every combination is checked here, because restaurant_only is not an
    # isolated object: the variants were checked previously and work on their own,
    # but they affect the result when combined.

    # no setups
    with pytest.raises(CustomWarning):
        restaurant_only.open()
    assert restaurant_only.is_working is False, "You need to setup Kitchen, Delivery and Hall"

    # only kitchen
    with pytest.raises(CustomWarning):
        restaurant_only.set_kitchen(kitchen_only)
        restaurant_only.open()
    assert restaurant_only.is_working is False, "You need to setup Kitchen, Delivery and Hall"

    # only delivery and kitchen
    with pytest.raises(CustomWarning):
        restaurant_only.set_delivery(delivery_only)
        restaurant_only.set_kitchen(kitchen_only)
        restaurant_only.open()
    assert restaurant_only.is_working is False, "You need to setup Kitchen, Delivery and Hall"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_available(self):\n feature_guard = _make_requires(True, \"Error text\")\n results = []\n\n @feature_guard\n def inner():\n results.append(True)\n return True\n\n assert inner() is True\n assert [True] == results", "def test_simple_restaurant(restaurant_only):\n assert restaurant_only", "def test_busy_cook(cook_busy, product_for_cook):\n with pytest.raises(CustomWarning):\n assert cook_busy.cook_dish(product_for_cook)", "def check_requirements(self): # pylint: disable=no-self-use\n self.is_skipped = False", "def _check(self):\n if self.action_on_failure not in self.ACTION_ON_FAILURE:\n raise type_utils.TestListError(\n 'action_on_failure must be one of \"NEXT\", \"PARENT\", \"STOP\"')\n\n if self.parallel:\n if not self.subtests:\n raise type_utils.TestListError(\n '`parallel` should be set on test group')\n for subtest in self.subtests:\n if not subtest.IsLeaf():\n raise type_utils.TestListError(\n 'Test %s: all subtests in a parallel test should be leaf nodes' %\n self.id)\n if subtest.enable_services or subtest.disable_services:\n raise type_utils.TestListError(\n 'Test %s cannot be parallel with enable_services or '\n 'disable_services specified.' % subtest.id)\n\n # all subtests should come before teardown tests\n it = iter(self.subtests)\n if not self.teardown:\n # find first teardown test\n it = itertools.dropwhile(lambda subtest: not subtest.teardown, it)\n for subtest in it:\n if not subtest.teardown:\n raise type_utils.TestListError(\n '%s: all subtests should come before teardown tests' % self.id)\n\n for subtest in self.subtests:\n subtest._check() # pylint: disable=protected-access", "def _check(self):\n assert isinstance(self._price, int)\n assert self._price >= 0\n assert isinstance(self._units, int)\n assert self._units > 0\n assert self._side == OrderSide.BUY or self._side == OrderSide.SELL\n assert self._type == OrderType.LIMIT or self._type == OrderType.CANCEL\n assert isinstance(self._market, int)\n assert self._market > 0", "def test_full_restaurant(restaurant_full):\n assert restaurant_full", "def test_defining_only_and_defer_fails(self):", "def _is_valid(self):\n # Test vol_id:\n assert isinstance(self.volume_id, int), ('The volume id vol_id must be an '\n f'integer, but {self.volume_id} was '\n 'given.')\n assert self.volume_id > 0, ('The volume id vol_id must be greater zero, '\n f'but {self.volume_id} was given.')\n\n # Test if ROI function is defined properly:\n assert callable(self.roi), ('roi must be a callable function '\n 'which depends on x,y,z.')\n\n # Testing the electric field:\n if not (callable(self.electric_field) or\n isinstance(self.electric_field, (int, float))):\n raise ValueError('e_field must be either a function or '\n 'a constant!')\n\n if callable(self.electric_field):\n args = inspect.getfullargspec(self.electric_field).args\n m = np.all(np.isin(['x', 'y', 'z'], args))\n m = m & (len(args) == 3)\n assert m, ('Wrong arguments for e_field. Expected arguments: '\n f'\"x\", \"y\" and \"z\" but {args} were given.')\n # Cannot add a specific if **kwargs are valid properties. 
Cannot\n # inspect nestpy functions.", "def testConditionChecking(self):\n\n state = State.from_problem(self.prob)\n \n drive = self.dom.get_action(\"drive\")\n with drive.instantiate([\"agent\", \"tru1\", \"apt1\"], self.prob):\n self.assert_(state.is_satisfied(drive.precondition))\n\n with drive.instantiate([\"agent\", \"tru1\", \"apt2\"], self.prob):\n self.assertFalse(state.is_satisfied(drive.precondition))", "def _backtest_model_specific_sanity_checks(self, *args: Any, **kwargs: Any) -> None:\n pass", "def test_check_args_weekend(self):\n test_date = dt.datetime(2021, 6, 20, 11, 0, 0)\n with self.assertRaises(ValueError) as context:\n self.duedate.check_args(test_date, self.test_turn_time)\n self.assertTrue(\n \"You can submit requests during weekdays only.\" in str(context.exception))", "def _check_parameters(self, target_function, **kwargs):\n # Ensure all arguments are =< 0 where relevant\n for keyword, value in kwargs.items():\n # Two conditions\n value_is_less_than_zero = value < 0\n keyword_is_relevant = keyword in ['mean', 'constant', 'low', 'mode', 'high']\n # Test conditions\n if keyword_is_relevant and value_is_less_than_zero:\n raise FairException('\"{}\" is less than zero.'.format(keyword))\n # Check that all required keywords are provided\n required_keywords = self._required_keywords[target_function]\n for required_keyword in required_keywords:\n if required_keyword in kwargs.keys():\n pass\n else:\n raise FairException('\"{}\" is missing \"{}\".'.format(str(target_function), required_keyword))", "def input_check(self):\n\n if self.species == 'He': assert self.line_model == 'voigt'\n n_upper_range, e_dens_range, temp_range, b_field_range = get_param_ranges(self.line_model)\n\n if np.isnan(n_upper_range).sum() <= 1:\n assert (self.n_upper in range(n_upper_range[0], n_upper_range[1]))\n if np.isnan(e_dens_range).sum() <= 1:\n assert (e_dens_range[0] <= self.e_dens <= e_dens_range[1])\n if np.isnan(temp_range).sum() <= 1:\n assert (temp_range[0] <= self.temp <= temp_range[1])\n if np.isnan(b_field_range).sum() <= 1:\n assert (b_field_range[0] <= self.b_field <= b_field_range[1])", "def validate(self):\n try:\n assert self.__age_calculate() is True, Exception('Age is less than expected')\n assert self.__is_user_repeated() is True,Exception(\n 'Recently request received in last 5 days')\n assert self.__is_indian_or_american() is True, Exception(\n 'Nationality should be india or america')\n assert self.__check_state() is True, Exception('State should be valid')\n assert self.__check_salary() is True, Exception(\n 'Salary should be below 90k and above 10k')\n self.__log.write_log(\"All Validation is Successful\")\n self.__response = {'response':'success'}\n return True\n except AssertionError as error:\n self.__response = {'response':f\"{error}\"}\n self.__log.write_log(\"Validation Error...Check the Eligibility Criteria...\")\n return False", "def precondition(self, *args, **kwargs):\n pass", "def test_freeze_user_final_grade_error1_improperly_configured(self, raise_on_exception, mock_refr, mock_get_fg):\n # case without freeze date\n with self.assertRaises(ImproperlyConfigured):\n api.freeze_user_final_grade(self.user, self.run_fa_with_cert, raise_on_exception=raise_on_exception)\n assert mock_refr.called is False\n assert mock_get_fg.called is False\n assert FinalGrade.objects.filter(user=self.user, course_run=self.run_fa_with_cert).exists() is False", "def test_check_is_required(fake_check):\n assert fake_check.is_required()", "def conditions():\n pass", "def 
checkFood(self, food):\n pass", "def check_errors(self) -> None:", "def check_validity(self):", "def test_unavailable(self):\n feature_guard = _make_requires(False, \"Error text\")\n\n @feature_guard\n def inner(): # pragma: nocover\n pytest.fail(\"Should not be called\")\n\n with pytest.raises(NotImplementedError) as e:\n inner()\n\n assert \"Error text\" in str(e.value)", "def testCheck(self):\n change = ChangeState(self.config, \"changestate_t\")\n\n # Run through all good state transitions and assert that they work\n for state in self.transitions:\n for dest in self.transitions[state]:\n change.check(dest, state)\n dummystates = ['dummy1', 'dummy2', 'dummy3', 'dummy4']\n\n # Then run through some bad state transistions and assertRaises(AssertionError)\n for state in self.transitions:\n for dest in dummystates:\n self.assertRaises(AssertionError, change.check, dest, state)\n return", "def check(self) -> None:", "def test_validation_called(self, mock_field_validator, mock_error_builder):\n\n LandCompensationLandSoldValidator.validate(VALID_LAND_SOLD, VALID_WORK_DONE)\n\n calls = [\n call(VALID_LAND_SOLD, 'land-sold-description', 'Description of the charge', mock_error_builder(),\n summary_message='Describe the land sold',\n inline_message='This is the land bought by the authority, so they can do public works on the land.'),\n call().is_required(),\n\n call(VALID_LAND_SOLD, 'land-sold-description', 'Description of the charge',\n mock_error_builder(), summary_message=\"Answer too long\",\n inline_message=\"Answer must be shorter than 400 characters (about 60 words)\"),\n call().is_length_less_than_or_equal_to(400),\n\n call(VALID_WORK_DONE, 'land-works-particulars', 'The description of the work planned',\n mock_error_builder(), summary_message='Describe the work',\n inline_message='This is the work that the authority wants to do on the land they have bought.'),\n call().is_required(),\n\n call(VALID_WORK_DONE, 'land-works-particulars', 'The description of the work planned',\n mock_error_builder(), summary_message=\"Answer too long\",\n inline_message=\"Answer must be shorter than 400 characters (about 60 words)\"),\n call().is_length_less_than_or_equal_to(400)\n ]\n mock_field_validator.assert_has_calls(calls)", "def test_check_all_set(self):\n try:\n self.settings1.check_all_set()\n except AssertionError:\n self.fail(\"check_all_set() raised unexpected AssertionError.\")\n\n with self.assertRaises(AssertionError):\n self.settings2.check_all_set()", "def test_case_01(self):\n if True:\n self.fail()", "def test_properties(self):\n with pytest.raises(AssertionError):\n self.ownership_state.amount_by_currency_id\n\n with pytest.raises(AssertionError):\n self.ownership_state.quantities_by_good_id", "def test_required(self):\n \n from pystarlab.starlab import Makeking\n self.assertRaises(ValueError, Makeking)\n \n self.assertRaises(ValueError, Makeking, n=500)\n \n self.assertRaises(ValueError, Makeking, w=1.4)\n \n # this will fail if it raises any exceptions\n king_nonfailing = Makeking(n=500, w=1.4, s=12345678)", "def check_requirement(self):\n raise NotImplementedError", "def test_check_args_working_hours(self):\n test_date = dt.datetime(2021, 6, 18, 7, 0, 0)\n with self.assertRaises(ValueError) as context:\n self.duedate.check_args(test_date, self.test_turn_time)\n self.assertTrue(\n \"You can submit requests from 9AM till 5PM.\" in str(context.exception))", "def test_validate(self):\n assert \"skip_validation\" not in self.route.route\n\n route = self.route.validate()\n assert 
\"skip_validation\" not in route.route\n\n route = self.route.validate(False)\n assert \"skip_validation\" in route.route", "def __call__(self): # run test\n\n try: # Check if any errors were raised during calling of self.func\n return abs(self.func(*self.args, **self.kwargs) - self.res) < self._tolerance\n\n except IndexError:\n return False", "def _backtest_sanity_checks(self, *args: Any, **kwargs: Any) -> None:\n # parse args and kwargs\n training_series = args[0]\n n = SimpleNamespace(**kwargs)\n\n # check target and training series\n if n.target_series is None:\n target_series = training_series\n else:\n target_series = n.target_series\n\n raise_if_not(all(training_series.time_index() == target_series.time_index()), \"the target and training series\"\n \" must have the same time indices.\")\n\n _backtest_general_checks(training_series, kwargs)", "def sanity_check(self):\n pass", "def validate_params(self) -> None:\n # cap must be given when using logistic growth\n if (self.growth == \"logistic\") and (self.cap is False):\n msg = \"Capacity must be provided for logistic growth\"\n logging.error(msg)\n raise ValueError(msg)\n\n # If custom_seasonalities passed, ensure they contain the required keys.\n reqd_seasonality_keys = [\"name\", \"period\", \"fourier_order\"]\n if not all(\n req_key in seasonality\n for req_key in reqd_seasonality_keys\n for seasonality in self.custom_seasonalities\n ):\n msg = f\"Custom seasonality dicts must contain the following keys:\\n{reqd_seasonality_keys}\"\n logging.error(msg)\n raise ValueError(msg)\n\n # If extra_regressors passed, ensure they contain the required keys.\n all_regressor_keys = {\"name\", \"prior_scale\", \"mode\"}\n for regressor in self.extra_regressors:\n if not isinstance(regressor, dict):\n msg = f\"Elements in `extra_regressor` should be a dictionary but receives {type(regressor)}.\"\n _error_msg(msg)\n if \"name\" not in regressor:\n msg = \"Extra regressor dicts must contain the following keys: 'name'.\"\n _error_msg(msg)\n if not set(regressor.keys()).issubset(all_regressor_keys):\n msg = f\"Elements in `extra_regressor` should only contain keys in {all_regressor_keys} but receives {regressor.keys()}.\"\n _error_msg(msg)\n self._reqd_regressor_names = [\n regressor[\"name\"] for regressor in self.extra_regressors\n ]\n # check floor and cap\n if (self.cap is not False) and (\"cap\" not in self._reqd_cap_floor_names):\n self._reqd_cap_floor_names.append(\"cap\")\n if self.floor is not False and (\"floor\" not in self._reqd_cap_floor_names):\n self._reqd_cap_floor_names.append(\"floor\")", "def test_validation_passes_with_valid_input(self):\n\n result = LandCompensationLandSoldValidator.validate(VALID_LAND_SOLD, VALID_WORK_DONE)\n self.assertEqual(0, len(result.errors))", "def sanity_checks(self):\n if not self.is_trigger_rbr_safe:\n raise OSCError(\"NOT_RBR_SAFE\")", "def validate_args(*args: Any) -> bool:\n\n return len(args) == 4 and Item.validate_price(args[2]) and Entity.validate_discount(args[3])", "def _check_validity(self):\n pass", "def fcheck(*args, **kwargs)->None:\n pass", "def testRequirement(self, truck, measurement):\n\n if (self.function(truck, measurement)):\n return True\n else:\n print(self.errorMessage)\n return False", "def check(self):\n invalid = []\n\n if not self.route:\n invalid.append(('route', 'missing'))\n elif not self.route[1] in ['GET', 'POST', 'PUT']:\n invalid.append(('route', 'invalid method: %s' % self.route[1]))\n\n has_2xx = False\n for rcode in self.return_codes:\n code = rcode[0]\n if code 
>= 200 and code < 300:\n has_2xx = True\n break\n if not has_2xx:\n invalid.append(('return_codes', 'Missing succes return code doc'))\n\n if self.client_auth is None:\n invalid.append(\n ('client_auth', 'Please provide client auth requirement'))\n\n if self.user_auth is None:\n invalid.append(\n ('user_auth', 'Please provide user auth requirement'))\n\n if invalid:\n msgs = []\n for error in invalid:\n msgs.append(\"%s: %s\" % error)\n raise ValueError(\n \"APIFunc for %s is invalid: %s\"\n % (self.viewfunc.__name__,\n ', '.join(msgs)))", "def test_check(self):\n return self._testCheck()", "def Check(self, parameters):", "def check_inputs(self, item_data):\n if not item_data[0] in self.data['pizza']:\n print('Error: ' + item_data[0] + ' pizza does not exist.')\n return False\n\n if not item_data[1] in self.data['pizza'][item_data[0]]:\n print('Error: ' + item_data[1] + ' size does not exist for '\n + item_data[0] + ' pizza.')\n return False\n\n for topping in item_data[2]:\n if not topping in self.data['topping']:\n print('Error: Pizza topping ' + topping + ' does not exist.')\n return False\n return True", "def test_if_bake_and_not_prepare_should_raise_an_error(data):\n step = DropZVColumnsStep()\n\n with pytest.raises(YeastBakeError):\n step.bake(data)", "def run_parameters_validations(self):\n if self.risk_rule:\n if 'connectApi' not in self.services:\n return_error(\"You entered a risk rule but the 'connectApi' service is not chosen. \"\n \"Add the 'connectApi' service to the list or remove the risk rule.\")\n else:\n for risk_rule in self.risk_rule:\n if not is_valid_risk_rule(self, risk_rule):\n return_error(f\"The given risk rule: {risk_rule} does not exist,\"\n f\"please make sure you entered it correctly. \\n\"\n f\"To see all available risk rules run the '!rf-get-risk-rules' command.\")\n\n if self.fusion_file_path is not None:\n if 'fusion' not in self.services:\n return_error(\"You entered a fusion file path but the 'fusion' service is not chosen. 
\"\n \"Add the 'fusion' service to the list or remove the fusion file path.\")", "def testConditionReasons(self):\n \n state = State.from_problem(self.prob)\n\n relevantVars = []\n drive = self.dom.get_action(\"drive\")\n with drive.instantiate([\"agent\", \"tru1\", \"apt1\"], self.prob):\n self.assert_(state.is_satisfied(drive.precondition, relevantVars))\n\n relevantVars = set(relevantVars)\n \n s1 = StateVariable(self.prob.functions[\"city-of\"][0], [self.prob[\"pos1\"]])\n s2 = StateVariable(self.prob.functions[\"city-of\"][0], [self.prob[\"apt1\"]])\n s3 = StateVariable(self.prob.functions[\"location-of\"][0], [self.prob[\"tru1\"]])\n \n self.assertEqual(len(relevantVars), 3)\n self.assert_(s1 in relevantVars)\n self.assert_(s2 in relevantVars)\n self.assert_(s3 in relevantVars)", "def test_lta_good(self):\n self.assertIsNone(api.inventory.check(self.lta_order_good))", "def validate(self):\n\n\tmissing = []\n\tbadcheck = []\n\tfor name, checkfunc, params in self._required:\n\t try:\n\t\targ = self.make_required(name)\n\t\tif checkfunc is not None:\n\t\t if params is not None:\n\t\t\tparams = (self.param_map[name], arg) + params\n\t\t else:\n\t\t\tparams = (self.param_map[name], arg)\n\t\t try:\n\t\t\tapply(checkfunc, params)\n\t\t except ValidationError, msg:\n\t\t\tbadcheck.append(msg)\n\t except ValidationError, args:\n\t\tmissing.append(args)\n\n\tfor (name, checkfunc, params) in self._optional:\n\t tup = self.make_optional(name)\n\t if tup and checkfunc is not None:\n\t\tif params is not None:\n\t\t params = (self.param_map[name], tup) + params\n\t\telse:\n\t\t params = (self.param_map[name], tup)\n\t\ttry:\n\t\t apply(checkfunc, params)\n\t\texcept ValidationError, msg:\n\t\t badcheck.append(msg)\n\n\tif (missing or badcheck) and self.log_errors:\n\t self.log_error(missing, badcheck)\n\n\tif (missing or badcheck) and self.generate_error_page:\n\t self.generate_HTML(missing, badcheck)\n\n\tself.missing = missing\n\tself.badcheck = badcheck\n\n\treturn not (missing or badcheck)", "def check(self):\n pass", "def check(self):\n pass", "def check(self):\n pass", "def check(self):\n pass", "def self_check(self):\r\n \r\n try:\r\n #tries to get a value from each sensor\r\n \r\n sensor_1_value = self.sen.get_sensor_value()\r\n\r\n # checks if the value is a float else rase exception\r\n\r\n if type(sensor_1_value) != float:\r\n raise Exception()\r\n\r\n #if the sensors dont return a value or is in the wrong type\r\n #the code will fail before here and get caught by the catch.\r\n #otherwise its sets the pass or fail condition to true\r\n \r\n pass_or_fail = True\r\n \r\n\r\n except:\r\n #if the self check fails then it sets the pass or fail\r\n #condition to false\r\n \r\n pass_or_fail = False\r\n \r\n \r\n return(pass_or_fail)", "def test_recheck_fails(self):\n raise NotImplementedError", "def test_rules():", "def test_cook_twice(cook_not_busy, product_for_cook):\n\n cook_not_busy.cook_dish(product_for_cook)\n with pytest.raises(ValueError):\n cook_not_busy.cook_dish(product_for_cook)", "def test_setup_errors(self):\n with self.assertRaises(ValueError):\n _ = RandomForest(n_estimators=0, max_depth=1, criterion='entropy')\n\n with self.assertRaises(ValueError):\n _ = RandomForest(n_estimators=1, max_depth=0, criterion='entropy')\n\n with self.assertRaises(ValueError):\n _ = RandomForest(n_estimators=1, max_depth=1, criterion='test')", "def setup_validation(self, client, *args, **keyword_args):\n raise NotImplementedError(\"Please fix me.\")", "def setup_validation(self, client, *args, 
**keyword_args):\n raise NotImplementedError(\"Please fix me.\")", "def sanity_check(self):\n return True", "def test_order_constraint(self):\n orders_placed = [25, 25, 25]\n with self.assertRaises(Exception):\n analyse_uncertain_demand.UncertainDemand(\n orders=orders_placed,\n sku='Rx493-90',\n lead_time=Decimal(4),\n unit_cost=Decimal(40),\n reorder_cost=Decimal(400),\n retail_price=Decimal(600),\n currency='USD'\n )", "def test_xfail_with_run_false_and_with_reason():\n pass", "def _check_pert(self, **kwargs):\n conditions = {\n 'mode >= low' : kwargs['mode'] >= kwargs['low'],\n 'high >= mode' : kwargs['high'] >= kwargs['mode'],\n }\n for condition_name, condition_value in conditions.items():\n if condition_value == False:\n err = 'Param \"{}\" fails PERT requirement \"{}\".'.format(kwargs, condition_name)\n raise FairException(err)", "def test_test(self):\n\n # The following should do nothing as the pipes exist.\n pipes.check_pipe()\n pipes.check_pipe('orig')\n pipes.check_pipe('empty')\n\n # Assert that a RelaxNoPipeError occurs when the pipe doesn't exist.\n self.assertRaises(RelaxNoPipeError, pipes.check_pipe, 'x')\n\n # Reset relax.\n reset()\n\n # Now none of the following pipes exist, hence errors should be thrown.\n self.assertRaises(RelaxNoPipeError, pipes.check_pipe)\n self.assertRaises(RelaxNoPipeError, pipes.check_pipe, 'orig')\n self.assertRaises(RelaxNoPipeError, pipes.check_pipe, 'empty')", "def check():", "def test_validation(self):\n def test(**invalid_fields):\n with self.assertRaises(AssertionError):\n c = models.Citizen(**{'relatives': [], **invalid_fields})\n\n # Must throw errors in every case\n test(citizen_id=-1, )\n test(apartment='WRONG-STRING')\n test(town='', )\n test(street='', )\n test(building='', )\n test(name=None, )\n test(birth_date='WRONG-STRING')\n test(birth_date='31.02.2019')\n test(relatives=['a'])", "def test_exactly_one_required(self):\n\n @mutually_exclusive_parameters('arg1', 'arg2')\n @at_least_one_of('arg1', 'arg2')\n def _func1_decorated(arg1=None, arg2=None, arg3=None):\n return 'foo'\n\n from plone.api.exc import InvalidParameterError\n from plone.api.exc import MissingParameterError\n\n # test it errors if you provide none\n with self.assertRaises(MissingParameterError):\n _func1_decorated()\n\n # test that it errors if you provide both\n with self.assertRaises(InvalidParameterError):\n _func1_decorated('ahoy', 'there')\n\n # everything ok\n self.assertEqual(_func1_decorated('ahoy'), 'foo')\n self.assertEqual(_func1_decorated('ahoy', arg3='there'), 'foo')", "def check(self,):\n self.is_valid_according_policy()", "def check_validity(self) -> None: # pylint: disable=no-self-use # pragma: nocover\n return None", "def test_require_in_call_silently_succeeds_for_available_tests(self, test_generator):\n # pylint: disable=function-redefined\n\n with self.subTest(\"direct decorator\"):\n feature = test_generator()\n with mock_availability_test(feature) as check:\n check.assert_not_called()\n\n @feature.require_in_call\n def decorated():\n pass\n\n check.assert_not_called()\n decorated()\n check.assert_called_once()\n\n with self.subTest(\"named decorator\"):\n feature = test_generator()\n with mock_availability_test(feature) as check:\n check.assert_not_called()\n\n @feature.require_in_call(\"sentinel name\")\n def decorated():\n pass\n\n check.assert_not_called()\n decorated()\n check.assert_called_once()", "def test_check_occurs_once(self, test_generator):\n feature = test_generator()\n with mock_availability_test(feature) as check:\n 
check.assert_not_called()\n if feature:\n pass\n check.assert_called_once()\n\n if feature:\n feature.require_now(\"no message\")\n feature.require_in_call(lambda: None)()\n feature.require_in_call(\"no message\")(lambda: None)()\n feature.require_in_instance(type(\"Dummy\", (), {}))()\n feature.require_in_instance(\"no message\")(type(\"Dummy\", (), {}))()\n\n check.assert_called_once()", "def test_if(self):", "def violated(self) -> bool:\n ...", "def test_do_check_number_of_terms(self):\n self.assertTrue(self.a.do_check_number_of_terms(self.b))\n self.assertFalse(self.a.do_check_number_of_terms(self.c))", "def test_invalid_action(self):\n\n # Prepare.\n app = self.factory()\n request = self.getRequest(app)\n context = model.factory()\n\n # Ask for permission.\n state_changer = request.state_changer\n self.assertFalse(state_changer.can_perform(context, a.COMPLETE))\n\n # Beg for forgiveness.\n err = fysom.FysomError\n self.assertRaises(err, state_changer.perform, context, a.COMPLETE, None)", "def special_condition_checker(special_tiles, items,\n player_new):\n tile = special_tiles.get(\"{0},{1}\".format(player_new[0], player_new[1]), None)\n valid = True\n\n # Victory conditions\n if tile == \"end\":\n if items[\"legend_rod\"] == True:\n raise PlayerWin\n else:\n color.write(\"My current rod won't be sufficient for this catch. I must go find my grandma~~\",\"ERROR\")\n elif tile == \"shop\":\n pass\n\n # Stage Change Condition\n elif tile == \"cave_entrance_1\":\n raise PlayerCaveEnter1\n elif tile == \"cave_entrance_2\":\n raise PlayerCaveEnter2\n elif tile == \"cave_exit_1\":\n raise PlayerCaveLeave1\n elif tile == \"cave_exit_2\":\n raise PlayerCaveLeave2\n\n # NPC Condiitions\n elif tile == \"npc_grandma\":\n if items[\"legend_rod\"] == True:\n slow_print(\"There is nothing more I can grant you child. Noho ora mai~\",\"STRING\")\n\n else:\n items[\"legend_rod\"] = True\n color.write(\"=============================================\\n\",\"BUILTIN\")\n try:\n print(\"Ctrl + C to skip!\")\n slow_print(\"Mokopuna. Finally you arrive. Taranga has informed me of your quest~\",\"STRING\")\n time.sleep(1)\n slow_print(\"Tēnā koe Muri-ranga-whenua.~Since you know of my quest, you must know my purpose in coming here. ~\")\n time.sleep(1)\n slow_print(\"Indeed. I have prepared for you a koha.~\",\"STRING\")\n time.sleep(0.2)\n slow_print(\"I bestow a fishook upon you, fashioned out of my lower jawbone.~\",\"STRING\")\n time.sleep(0.2)\n slow_print(\"It is strong and will not break under the greatest of strains. Go now mokopuna, and claim your destiny.~\",\"STRING\")\n time.sleep(0.5)\n slow_print(\"Kuai... This is a great gift. I will use it well. I thank you.~\")\n time.sleep(0.5)\n slow_print(\"This counts as the blessing from the older generation to the young.~Go now. 
I am old, and will return to the dust soon.~\",\"STRING\")\n time.sleep(1)\n except KeyboardInterrupt:\n pass\n slow_print(\"You now have the legendary rod of Maui!~\",\"KEYWORD\")\n valid = False\n\n # Sign Condition\n elif tile == \"sign_cave\":\n slow_print(\"If I remember correctly, the cave should be pretty dry.\\nThere may not be limited chances for me to fish inside.\\nI should probably stock up on some fish before I enter.\\n\")\n time.sleep(1)\n \n\n # Tutorial Conditions\n elif tile == \"tutorial_end\":\n raise TutorialEnd\n return special_tiles, items, valid", "def validate_rule(self, client, *args, **keyword_args):\n raise NotImplementedError(\"Please fix me.\")", "def validate_rule(self, client, *args, **keyword_args):\n raise NotImplementedError(\"Please fix me.\")", "def check_condition(self):\n\n\t\traw_context = {\n\t\t\t'folk': self.folk\n\t\t}\n\n\t\tstatus, param = self.execute(self.mission_grid, 'condition', self.pending_mission.kingdom, raw_context)\n\t\treturn status", "def test_kyc_get_validation_legal(self):\n pass", "def test_validate_transaction(self):\n sell = self.order_2\n buy = self.order_3\n amount = 5\n\n # =================================================================\n # test: BiddingRoundException\n # =================================================================\n\n different_bidding_round = BiddingRound.objects.create()\n sell.bidding_round = different_bidding_round\n\n try:\n validate_transaction(sell_order=sell, buy_order=buy, share_amount=amount)\n raise AssertionError('BiddingRoundException expected')\n except BiddingRoundException:\n pass\n\n sell.bidding_round = self.bidding_round\n buy.bidding_round = different_bidding_round\n\n try:\n validate_transaction(sell_order=sell, buy_order=buy, share_amount=amount)\n raise AssertionError('BiddingRoundException expected')\n except BiddingRoundException:\n pass\n\n buy.bidding_round = self.bidding_round\n\n # =================================================================\n # test: InactiveBiddingRoundException\n # =================================================================\n\n self.bidding_round.is_active = False\n\n try:\n validate_transaction(sell_order=sell, buy_order=buy, share_amount=amount)\n raise AssertionError('InactiveBiddingRoundException expected')\n except InactiveBiddingRoundException:\n pass\n\n self.bidding_round.is_active = True\n\n # =================================================================\n # test: OrderTypeException\n # =================================================================\n\n sell.order_type = BUY\n\n try:\n validate_transaction(sell_order=sell, buy_order=buy, share_amount=amount)\n raise AssertionError('OrderTypeException expected')\n except OrderTypeException:\n pass\n\n sell.order_type = SELL\n buy.order_type = SELL\n\n try:\n validate_transaction(sell_order=sell, buy_order=buy, share_amount=amount)\n raise AssertionError('OrderTypeException expected')\n except OrderTypeException:\n pass\n\n buy.order_type = BUY\n\n # =================================================================\n # test: SharePriceException\n # =================================================================\n\n sell.order_price_per_share = 9.0\n\n try:\n validate_transaction(sell_order=sell, buy_order=buy, share_amount=amount)\n raise AssertionError('SharePriceException expected')\n except SharePriceException:\n pass\n\n sell.order_price_per_share = 8.0\n buy.order_price_per_share = 7.0\n\n try:\n validate_transaction(sell_order=sell, buy_order=buy, share_amount=amount)\n 
raise AssertionError('SharePriceException expected')\n except SharePriceException:\n pass\n\n buy.order_price_per_share = 8.00\n\n # =================================================================\n # test: OrderStatusException\n # =================================================================\n\n sell.order_status = USER_ACCEPTED\n\n try:\n validate_transaction(sell_order=sell, buy_order=buy, share_amount=amount)\n raise AssertionError('OrderStatusException expected')\n except OrderStatusException:\n pass\n\n sell.order_status = DEFINITIVE\n buy.order_status = USER_ACCEPTED\n\n try:\n validate_transaction(sell_order=sell, buy_order=buy, share_amount=amount)\n raise AssertionError('OrderStatusException expected')\n except OrderStatusException:\n pass\n\n buy.order_status = DEFINITIVE\n\n # =================================================================\n # test: ShareAmountException\n # =================================================================\n\n sell.order_amount_of_shares = 4\n\n try:\n validate_transaction(sell_order=sell, buy_order=buy, share_amount=amount)\n raise AssertionError('ShareAmountException expected')\n except ShareAmountException:\n pass\n\n sell.order_amount_of_shares = 10\n buy.order_amount_of_shares = 4\n\n try:\n validate_transaction(sell_order=sell, buy_order=buy, share_amount=amount)\n raise AssertionError('ShareAmountException expected')\n except ShareAmountException:\n pass", "def check_errors(self):\n raise NotImplementedError(\"Implement it in a subclass.\")", "def _check_evaluate_implementation(self) -> None:\n logging.debug(f\"Evaluate_batch_defined: {self._evaluate_batch_defined()}.\")\n logging.debug(f\"Evaluate full dataset defined: {self._evaluate_full_dataset_defined()}.\")\n check.not_eq(\n self._evaluate_batch_defined(),\n self._evaluate_full_dataset_defined(),\n \"Please define exactly one of: `evaluate_batch()` or `evaluate_full_dataset()`. 
\"\n \"For most use cases `evaluate_batch()` is recommended is recommended because \"\n \"it can be parallelized across all devices.\",\n )", "def pre_flight_checks(self):\n #=======================================================================\n #\n # TODO: Place any system checks here.\n #\n #=======================================================================\n return True", "def test_raises(self):\n with pytest.raises(ValueError, match=\"The target wire must be contained within wires\"):\n apply_controlled_Q(\n lambda: ..., wires=range(3), target_wire=4, control_wire=5, work_wires=None\n )", "def check(self) -> None:\n # check existence\n self.check_key_exists()\n\n # validate training config\n TrainConfigValidator(self.config[\"TRAIN_CONFIG\"], log=False).check()\n # if different training policy at prune is not specified\n if \"TRAIN_CONFIG_AT_PRUNE\" not in self.config:\n self.config[\"TRAIN_CONFIG_AT_PRUNE\"] = self.config[\"TRAIN_CONFIG\"]\n TrainConfigValidator(self.config[\"TRAIN_CONFIG_AT_PRUNE\"], log=False).check()\n\n # validate prune config\n self.check_prune_methods()\n\n # if SEED is not specified, set it same as training config's SEED\n if \"SEED\" not in self.config:\n self.config[\"SEED\"] = self.config[\"TRAIN_CONFIG\"][\"SEED\"]\n\n assert 0 < self.config[\"N_PRUNING_ITER\"]\n assert isinstance(self.config[\"N_PRUNING_ITER\"], int)", "def test_check_required_fail():\n settings = SettingsModel()\n\n with pytest.raises(InvalidSettings):\n settings.check()", "def test_break(self):\n self.circuit_breaker.failure()\n available1 = self.circuit_breaker.available()\n self.circuit_breaker.failure()\n available2 = self.circuit_breaker.available()\n self.circuit_breaker.failure()\n available3 = self.circuit_breaker.available()\n available4 = self.circuit_breaker.available()\n self.assertEqual((available1, available2, available3, available4),\n (True, True, False, False))", "def final_check(self, test_collection):\n assert True", "def run_checks(self, tile_model: TileModel) -> bool:\n\n # Doge cannot fire the deck gun\n if self.player.role == PlayerRoleEnum.DOGE:\n return False\n\n if not self.player == GameStateModel.instance().players_turn:\n return False\n\n ap_deduct = 2 if self.player.role == PlayerRoleEnum.DRIVER else 4\n\n if not TurnEvent.has_required_AP(self.player.ap, ap_deduct):\n return False\n\n # If the player is not located in the\n # same space as the engine, they cannot\n # fire the deck gun.\n engine_orient = self.engine.orientation\n if engine_orient == VehicleOrientationEnum.HORIZONTAL:\n on_first_spot = self.player.row == self.engine.row and self.player.column == self.engine.column\n on_second_spot = self.player.row == self.engine.row and self.player.column == self.engine.column + 1\n if not on_first_spot and not on_second_spot:\n return False\n\n elif engine_orient == VehicleOrientationEnum.VERTICAL:\n on_first_spot = self.player.row == self.engine.row and self.player.column == self.engine.column\n on_second_spot = self.player.row == self.engine.row + 1 and self.player.column == self.engine.column\n if not on_first_spot and not on_second_spot:\n return False\n\n engine_quadrant = self._determine_quadrant(self.engine.row, self.engine.column)\n tile_input_quadrant = self._determine_quadrant(tile_model.row, tile_model.column)\n # If there are players present in the\n # quadrant, the deck gun cannot be fired.\n # tile input gotta be on quadrant adjacent to engine\n if self._are_players_in_quadrant(engine_quadrant) or tile_input_quadrant != 
engine_quadrant:\n return False\n\n return True", "def check(self) -> None:\n\n raise NotImplementedError", "def test_standard_retry_conditions(case):\n standard_checker = standard.StandardRetryConditions()\n op_model = get_operation_model_with_retries()\n _verify_retryable(standard_checker, op_model, *case)", "def pre_conditions(self, transfer, robot_settings, dilution_settings):\n return []", "def test_empty_condition(self):\n assert_that(Condition.is_valid(''), equal_to(True))", "def test_validate(self):\n pass", "def test_setters(self, name, num_petals, price):\n flower = chap2.Flower('Iris', 8, 3.27)\n with pytest.raises(AssertionError):\n flower.set_name(name)\n with pytest.raises(AssertionError):\n flower.set_num_petals(num_petals)\n with pytest.raises(AssertionError):\n flower.set_price(price)" ]
[ "0.62134707", "0.5922159", "0.5859138", "0.57996106", "0.5779631", "0.57793766", "0.5778328", "0.57430685", "0.5736257", "0.5732914", "0.57291234", "0.5714089", "0.5701453", "0.5699153", "0.5698433", "0.5688214", "0.5600252", "0.55860335", "0.55774295", "0.5557103", "0.5550488", "0.5548981", "0.5548535", "0.55231905", "0.55218583", "0.55168873", "0.5501451", "0.5497198", "0.54865056", "0.54689044", "0.546514", "0.54531026", "0.5449724", "0.5449104", "0.5445683", "0.5434249", "0.54285455", "0.5423758", "0.54198444", "0.5416318", "0.5412846", "0.54059005", "0.540052", "0.53999245", "0.53976965", "0.53915685", "0.53798354", "0.537516", "0.53735304", "0.5371537", "0.5371031", "0.5363464", "0.5358825", "0.5358825", "0.5358825", "0.5358825", "0.5358526", "0.53577983", "0.5355357", "0.53485984", "0.5345819", "0.5335712", "0.5335712", "0.5333428", "0.5330399", "0.5323676", "0.53202486", "0.53166205", "0.531271", "0.53115726", "0.53110296", "0.53105414", "0.53084075", "0.5306909", "0.5300841", "0.52898324", "0.5285784", "0.52856934", "0.52846324", "0.5278666", "0.52757955", "0.52757955", "0.5273943", "0.5270037", "0.52661484", "0.5260502", "0.52580863", "0.5249746", "0.52462023", "0.5246124", "0.524477", "0.5240134", "0.5239934", "0.5237989", "0.5236778", "0.5235987", "0.52350867", "0.52273613", "0.522138", "0.5219111" ]
0.66464823
0
Test of cooking the same product twice. Test passes if the second cooking of the same product raises ValueError
def test_cook_twice(cook_not_busy, product_for_cook):

    cook_not_busy.cook_dish(product_for_cook)
    with pytest.raises(ValueError):
        cook_not_busy.cook_dish(product_for_cook)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def checker(self, product):\n for item in self.instock:\n if item == product:\n return True\n return False", "def test_buyTicket_AlreadySold():\n assert not testUser2.buyTicket(testTicket1)\n assert testTicket1 in testUser1.inventory\n assert testTicket1 not in testUser2.inventory\n assert not testTicket1.for_sale\n assert testUser2.wallet == 500", "def test_create_same_product(self):\n url = reverse('products:list')\n data = {\n 'name': 'Eggs',\n 'description': '''\n Bird and reptile eggs consist of a protective eggshell,\n albumen (egg white), and vitellus (egg yolk),\n contained within various thin membranes.\n The most commonly consumed eggs are chicken eggs.\n Other poultry eggs including those of duck and quail\n also are eaten.\n '''\n }\n product_count_before = models.Product.objects.count()\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(models.Product.objects.count(), product_count_before)", "def test_DECISION_repeat_conflict(self, commit):\n self.assertRaises(AssertionError, lambda:\n self.node.fake_message(Decision(slot=1, proposal=PROPOSAL2)))", "def test_product_buy_more_then_have(self):\n result_buy = self.info_list.product_buy(\"соль 1 кг\", 50)\n self.assertFalse(result_buy)", "def test_phonebook_with_duplicate_entries_is_inconsostent(self):\n self.phonebook.add(\"Bob\", \"12345\")\n self.phonebook.add(\"Mary\", \"12345\")\n self.assertFalse(self.phonebook.is_consistent())", "def test_6(self):\n toothpaste = Store.Product(11, \"toothpaste\", \"dental\", 2, 4)\n milk = Store.Product(12, \"milk\", \"dairy\", 2, 3)\n eggs = Store.Product(14, \"eggs\", \"dairy\", 2, 2)\n apple_juice = Store.Product(13, \"apple juice\", \"drink\", 1, 1)\n\n s = Store.Store()\n s.add_product(toothpaste)\n s.add_product(milk)\n s.add_product(eggs)\n s.add_product(apple_juice)\n\n henry = Store.Customer(\"henry\", \"mrh\", False)\n s.add_member(henry)\n\n s.add_product_to_member_cart(11, \"mrh\")\n s.add_product_to_member_cart(12, \"mrh\")\n s.add_product_to_member_cart(14, \"mrh\")\n self.assertAlmostEqual(s.check_out_member(\"mrh\"), 6.42, \"not the correct checkout amount\")", "def test_save_item_with_duplicate_value_on_unique_field_raises(\n test_store, andy, pandy, candy\n):\n\n person = next(test_store.get_by(name=\"Andy\"))\n person.name = \"Pandy\"\n\n with pytest.raises(NotUniqueException):\n test_store.save(person)\n\n items = list(test_store.get_by())\n assert len(items) == 3\n assert andy in items\n assert pandy in items\n assert candy in items", "def checker(product):\n for item in INSTOCK:\n if item == product:\n return True\n return False", "def test_cart_creation_duplicate_name(self):\n cart_name = 'cart name'\n self.cart_item_manager.create_cart('123', cart_name, False)\n self.cart_item_manager.create_cart('124', cart_name, False)\n with self.assertRaises(DuplicateItemError):\n self.cart_item_manager.create_cart('123', cart_name, False)", "def test_market_1_2(self):\n\n def check_1_2(buyers: List[float], sellers: List[float], expected_num_of_deals: int,\n expected_prices: List[float]):\n market = Market([\n AgentCategory(\"buyer\", buyers),\n AgentCategory(\"seller\", sellers),\n ])\n ps_recipe = [1, 2]\n self._check_market(market, ps_recipe, expected_num_of_deals, expected_prices)\n\n check_1_2(buyers=[9], sellers=[-4, -3],\n expected_num_of_deals=0, expected_prices=[9, -4.5])\n check_1_2(buyers=[9, 8, 7, 6], sellers=[-6, -5, -4, -3, -2, -1],\n expected_num_of_deals=1, 
expected_prices=[8, -4])\n check_1_2(buyers=[9, 8], sellers=[-4, -3, -2, -1],\n expected_num_of_deals=1, expected_prices=[8, -4])\n check_1_2(buyers=[9, 8], sellers=[-6, -3, -2, -1],\n expected_num_of_deals=1, expected_prices=[8, -4])\n check_1_2(buyers=[9, 8], sellers=[-4, -3, -2, -1],\n expected_num_of_deals=1, expected_prices=[8, -4])\n\n # PRICE CROSSES ZERO AT FIRST PHASE\n check_1_2(buyers=list(range(20)), sellers=[-3, -2, -1],\n expected_num_of_deals=1, expected_prices=[18, -9])", "def test_update_cart_name_duplicate(self):\n user_id = '123'\n cart_id = self.cart_item_manager.create_cart(user_id, 'Cart1', False)\n self.cart_item_manager.create_cart(user_id, 'Cart2', False)\n with self.assertRaises(DuplicateItemError):\n self.cart_item_manager.update_cart(user_id, cart_id, {'CartName': 'Cart2'})", "def test_duplicate_entries(self):", "def test_check_for_existing_reaction_keeps_identical_reactions_with_duplicate_flag(self):\n cerm = CoreEdgeReactionModel()\n\n # make species' objects\n spcA = Species().from_smiles('[H]')\n spcB = Species().from_smiles('C=C[CH2]C')\n spcC = Species().from_smiles('C=C=CC')\n spcD = Species().from_smiles('[H][H]')\n spcA.label = '[H]'\n spcB.label = 'C=C[CH2]C'\n spcC.label = 'C=C=CC'\n spcD.label = '[H][H]'\n spcB.generate_resonance_structures()\n\n cerm.add_species_to_core(spcA)\n cerm.add_species_to_core(spcB)\n cerm.add_species_to_core(spcC)\n cerm.add_species_to_core(spcD)\n\n reaction_in_model = TemplateReaction(reactants=[spcA, spcB],\n products=[spcC, spcD],\n family='H_Abstraction',\n template=['Csd', 'H'],\n duplicate=True)\n reaction_in_model.reactants.sort()\n reaction_in_model.products.sort()\n\n reaction_to_add = TemplateReaction(reactants=[spcA, spcB],\n products=[spcC, spcD],\n family='H_Abstraction',\n template=['Cs12345', 'H'],\n duplicate=True)\n cerm.add_reaction_to_core(reaction_in_model)\n cerm.register_reaction(reaction_in_model)\n\n found, rxn = cerm.check_for_existing_reaction(reaction_to_add)\n\n self.assertFalse(found, 'check_for_existing_reaction failed to identify duplicate template reactions')", "def test_allow_multiples(self):\r\n o1 = self.b1.get(self.key)\r\n o2 = self.b2.get(self.key)\r\n\r\n o1.set_data(\"object-1\")\r\n o1.store()\r\n o2.set_data(\"object-2\")\r\n o2.store()\r\n\r\n conflicted = self.b1.get(self.key)\r\n siblings = filter(bool, (s.get_data() for s in conflicted.get_siblings()))\r\n self.assertEqual(len(siblings), 2)", "def test_product_buy_missing_goods(self):\n result_buy = self.info_list.product_buy(\"хлеб серый хлебозавод\", 3)\n self.assertFalse(result_buy)", "def test_multiple_creates_do_not_increase_products(self):\n for i in xrange(0, 10):\n modified_po = copy.deepcopy(base_purchase_order)\n self.assertEqual(Supply.objects.get(pk=1).quantity, 10)\n \n resp = self.client.post('/api/v1/purchase-order/', format='json', data=modified_po)\n \n self.assertEqual(resp.status_code, 201, msg=resp)\n \n po_data = resp.data\n self.assertEqual(po_data['status'], 'AWAITING APPROVAL')\n\n item1 = po_data['items'][0]\n #self.assertEqual(item1['supply']['id'], 1)\n self.assertEqual(item1['status'], u'Ordered')\n\n item2 = po_data['items'][1]\n #self.assertEqual(item1['supply']['id'], 2)\n self.assertEqual(item1['status'], u'Ordered')\n \n #Test database values\n po = PurchaseOrder.objects.get(pk=resp.data['id'])\n self.assertEqual(po.status, 'AWAITING APPROVAL')\n for item in po.items.all():\n self.assertEqual(item.status, u\"Ordered\")\n \n supplier = Supplier.objects.get(pk=1)\n\n supply = 
Supply.objects.get(pk=1)\n self.assertEqual(supply.quantity, 10)\n self.assertEqual(supply.products.filter(supplier=supplier).count(), 1)\n\n supply = Supply.objects.get(pk=2)\n self.assertEqual(supply.quantity, 10)\n self.assertEqual(supply.products.filter(supplier=supplier).count(), 1)", "def check_duplicate(self, state):\n pass", "def test_unique_together(self):\n form = PriceForm({'price': '6.00', 'quantity': '1'})\n self.assertTrue(form.is_valid())\n form.save()\n form = PriceForm({'price': '6.00', 'quantity': '1'})\n self.assertFalse(form.is_valid())\n self.assertEqual(len(form.errors), 1)\n self.assertEqual(form.errors['__all__'], ['Price with this Price and Quantity already exists.'])", "def test_add_with_not_right_shelf_life(self):\n good = GoodInfo(\"яйцо 1 кат.\", \"-30\", \"40\", \"2020-12-30\", \n \"-14\", \"2020-12-30\")\n check_product_data = self.database.add(good)\n\n self.assertFalse(check_product_data)", "def test_not_repeat_combination(self):\n self.assertTrue(\"-Yao Ming Zydrunas Ilgauskas\", show_players_sumheigh_is_input(177))\n self.assertFalse(show_players_sumheigh_is_input(177), \"-Zydrunas Ilgauskas Yao Ming\")", "def test_check_for_existing_reaction_eliminates_identical_reactions_without_duplicate_flag(self):\n cerm = CoreEdgeReactionModel()\n\n # make species' objects\n spcA = Species().from_smiles('[H]')\n spcB = Species().from_smiles('C=C[CH2]C')\n spcC = Species().from_smiles('C=C=CC')\n spcD = Species().from_smiles('[H][H]')\n spcA.label = '[H]'\n spcB.label = 'C=C[CH2]C'\n spcC.label = 'C=C=CC'\n spcD.label = '[H][H]'\n spcB.generate_resonance_structures()\n\n cerm.add_species_to_core(spcA)\n cerm.add_species_to_core(spcB)\n cerm.add_species_to_core(spcC)\n cerm.add_species_to_core(spcD)\n\n reaction_in_model = TemplateReaction(reactants=[spcA, spcB],\n products=[spcC, spcD],\n family='H_Abstraction',\n template=['Csd', 'H'],\n duplicate=False)\n reaction_in_model.reactants.sort()\n reaction_in_model.products.sort()\n\n reaction_to_add = TemplateReaction(reactants=[spcA, spcB],\n products=[spcC, spcD],\n family='H_Abstraction',\n template=['Cs12345', 'H'],\n duplicate=False)\n cerm.add_reaction_to_core(reaction_in_model)\n cerm.register_reaction(reaction_in_model)\n\n found, rxn = cerm.check_for_existing_reaction(reaction_to_add)\n\n self.assertTrue(found, 'check_for_existing_reaction failed to eliminate reactions without duplicate tag')", "def test_validate_duplicate_wire(self, circuit):\n with pytest.raises(ValueError, match=\"Wire ID 0 is specified more than once.\"):\n circuit._validate_wire_ids(wire_ids=[0, 0])", "def test_add_item_with_duplicate_value_on_unique_field_raises(\n test_store, andy, pandy, candy\n):\n\n person_with_duplicate_name = Person(name=\"Andy\", age=80)\n\n with pytest.raises(NotUniqueException):\n test_store.add(person_with_duplicate_name)\n\n items = list(test_store.get_by())\n assert len(items) == 3\n assert andy in items\n assert pandy in items\n assert candy in items", "def test_busy_cook(cook_busy, product_for_cook):\n with pytest.raises(CustomWarning):\n assert cook_busy.cook_dish(product_for_cook)", "def test_cart_creation_duplicate_default_will_not_create_new_cart(self):\n test_user_id = '123'\n cart_id_1 = self.cart_item_manager.create_cart(test_user_id, 'Cart1', True)\n cart_id_2 = self.cart_item_manager.create_cart(test_user_id, 'Cart3', True)\n self.assertEqual(cart_id_1, cart_id_2)\n self.assertEqual(1, len(self.cart_item_manager.get_user_carts(test_user_id)))", "def test_get_similar_recipes(self):\n pass", "def 
test_check_for_duplicates_with_duplicates(self):\n quotes = [api.Quote(\" This is an added quote.\", \"Another author\", \"Publication\", [\"tag1, tag2\"]),\n api.Quote(\" This is an added quote.\", \"Another author2\", \"Publication\", [\"tag1, tag2\"]),\n api.Quote(\" This is an added quote.\", \"Another author3\", \"Publication\", [\"tag1, tag2\"])]\n\n with self.assertRaisesRegexp(Exception, \"a duplicate quote was found on line 2 of 'stdin'. \"\n \"Quote: \\\"This is an added quote.\\\".\"):\n\n api._check_for_duplicates(quotes, \"stdin\")", "def products_made(self, product) -> bool:\n return self.product_idx(product) is not None", "def _check_sn_uniqueness(self):\n if self.product_tracking == 'serial' and self.lot_producing_id:\n sml = self.env['stock.move.line'].search_count([\n ('lot_id', '=', self.lot_producing_id.id),\n ('location_id.usage', '=', 'production'),\n ('qty_done', '=', 1),\n ('state', '=', 'done')\n ])\n if sml:\n raise UserError(_('This serial number for product %s has already been produced', self.product_id.name))\n\n for move in self.move_finished_ids:\n if move.has_tracking != 'serial' or move.product_id == self.product_id:\n continue\n for move_line in move.move_line_ids:\n domain = [\n ('lot_id', '=', move_line.lot_id.id),\n ('qty_done', '=', 1),\n ('state', '=', 'done')\n ]\n message = _('The serial number %(number)s used for byproduct %(product_name)s has already been produced',\n number=move_line.lot_id.name,\n product_name=move_line.product_id.name)\n co_prod_move_lines = self.move_finished_ids.move_line_ids.filtered(lambda ml: ml.product_id != self.product_id)\n domain_unbuild = domain + [\n ('production_id', '=', False),\n ('location_dest_id.usage', '=', 'production')\n ]\n\n # Check presence of same sn in previous productions\n duplicates = self.env['stock.move.line'].search_count(domain + [\n ('location_id.usage', '=', 'production')\n ])\n if duplicates:\n # Maybe some move lines have been compensated by unbuild\n duplicates_unbuild = self.env['stock.move.line'].search_count(domain_unbuild + [\n ('move_id.unbuild_id', '!=', False)\n ])\n if not (duplicates_unbuild and duplicates - duplicates_unbuild == 0):\n raise UserError(message)\n # Check presence of same sn in current production\n duplicates = co_prod_move_lines.filtered(lambda ml: ml.qty_done and ml.lot_id == move_line.lot_id) - move_line\n if duplicates:\n raise UserError(message)\n\n for move in self.move_raw_ids:\n if move.has_tracking != 'serial':\n continue\n for move_line in move.move_line_ids:\n if float_is_zero(move_line.qty_done, precision_rounding=move_line.product_uom_id.rounding):\n continue\n domain = [\n ('lot_id', '=', move_line.lot_id.id),\n ('qty_done', '=', 1),\n ('state', '=', 'done')\n ]\n message = _('The serial number %(number)s used for component %(component)s has already been consumed',\n number=move_line.lot_id.name,\n component=move_line.product_id.name)\n co_prod_move_lines = self.move_raw_ids.move_line_ids\n domain_unbuild = domain + [\n ('production_id', '=', False),\n ('location_id.usage', '=', 'production')\n ]\n\n # Check presence of same sn in previous productions\n duplicates = self.env['stock.move.line'].search_count(domain + [\n ('location_dest_id.usage', '=', 'production')\n ])\n if duplicates:\n # Maybe some move lines have been compensated by unbuild\n duplicates_unbuild = self.env['stock.move.line'].search_count(domain_unbuild + [\n ('move_id.unbuild_id', '!=', False)\n ])\n if not (duplicates_unbuild and duplicates - duplicates_unbuild == 0):\n raise 
UserError(message)\n # Check presence of same sn in current production\n duplicates = co_prod_move_lines.filtered(lambda ml: ml.qty_done and ml.lot_id == move_line.lot_id) - move_line\n if duplicates:\n raise UserError(message)", "def test_single_quant_assign_correct_quant(self):\n Quant = self.env[\"stock.quant\"]\n\n # Create a bunch of identical quants in the same location\n quants = Quant.browse()\n for i in range(5):\n quants |= self.create_quant(self.apple.id, self.test_stock_location_01.id, 10)\n self.assertEqual(len(quants), 5)\n\n quant = quants[2]\n pick = quant.create_picking(self.picking_type_pick, confirm=True, assign=True)\n self.assertEqual(pick.state, \"assigned\")\n self.assertEqual(quant.reserved_quantity, 10)", "def test_only_two_card_petitions(self):\n f = gtrutils.check_petition_combos\n\n self.assertTrue( f( 0, 0, [0], True, False))\n\n self.assertFalse( f( 1, 0, [], True, False))\n self.assertFalse( f( 1, 0, [1], True, False))\n self.assertTrue( f( 1, 0, [2], True, False))\n self.assertFalse( f( 1, 0, [3], True, False))\n self.assertFalse( f( 1, 0, [4], True, False))\n\n self.assertTrue( f( 1, 1, [], True, False))\n self.assertFalse( f( 1, 1, [2], True, False))\n\n self.assertFalse( f( 2, 0, [2], True, False))\n self.assertFalse( f( 2, 0, [3], True, False))\n self.assertTrue( f( 2, 0, [4], True, False))\n self.assertFalse( f( 2, 0, [5], True, False))\n \n self.assertTrue( f( 2, 1, [2], True, False))\n self.assertFalse( f( 2, 1, [3], True, False))\n self.assertFalse( f( 2, 1, [4], True, False))\n\n self.assertTrue( f(13, 26, [], True, False))\n self.assertTrue( f(13, 0, [26], True, False))\n self.assertTrue( f(13, 14, [12], True, False))\n self.assertTrue( f(13, 13, [10], True, False))\n self.assertFalse( f(13, 15, [11], True, False))\n\n self.assertFalse( f( 6, 1, [2,4,6], True, False))\n self.assertTrue( f( 7, 1, [2,4,6], True, False))\n self.assertFalse( f( 8, 1, [2,4,6], True, False))", "def test_create_id_dupe(self):\r\n user = random.getrandbits(32)\r\n courses = modulestore().get_courses()\r\n with self.assertRaises(DuplicateCourseError):\r\n dupe_course_key = courses[0].location.course_key\r\n modulestore().create_course(dupe_course_key.org, dupe_course_key.offering, user)", "def test_compute_correlation_paired_incompatible_samples(self):\r\n self.assertRaises(ValueError, _compute_correlation,\r\n self.taxa_summary1, self.taxa_summary3, 'paired',\r\n 'spearman', 'high', 9, 0.22222)", "def test_is_valid_return_only_good_products(self):\n self.assertTrue(ProductValidator().is_valid(self.good_product))\n self.assertFalse(ProductValidator().is_valid(self.bad_product))", "def testduplicate(self):\n a = AttributeAbility(['ST',], 3)\n self.assertTrue(a.duplicate(a))\n self.assertTrue(a.duplicate(AttributeAbility(['ST',], 3)))\n self.assertTrue(a.duplicate(AttributeAbility(['ST',], 5)))\n self.assertFalse(a.duplicate(AttributeAbility(['DX',], 5)))", "def is_satisfied(self, item: Product) -> bool:\n return item.size == self.size", "def test_confirm_duplicated_consent(self):\n # We create the flow request\n res = self._add_flow_request(flow_request=self.flow_request)\n confirm_id = res.json()['confirm_id']\n process_id = res.json()['process_id']\n callback_url = 'http://127.0.0.1/'\n\n # Then we login as mouse since the mock is configured to return 400 with \"mouse\" login\n self.client.login(username='mouse', password='duck')\n # Then we confirm the request.\n res = self.client.get('/v1/flow_requests/confirm/?confirm_id={}&callback_url={}&action=add'.format(\n 
confirm_id, callback_url))\n self.assertRedirects(res, \"{}?process_id={}&success=false&error={}\".format(callback_url, process_id,\n ERRORS_MESSAGE['ALL_CONSENTS_ALREADY_CREATED']),\n fetch_redirect_response=False)", "def test_identical(self):\n write this test!", "def test_single_quant(self):\n pick = self.quant_1.create_picking(self.picking_type_pick)\n # Confirm made in state draft\n self.assertEqual(pick.state, \"draft\")\n # Confirm quant location used if non specified\n self.assertEqual(pick.location_id, self.test_stock_location_01)\n # Confirm default dest location used if non specified\n self.assertEqual(pick.location_dest_id, self.picking_type_pick.default_location_dest_id)\n # Confirm correct picking type id associated\n self.assertEqual(pick.picking_type_id, self.picking_type_pick)\n # Check default priority is 0 = 'Normal'\n self.assertEqual(pick.priority, \"0\")\n # Check picking has correct products associated to it\n self.assertEqual(pick.product_id, self.apple)\n # Check picking has correct quantities associated to it\n self.assertEqual(pick.move_lines.product_id, self.apple)\n self.assertEqual(pick.move_lines.product_qty, 10)", "def test_cannot_sell_more_than_stock(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":15\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Only 10 NY_denims available right now!')\n self.assertEqual(resp.status_code, 400)", "def test_add_with_end_shelf_life(self):\n good = GoodInfo(\"яйцо 1 кат.\", \"-30\", \"40\", \"2020-12-1\", \n \"3\", \"2020-12-1\")\n check_product_data = self.database.add(good)\n\n self.assertFalse(check_product_data)", "def test_is_consistent(self):\n self.assertTrue(self.phonebook.is_consistent())\n self.phonebook.add(\"Bob\", \"12345\")\n self.assertTrue(self.phonebook.is_consistent())\n self.phonebook.add(\"Mary\", \"012345\")\n self.assertTrue(self.phonebook.is_consistent())\n self.phonebook.add(\"Sue\", \"12345\") # identical to Bob\n self.assertTrue(self.phonebook.is_consistent())\n self.phonebook.add(\"Sue\", \"123\") # prefix of Bob\n self.assertTrue(self.phonebook.is_consistent())", "def test_raises(self):\n no_replicates = 25\n try:\n replicate(experiment3, no_replicates)\n except RuntimeError as err:\n self.assertEqual(err, FAKE_ERROR)\n else:\n assert False", "def test_roll_once(self):\n\n self.assertIn(self.new_die.roll(), self.possible_values, \"Rolled value was not in possible die values\")", "def test_check_for_existing_reaction_eliminates_identical_reactions(self):\n cerm = CoreEdgeReactionModel()\n\n # make species' objects\n spcA = Species().from_smiles('[H]')\n spcB = Species().from_smiles('C=C[CH2]C')\n spcC = Species().from_smiles('C=C=CC')\n spcD = Species().from_smiles('[H][H]')\n spcA.label = '[H]'\n spcB.label = 'C=C[CH2]C'\n 
spcC.label = 'C=C=CC'\n spcD.label = '[H][H]'\n spcB.generate_resonance_structures()\n\n cerm.add_species_to_core(spcA)\n cerm.add_species_to_core(spcB)\n cerm.add_species_to_core(spcC)\n cerm.add_species_to_core(spcD)\n\n reaction_in_model = TemplateReaction(reactants=[spcA, spcB],\n products=[spcC, spcD],\n family='H_Abstraction',\n template=['Csd', 'H'])\n reaction_in_model.reactants.sort()\n reaction_in_model.products.sort()\n\n reaction_to_add = TemplateReaction(reactants=[spcA, spcB],\n products=[spcC, spcD],\n family='H_Abstraction',\n template=['Csd', 'H'])\n cerm.add_reaction_to_core(reaction_in_model)\n cerm.register_reaction(reaction_in_model)\n\n found, rxn = cerm.check_for_existing_reaction(reaction_to_add)\n\n self.assertTrue(found, 'check_for_existing_reaction failed to identify existing reaction')", "def test_post_duplicate_question(self):\n self.post_question(self.valid_question2)\n\n\n response = self.post_question(self.valid_question2)\n self.assertEqual(response.status_code, 400)", "def testProtractedNSESanityChecks(self):\n self.assertGreater(self.c3.get_species_richness(1), self.c2.get_species_richness(1))\n self.assertLess(self.c4.get_species_richness(1), self.c3.get_species_richness(1))", "def test_repeated_calls_different_quotes(self):\n quoteSet = set()\n for i in range(5):\n quoteSet.add(getRandomJoke()[\"joke\"])\n self.assertEqual(len(quoteSet) > 1, True)", "def test_cant_create_order_twice(self):\n\t\to2 = BuyInfluenceOrder(\n\t\t\tplayer=self.p\n\t\t)\n\n\t\tself.assertRaises(OrderNotAvailable, o2.clean)", "def check_duplicate(triple: str, result: List[str]) -> bool:\n fields = triple.strip().split(', ')\n assert len(fields) == 13\n assert fields[9] == 'BERT'\n psuedo_triple = fields[:11]\n psuedo_triple[9] = 'RELEVANCE'\n return ', '.join(psuedo_triple) in result", "def test_duplicate_equality(self):\r\n def duplicate_and_verify(source_usage_key, parent_usage_key):\r\n usage_key = self._duplicate_item(parent_usage_key, source_usage_key)\r\n self.assertTrue(check_equality(source_usage_key, usage_key), \"Duplicated item differs from original\")\r\n\r\n def check_equality(source_usage_key, duplicate_usage_key):\r\n original_item = self.get_item_from_modulestore(source_usage_key, draft=True)\r\n duplicated_item = self.get_item_from_modulestore(duplicate_usage_key, draft=True)\r\n\r\n self.assertNotEqual(\r\n original_item.location,\r\n duplicated_item.location,\r\n \"Location of duplicate should be different from original\"\r\n )\r\n # Set the location and display name to be the same so we can make sure the rest of the duplicate is equal.\r\n duplicated_item.location = original_item.location\r\n duplicated_item.display_name = original_item.display_name\r\n\r\n # Children will also be duplicated, so for the purposes of testing equality, we will set\r\n # the children to the original after recursively checking the children.\r\n if original_item.has_children:\r\n self.assertEqual(\r\n len(original_item.children),\r\n len(duplicated_item.children),\r\n \"Duplicated item differs in number of children\"\r\n )\r\n for i in xrange(len(original_item.children)):\r\n if not check_equality(original_item.children[i], duplicated_item.children[i]):\r\n return False\r\n duplicated_item.children = original_item.children\r\n\r\n return original_item == duplicated_item\r\n\r\n duplicate_and_verify(self.problem_usage_key, self.seq_usage_key)\r\n duplicate_and_verify(self.html_usage_key, self.seq_usage_key)\r\n duplicate_and_verify(self.seq_usage_key, self.chapter_usage_key)\r\n 
duplicate_and_verify(self.chapter_usage_key, self.usage_key)", "def test_gather_success(self):\n gathered_items = self.Quant._gather(self.apple, self.test_stock_location_01)\n # Check the number of apple quants returned is correct\n self.assertEqual(len(gathered_items), 3)\n # Check that the products are all of expected type\n self.assertEqual(gathered_items.product_id, self.apple)\n\n # Unfold the returned quants\n _q1, second_quant, _q2 = gathered_items\n # Check when quant_ids is set in the context\n gathered_items_subset = self.Quant.with_context(quant_ids=[second_quant.id])._gather(\n self.apple, self.test_stock_location_01\n )\n self.assertEqual(len(gathered_items_subset), 1)\n self.assertEqual(gathered_items_subset.product_id, self.apple)\n self.assertEqual(gathered_items_subset, second_quant)", "def test_add_to_cart_item_not_in_system(self):\n # test sale products not in db\n\n response = self.client.get(\n '/self.base_url/sales/1999/2',\n headers=dict(Authorization=\"Bearer \" + self.attendant_token),\n content_type = 'application/json'\n )\n\n response_data = json.loads(response.data)\n self.assertEqual(response_data['message'],\"This product does not exist\")\n self.assertEqual(response.status_code,200)\n\n\n # test add item which is at minimum stock", "def test_check_ticket_2(self):\n import_genome.check_ticket(\n self.tkt, type_set=self.type_set,\n description_field_set=self.description_field_set,\n eval_mode_set=self.eval_mode_set,\n id_dupe_set=set([1]), phage_id_dupe_set=set(),\n retain_set=self.retain_set, retrieve_set=self.retrieve_set,\n add_set=self.add_set, parse_set=self.parse_set)\n count = count_status(self.tkt, \"error\")\n self.assertEqual(count, 1)", "def test_no_oppose_different_sectors(self):\n battle = self.battle\n self.bob.sector = 7\n\n s1 = battle.create_skirmish(self.alice, 2)\n prev_skirmishes = self.sess.query(db.SkirmishAction).count()\n\n with self.assertRaises(db.WrongSectorException):\n s1.react(self.bob, 2)\n\n now_skirmishes = self.sess.query(db.SkirmishAction).count()\n self.assertEqual(prev_skirmishes, now_skirmishes)", "def testProtractedPostApplicationSanityChecks(self):\n self.assertLess(self.c.get_species_richness(1), self.c.get_species_richness(3))\n self.assertLess(self.c.get_species_richness(2), self.c.get_species_richness(4))\n self.assertLess(self.c.get_species_richness(5), self.c.get_species_richness(3))\n self.assertLess(self.c.get_species_richness(6), self.c.get_species_richness(4))\n self.assertEqual(4, self.c.get_species_richness(1))\n self.assertEqual(4, self.c.get_species_richness(2))\n self.assertEqual(7, self.c.get_species_richness(3))\n self.assertEqual(7, self.c.get_species_richness(4))\n self.assertEqual(4, self.c.get_species_richness(5))\n self.assertEqual(4, self.c.get_species_richness(6))\n self.assertEqual(21, self.c.get_species_richness(7))\n self.assertEqual(38, self.c.get_species_richness(8))", "def testProtractedPostApplicationSanityChecks(self):\n self.assertLess(self.c.get_species_richness(1), self.c.get_species_richness(3))\n self.assertLess(self.c.get_species_richness(2), self.c.get_species_richness(4))\n self.assertLess(self.c.get_species_richness(5), self.c.get_species_richness(3))\n self.assertLess(self.c.get_species_richness(6), self.c.get_species_richness(4))\n self.assertEqual(4, self.c.get_species_richness(1))\n self.assertEqual(4, self.c.get_species_richness(2))\n self.assertEqual(7, self.c.get_species_richness(3))\n self.assertEqual(7, self.c.get_species_richness(4))\n self.assertEqual(4, 
self.c.get_species_richness(5))\n self.assertEqual(4, self.c.get_species_richness(6))\n self.assertEqual(21, self.c.get_species_richness(7))\n self.assertEqual(38, self.c.get_species_richness(8))", "def testProtractedPostApplicationSanityChecks(self):\n self.assertLess(self.c.get_species_richness(1), self.c.get_species_richness(3))\n self.assertLess(self.c.get_species_richness(2), self.c.get_species_richness(4))\n self.assertLess(self.c.get_species_richness(5), self.c.get_species_richness(3))\n self.assertLess(self.c.get_species_richness(6), self.c.get_species_richness(4))\n self.assertEqual(4, self.c.get_species_richness(1))\n self.assertEqual(4, self.c.get_species_richness(2))\n self.assertEqual(7, self.c.get_species_richness(3))\n self.assertEqual(7, self.c.get_species_richness(4))\n self.assertEqual(4, self.c.get_species_richness(5))\n self.assertEqual(4, self.c.get_species_richness(6))\n self.assertEqual(21, self.c.get_species_richness(7))\n self.assertEqual(38, self.c.get_species_richness(8))", "def is_satisfied(self, item: Product) -> bool:\n return item.colour == self.colour", "def test_book_uniqueness(self):\n good_book = Book(key=\"bookkey/999999\",title=\"Romeo and Juliet\", author=\"Shakespeare\", description=\"Two star crossed romantics\", \n subjects=\"Romance, Fiction\")\n db.session.add(good_book)\n db.session.commit()\n\n invalid_book = Book(key=\"bookkey/999999\",title=\"Romeo and Juliet\", author=\"Shakespeare\", description=\"Two star crossed romantics\", \n subjects=\"Romance, Fiction\")\n #if we try to commit another book with the same key, it will raise error\n with self.assertRaises(exc.IntegrityError) as context:\n db.session.add(invalid_book)\n db.session.commit()\n #with exception need to rollback commit\n db.session.rollback()", "def test_02_product_update(self):\n # Update new product state2 from default draft to sellable\n new_product = self.create_product()\n self.assertEqual(new_product.state2, 'draft')\n new_product.state2 = 'sellable'\n self.assertEqual(new_product.state2, 'sellable')\n\n # Same but to an existing demo product.\n demo_product = self.product_obj.browse(\n self.ref('product_lifecycle.product_product_4g'))\n self.assertEqual(demo_product.state2, 'sellable')\n demo_product.state2 = 'draft'\n self.assertEqual(demo_product.state2, 'draft')\n\n # Update new product invividual field (field defined in product.product\n # model).\n self.assertEqual(new_product.default_code, 'A2330')\n new_product.default_code = 'A2330-1'\n self.assertEqual(new_product.default_code, 'A2330-1')\n\n # Same but to an existing demo product.\n self.assertEqual(demo_product.default_code, 'A2329')\n demo_product.default_code = 'A2329-1'\n self.assertEqual(demo_product.default_code, 'A2329-1')\n\n # Update new product commom characteristic (field defined in\n # product.template) and check that affects the another product\n # variants\n self.assertFalse(new_product.description)\n new_product.description = 'This is a New Product'\n self.assertEqual(new_product.description, 'This is a New Product')\n self.assertEqual(demo_product.description, 'This is a New Product')\n demo_product.description = False\n self.assertFalse(demo_product.description)", "def test_creation_of_duplicate_service_in_store(self):\n create_store = self.client.post(create_store_url, data=json.dumps(self.shop_zero), headers=self.my_header)\n store_id = json.loads(create_store.data)\n store_id = json.loads(store_id['store_id'])\n store_id = store_id['$oid']\n response2 = self.client.post(store_url + store_id 
+ '/service/',\n data=json.dumps(self.service_zero),\n headers=self.my_header)\n response3 = self.client.post(store_url + store_id + '/service/',\n data=json.dumps(self.service_zero),\n headers=self.my_header)\n self.assertEqual(response3.status, \"409 CONFLICT\")\n self.assertIn(\"Sorry. Live at the yard already exists in this store.\", str(response3.data))", "def test_teacher_check_homework_raises_homework_repeat_error_if_same_solution_was_already_submitted():\n with pytest.raises(HomeworkRepeatError):\n opp_teacher.check_homework(result_1)\n advanced_python_teacher.check_homework(result_1)\n Teacher.reset_results(oop_hw)", "def test_cook_set_free(cook_busy, product_for_cook):\n cook_busy.set_free(True)\n # if product needs to be cooked\n assert product_for_cook.get_need_cook_status() is True\n cook_busy.cook_dish(product_for_cook)\n assert product_for_cook.get_need_cook_status() is False", "def test_multiple_quants(self):\n # Get all quants in test package\n quants = self.quant_1 | self.quant_2\n pick = quants.create_picking(self.picking_type_pick)\n # Check picking has correct location\n self.assertEqual(pick.location_id, self.stock_location)\n # Check picking has correct products and quantities associated to it\n self.assertEqual(pick.move_lines.product_id, quants.product_id)\n self.assertEqual(pick.move_lines.mapped(\"product_qty\"), [10.0, 10.0])", "def test_product_not_available_by_stock(self):\n product = ProductFactory(stock_amount=2)\n\n for i in range(2):\n opr = OrderProductRelationFactory(product=product)\n order = opr.order\n order.paid = True\n order.save()\n\n self.assertEqual(product.left_in_stock, 0)\n self.assertFalse(product.is_stock_available)\n self.assertFalse(product.is_available())", "def test03_unique_stakeholdercategory(self):\n city1 = CaseStudyFactory(name='City1')\n city2 = CaseStudyFactory(name='City1')\n stakeholdercat1 = StakeholderCategoryFactory(\n casestudy=city1, name='Cat1')\n stakeholdercat2 = StakeholderCategoryFactory(\n casestudy=city1, name='Cat2')\n stakeholdercat3 = StakeholderCategoryFactory(\n casestudy=city2, name='Cat1')\n\n with self.assertRaisesMessage(\n ValidationError,\n 'StakeholderCategory Cat1 already exists in casestudy City1',\n ) as err:\n stakeholdercat3 = StakeholderCategoryFactory(\n casestudy=city2, name='Cat1')\n\n stakeholder_categories = city1.stakeholder_categories\n self.assertSetEqual(stakeholder_categories, {stakeholdercat1,\n stakeholdercat2})", "def test_single_quant_confirm(self):\n pick = self.quant_1.create_picking(self.picking_type_pick, confirm=True)\n # Check it is confirmed\n self.assertEqual(pick.state, \"confirmed\")", "def confirm_harvest_pickle(before, after):\n assert after.strategy.balanceOf > before.strategy.balanceOf\n assert after.strategy.pickleBalance == 0\n assert after.strategy.stakingRewards.stakedPickle == 0\n if before.sett.pricePerFullShare:\n assert after.sett.pricePerFullShare > before.sett.pricePerFullShare", "def _test_validate_c_tag_on_same_s_tag(self):\n s = Mock()\n s.id = 123\n s.c_tag = 111\n s.s_tag = 222\n s.onu_device = \"BRCM1234\"\n\n with self.assertRaises(Exception) as e:\n self.rcord_subscriber.save()\n\n self.assertEqual(e.exception.message, \"The c_tag you specified (111) has already been used by Subscriber with id 123 and the same s_tag: 222\")\n self.models_decl.RCORDSubscriber_decl.save.assert_not_called()", "def testduplicate(self):\n self.assertTrue(WeaponAbility('Guided').duplicate(\n WeaponAbility('Guided')))\n self.assertFalse(WeaponAbility('Guided').duplicate(\n 
WeaponAbility('Changling')))\n self.assertTrue(WeaponAbility('Animated', range=1).duplicate(\n WeaponAbility('Animated', range=3)))\n self.assertTrue(WeaponAbility('Defender', size=1).duplicate(\n WeaponAbility('Defender', size=3)))\n fire = MentalAbility('Fireball')\n ice = MentalAbility('Iceball')\n self.assertTrue(WeaponAbility('Enhanced', abilities=[ice,]).duplicate(\n WeaponAbility('Enhanced', abilities=[fire,])))", "def test_warning_on_duplicate_category(self):\n self.client.login(username='hodor', password='hodor')\n Perms.objects.create(user=self.user, access_level=2).save()\n response = self.client.post('/categories/add/', {'categoryType': 'tr0npr0n'})\n self.assertRedirects(response, '/categories/')\n response2 = self.client.post('/categories/add/', {'categoryType': 'tr0npr0n'})\n self.assertContains(response2, \"already exists\")", "def test_reproducible(self):\n model_1 = PoincareModel(self.data_large, seed=1, negative=3, burn_in=1)\n model_1.train(epochs=2)\n\n model_2 = PoincareModel(self.data_large, seed=1, negative=3, burn_in=1)\n model_2.train(epochs=2)\n self.assertTrue(np.allclose(model_1.kv.syn0, model_2.kv.syn0))", "def test_raise_duplicate_candidate_error(self):\n\n candidate1 = pyrankvote.Candidate(\"Per\")\n candidate2 = pyrankvote.Candidate(\"Per\")\n candidate3 = pyrankvote.Candidate(\"Aase\")\n\n def tester(_):\n pyrankvote.Ballot(ranked_candidates=[candidate1, candidate2, candidate3])\n\n msg = \"Candidate 1 and 2 is equal and should raise duplicate candidate error\"\n self.assertRaises(pyrankvote.models.DuplicateCandidatesError, tester, msg)\n\n # TEST THE OPPOSITE\n candidate1 = pyrankvote.Candidate(\"Per\")\n candidate2 = pyrankvote.Candidate(\"Maria\")\n candidate3 = pyrankvote.Candidate(\"Aase\")\n\n # This should NOT raise an error\n pyrankvote.Ballot(ranked_candidates=[candidate1, candidate2, candidate3])", "def test_hand_has_two_pair(hand, card_list, expected):\n hand.add_cards(card_list)\n assert hand.has_two_pair() == expected", "def testPassedAlready(self):\n _pass_move = self._pass_move()\n bid_move = self._move()\n context = self._context()\n bfpc = BiddingForPrivateCompany()\n\n self.assertTrue(bfpc.run(_pass_move, context), bfpc.errors())\n self.assertEqual(_pass_move.move_type, BidType.PASS)\n self.assertEqual(len(context.private_companies[1].passed_by), 1)\n\n self.assertFalse(bfpc.run(bid_move, context), bfpc.errors())\n self.assertIn(\"You can only keep bidding until you've passed once.\", bfpc.errors())", "def test_duplicate_cards(hand, card_list):\n with pytest.raises(AssertionError):\n hand.add_cards(card_list)", "def test_no_support_different_sectors(self):\n battle = self.battle\n self.carol.sector = 7\n\n s1 = battle.create_skirmish(self.alice, 2)\n prev_skirmishes = self.sess.query(db.SkirmishAction).count()\n with self.assertRaises(db.WrongSectorException):\n s1.react(self.carol, 2, hinder=False)\n now_skirmishes = self.sess.query(db.SkirmishAction).count()\n self.assertEqual(prev_skirmishes, now_skirmishes)", "def test_lots_of_coins_given(self):\n item, change, _ = give_item_and_change('apple', '1.00 0.5 0.2 0.1 0.1 0.05 0.02 0.02 0.01')\n self.assertEqual(item, 'apple')\n self.assertEqual(change, [1.0, 0.5, 0.05, 0.02])", "def test_cannot_sale_out_of_stock_product(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":20\n }\n\t ])\n resp = self.client.post(\n 
'/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'NY_denims is out of stock!')\n self.assertEqual(resp.status_code, 404)", "def test_two_and_three_card_petitions(self):\n f = gtrutils.check_petition_combos\n\n self.assertTrue( f( 0, 0, [], True, True))\n\n self.assertFalse( f( 1, 0, [], True, True))\n self.assertFalse( f( 1, 0, [1], True, True))\n self.assertTrue( f( 1, 0, [2], True, True))\n self.assertTrue( f( 1, 0, [3], True, True))\n self.assertFalse( f( 1, 0, [4], True, True))\n self.assertTrue( f( 1, 1, [], True, True))\n self.assertTrue( f( 1, 2, [], True, True))\n self.assertTrue( f( 1, 3, [], True, True))\n self.assertFalse( f( 1, 4, [], True, True))\n\n self.assertFalse( f( 1, 1, [2], True, True))\n self.assertFalse( f( 1, 1, [3], True, True))\n self.assertFalse( f( 1, 2, [2], True, True))\n self.assertFalse( f( 1, 3, [2], True, True))\n self.assertFalse( f( 1, 3, [3], True, True))\n\n self.assertTrue( f( 2, 1, [2], True, True))\n self.assertTrue( f( 2, 1, [3], True, True))\n self.assertTrue( f( 2, 0, [4], True, True))\n self.assertTrue( f( 2, 0, [5], True, True))\n self.assertTrue( f( 2, 0, [6], True, True))\n self.assertTrue( f( 2, 4, [], True, True))\n self.assertTrue( f( 2, 5, [], True, True))\n self.assertTrue( f( 2, 6, [], True, True))\n \n self.assertTrue( f(13, 26, [], True, True))\n self.assertTrue( f(13, 39, [], True, True))\n self.assertTrue( f(13, 0, [26], True, True))\n self.assertTrue( f(13, 14, [12], True, True))\n self.assertTrue( f(13, 13, [10], True, True))\n self.assertTrue( f(13, 15, [11], True, True))\n self.assertFalse( f(13, 40, [], True, True))\n self.assertFalse( f(13, 11, [3], True, True))\n\n self.assertFalse( f(4, 1, [2,3,6], True, True))\n self.assertTrue( f(5, 1, [2,3,6], True, True))\n self.assertTrue( f(6, 1, [2,3,6], True, True))\n self.assertFalse( f(7, 1, [2,3,6], True, True))", "def testEquality(self):\n pass", "def test_meal_name_already_exists(self):\n\n with self.client:\n self.add_meal(\"fries\", 10000)\n response = self.add_meal(\"fries\", 10000)\n data = json.loads(response.data.decode())\n self.assertEqual(data.get('message'), \"Meal name already exists\")\n self.assertEqual(response.status_code, 409)", "def test_create_dup(self):\n obj = self.provision_single_asset()\n p = self.post('widget', 409, params={'name': u'Testing'})\n assert 'duplicate value already' in p['message']", "def test_check_duplication_entry_at_restoring_two_chain(self):\n ref_entity = Entity.objects.create(name=\"ReferredEntity\", created_user=self._user)\n ref_entries = [\n Entry.objects.create(name=\"ref-%d\" % i, created_user=self._user, schema=ref_entity)\n for i in range(3)\n ]\n ref_entity_2 = Entity.objects.create(name=\"ReferredEntity2\", created_user=self._user)\n ref_entries_2 = [\n Entry.objects.create(name=\"ref2-%d\" % i, created_user=self._user, schema=ref_entity_2)\n for i in range(3)\n ]\n\n # initialize EntityAttrs\n attr_info = {\n \"obj\": {\"type\": AttrTypeValue[\"object\"], 
\"value\": ref_entries[0]},\n \"arr_obj\": {\n \"type\": AttrTypeValue[\"array_object\"],\n \"value\": ref_entries[1:],\n },\n }\n attr_info_2 = {\n \"obj\": {\"type\": AttrTypeValue[\"object\"], \"value\": ref_entries_2[0]},\n \"arr_obj\": {\n \"type\": AttrTypeValue[\"array_object\"],\n \"value\": ref_entries_2[1:],\n },\n }\n for attr_name, info in attr_info.items():\n # create EntityAttr object with is_delete_in_chain object\n attr = EntityAttr.objects.create(\n name=attr_name,\n type=info[\"type\"],\n is_delete_in_chain=True,\n created_user=self._user,\n parent_entity=self._entity,\n )\n\n if info[\"type\"] & AttrTypeValue[\"object\"]:\n attr.referral.add(ref_entity)\n\n self._entity.attrs.add(attr)\n\n for attr_name, info in attr_info_2.items():\n # create EntityAttr object with is_delete_in_chain object\n attr = EntityAttr.objects.create(\n name=attr_name,\n type=info[\"type\"],\n is_delete_in_chain=True,\n created_user=self._user,\n parent_entity=ref_entity,\n )\n\n if info[\"type\"] & AttrTypeValue[\"object\"]:\n attr.referral.add(ref_entity_2)\n\n ref_entity.attrs.add(attr)\n\n # initialize target entry\n entry = Entry.objects.create(name=\"entry\", schema=self._entity, created_user=self._user)\n entry.complement_attrs(self._user)\n\n for attr_name, info in attr_info.items():\n attr = entry.attrs.get(schema__name=attr_name)\n attr.add_value(self._user, info[\"value\"])\n\n ref_entries[0].complement_attrs(self._user)\n for attr_name, info in attr_info_2.items():\n attr = ref_entries[0].attrs.get(schema__name=attr_name)\n attr.add_value(self._user, info[\"value\"])\n\n # delete target entry at first\n entry.delete()\n # sync referral entries from database\n [x.refresh_from_db() for x in ref_entries]\n [x.refresh_from_db() for x in ref_entries_2]\n\n self.assertFalse(ref_entries_2[1].is_active)\n\n # create same name entry\n Entry.objects.create(name=\"ref2-1\", created_user=self._user, schema=ref_entity_2)\n\n # check duplicate entry\n ret = entry.check_duplication_entry_at_restoring(entry_chain=[])\n self.assertTrue(ret)", "def test_consecutive_queries_yield_different_individual_items(test_store):\n queried = next(test_store.get_by(name=\"Andy\"))\n other = next(test_store.get_by(name=\"Andy\"))\n\n assert queried is not other\n assert queried == other", "def check_inputs(self, item_data):\n if not item_data[0] in self.data['pizza']:\n print('Error: ' + item_data[0] + ' pizza does not exist.')\n return False\n\n if not item_data[1] in self.data['pizza'][item_data[0]]:\n print('Error: ' + item_data[1] + ' size does not exist for '\n + item_data[0] + ' pizza.')\n return False\n\n for topping in item_data[2]:\n if not topping in self.data['topping']:\n print('Error: Pizza topping ' + topping + ' does not exist.')\n return False\n return True", "def test_multi_same(nothing_list):\n result = multi_same_list(nothing_list)\n assert result[1][2] == 0\n assert result[0][2] == 0", "def test_present_in_both_db(self):\n for i in range(5):\n price = find_cheapest_price(\"Star Wars: Episode VI - Return of the Jedi\")\n if price is \"69.5\":\n break\n time.sleep(1)\n self.assertTrue(price == \"69.5\")", "def test_shoppingcart_must_not_update_if_closed(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we create a product\n id_prod = self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])\n if id_prod:\n # then we can create the closed 
shoppingcart\n data = self.shoppingcart_data\n data[\"customer_id\"] = id\n data[\"product_id\"] = id\n data[\"is_closed\"] = True\n id_cart = self._create_model(\"shoppingcart\", data, [\"quantity\", \"discount_value\", \"is_closed\"])\n if id_cart:\n # then check for fail in update shoppingcart\n self.url = reverse(\"update-shoppingcart\")\n data[\"id\"] = id_cart\n response = self.client.post(self.url, data, **self.auth_headers)\n self.assertNotEqual(response.status_code, status.HTTP_200_OK)\n self.assertIsNotNone(id_cart)\n self.assertIsNotNone(id_prod)\n self.assertIsNotNone(id)", "def test_give_correct_change(self):\n item, change, _ = give_item_and_change('coke', 1)\n self.assertEqual(item, 'coke')\n self.assertEqual(change, [.20, .05, .02])", "def test_add_duplicate_book(self):\n create_admin()\n response = self.client().post('/api/v1/login', json=self.test_admin)\n json_data = json.loads(response.data)\n access_token = json_data.get('access_token')\n self.client().post('/api/v1/products',\n headers={\"Authorization\": \"Bearer \" + access_token}, json=self.test_book)\n response = self.client().post('/api/v1/products',\n headers={\"Authorization\": \"Bearer \" + access_token}, json=self.test_book)\n json_data = json.loads(response.data)\n self.assertTrue(json_data.get('Error'))\n self.assertEqual(json_data.get('Error'), \"Book already exists\")\n self.assertEqual(response.status_code, 409)", "def _check_is_client_duped(client, client_id):\n try:\n other_client = CLIENTS[client_id]\n except KeyError:\n return\n \n if other_client is not client:\n raise RuntimeError(\n f'Creating the same client multiple times is not allowed; {client!r} already exists:, {other_client!r}.'\n )", "def test_eq_false_id(self):\n other = PrepSample('1.SKD8.640184', self.prep_template)\n self.assertFalse(self.tester == other)", "def test_extra_chocolates_multiple(self):\n _inpc = ChocolateFeast(6,2,2)\n self.assertEquals(5,_inpc.get_total_chocolates())", "def expect_duplicate(self):\n # Reset everything for this record\n self._expect_duplicate = False\n self.__dupcntr = 0\n self.__maxdup = 0\n # Get the probability to generate duplicate for next record\n if self.fake.random.random() < self.duplicate_cfg[\"Prob_duplicate\"]:\n self._expect_duplicate = True\n self.__maxdup = self.random_select_ndups()\n else:\n self._expect_duplicate = False\n self.__maxdup = 0\n\n self.__logger.debug(\"expect_duplicate ndups: %d\", self.__maxdup)", "def test_double_corrupt(pid: int, otId: int) -> bool:\n box_mon = BoxMon()\n box_mon.personality = pid\n box_mon.otId = otId\n box_mon.sub(0).type0.species = 308\n box_mon.sub(0).type0.experience = 2195\n box_mon.sub(0).type0.friendship = 70\n sub1 = box_mon.sub(1).type1\n sub1.moves[0] = 33\n sub1.moves[1] = 253\n sub1.moves[2] = 185\n sub1.pp[0] = 35\n sub1.pp[1] = 10\n sub1.pp[2] = 20\n sub2 = box_mon.sub(2).type2\n sub2.attackEV = 22\n sub2.hpEV = 8\n sub3 = box_mon.sub(3).type3\n sub3.metLocation = 28\n sub3.metLevel = 14\n sub3.metGame = 3\n sub3.pokeBall = 2\n sub3.otGender = 1\n sub3.unk = 977594907\n box_mon.checksum = box_mon.calc_checksum()\n sum1 = box_mon.checksum\n box_mon.encrypt()\n box_mon.personality |= 0x40000000\n box_mon.decrypt()\n sum2 = box_mon.calc_checksum()\n box_mon.encrypt()\n box_mon.otId |= 0x40000000\n box_mon.decrypt()\n sum3 = box_mon.calc_checksum()\n if sum1 == sum2 == sum3 and box_mon.sub(3).type3.isEgg == 0:\n box_mon.encrypt()\n return True\n return False", "def testduplicate(self):\n self.assertTrue(AmuletAbility('Control 
Dragon').duplicate(\n AmuletAbility('Control Dragon')))\n self.assertFalse(AmuletAbility('Control Dragon').duplicate(\n AmuletAbility('Control NPC')))\n self.assertTrue(AmuletAbility('Proof', element='Fire').duplicate(\n AmuletAbility('Proof', element='Fire')))\n self.assertFalse(AmuletAbility('Proof', element='Fire').duplicate(\n AmuletAbility('Proof', element='Water')))\n self.assertTrue(AmuletAbility('Attribute', attr='ST').duplicate(\n AmuletAbility('Attribute', attr='ST')))\n self.assertFalse(AmuletAbility('Attribute', attr='ST').duplicate(\n AmuletAbility('Attribute', attr='DX')))\n self.assertTrue(AmuletAbility('Skepticism', size=3).duplicate(\n AmuletAbility('Skepticism', size=3)))\n self.assertTrue(AmuletAbility('Skepticism', size=3).duplicate(\n AmuletAbility('Skepticism', size=5)))", "def test_is_product_entangled_state():\n ent_vec = max_entangled(3)\n np.testing.assert_equal(is_product_vector(ent_vec), False)" ]
[ "0.6690166", "0.63332933", "0.62514263", "0.61649024", "0.6153124", "0.605767", "0.6029322", "0.60229874", "0.6018796", "0.6007936", "0.5988192", "0.5973974", "0.5963615", "0.5908742", "0.58811826", "0.58582234", "0.585461", "0.5827044", "0.5807381", "0.58039653", "0.579343", "0.57912743", "0.5782794", "0.5782669", "0.5776542", "0.57610244", "0.5741526", "0.57412314", "0.57355934", "0.573267", "0.572312", "0.57103074", "0.56956005", "0.56811833", "0.56780577", "0.5677357", "0.5664046", "0.5661502", "0.56525564", "0.56419647", "0.56336665", "0.5626593", "0.56168795", "0.56055987", "0.56040925", "0.56030595", "0.5586377", "0.5570261", "0.55653244", "0.5552833", "0.55506665", "0.5538205", "0.55348957", "0.5528606", "0.5527455", "0.55268836", "0.5520179", "0.5520179", "0.5520179", "0.55138606", "0.5513529", "0.55094135", "0.54932934", "0.5485915", "0.5478565", "0.5474596", "0.54691243", "0.54662955", "0.5457378", "0.5448217", "0.5446862", "0.54446375", "0.5440086", "0.5435308", "0.5433268", "0.54311883", "0.5429177", "0.5427966", "0.54270405", "0.54258883", "0.5420095", "0.541043", "0.539998", "0.53991693", "0.5393312", "0.5390631", "0.53897274", "0.5389379", "0.53865707", "0.53809655", "0.53763366", "0.5370619", "0.5363661", "0.5362907", "0.5355402", "0.5355362", "0.5352605", "0.5350437", "0.5349947", "0.53495175" ]
0.7491278
0
Test of cooking by busy cook Test passed if busy cook raise a CustomWarning
def test_busy_cook(cook_busy, product_for_cook): with pytest.raises(CustomWarning): assert cook_busy.cook_dish(product_for_cook)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_cook_twice(cook_not_busy, product_for_cook):\n\n cook_not_busy.cook_dish(product_for_cook)\n with pytest.raises(ValueError):\n cook_not_busy.cook_dish(product_for_cook)", "def test_cook_set_free(cook_busy, product_for_cook):\n cook_busy.set_free(True)\n # if product needs to be cooked\n assert product_for_cook.get_need_cook_status() is True\n cook_busy.cook_dish(product_for_cook)\n assert product_for_cook.get_need_cook_status() is False", "def _warn_exit_early(self):\n ready_outputs = self.n_completed_tasks - self._nb_consumed\n is_completed = self._is_completed()\n msg = \"\"\n if ready_outputs:\n msg += (\n f\"{ready_outputs} tasks have been successfully executed \"\n \" but not used.\"\n )\n if not is_completed:\n msg += \" Additionally, \"\n\n if not is_completed:\n msg += (\n f\"{self.n_dispatched_tasks - self.n_completed_tasks} tasks \"\n \"which were still being processed by the workers have been \"\n \"cancelled.\"\n )\n\n if msg:\n msg += (\n \" You could benefit from adjusting the input task \"\n \"iterator to limit unnecessary computation time.\"\n )\n\n warnings.warn(msg)", "def life_critical():\n return True", "def test_breeding_failed_carn(self):\n nt.assert_equal(self.carn.breeding(1), None)", "def test_pm_Completeness(self):\n pass", "def test_cliches_write_good_basic(self):\n assert chk.check_cliches_write_good(\"\"\"No cliches here.\"\"\") == []\n # use one of the example cliches to verify basic functionality\n assert chk.check_cliches_write_good(self.l_write_good) != []\n assert \"cliches.write_good\" in chk.check_cliches_write_good(\n self.l_write_good)[0]", "def test_check_opt_crashed(self):\n self.assertEqual(check_opt(self.jobset2.job), 'ocrashed')", "def test_noFailure(self):\n for i in range(10):\n self.assertTrue(self.circuit_breaker.available())", "def test_life_critical():\n assert chap2.life_critical()", "def test_block_bad_state(self):\n pass", "def test_is_poor_style(self):\n self.assertEqual(1, uut.is_poor_style(\"test/with_leak.cc\"))\n self.assertEqual(0, uut.is_poor_style(\"test/without_leak.cc\"))", "def test_failure_does_not_set_work_presentation_ready(self):\n\n provider = NeverSuccessfulBibliographicCoverageProvider(\n self.pool.collection\n )\n result = provider.ensure_coverage(self.identifier)\n assert CoverageRecord.TRANSIENT_FAILURE == result.status\n assert False == self.work.presentation_ready", "def test_not_ready_if_insufficient_working_space(self):\n self.command.package = self.input_ovf\n\n self.command.ui.default_confirm_response = False\n with mock.patch.object(self.command,\n 'working_dir_disk_space_required',\n return_value=(1 << 60)):\n ready, reason = self.command.ready_to_run()\n\n self.assertFalse(ready)\n self.assertRegex(reason, \"Insufficient disk space available for\"\n \" temporary file storage\")\n\n # User can opt to continue anyway\n self.command.ui.default_confirm_response = True\n self.command._cached_disk_requirements.clear()\n with mock.patch.object(self.command,\n 'working_dir_disk_space_required',\n return_value=(1 << 60)):\n ready, reason = self.command.ready_to_run()\n\n self.assertTrue(ready)", "def delay_checks(self):\n return False", "def testcheatFalse(self):\n import Cheat\n res = Cheat.cheatclass.cheatF(self)\n exp = Cheat.cheatclass.cheatingR(self)\n\n self.assertFalse(res, exp)", "def test_that_test_can_fail():\n try:\n verify_atomic_weight_for_substance(\"O2\", 1.0)\n except AssertionError as e:\n return\n\n raise AssertionError(\"test_that_test_can_fail() didn't fail\")", "def 
testDryRun(self):\n\n\t\tself.testTooLong(dry_run=True)", "def test_dont_cancel_bookings_in_cancellation_period_if_warning_not_sent(self, mock_tz):\n mock_tz.now.return_value = datetime(2015, 2, 11, 12, 0, tzinfo=dt_timezone.utc)\n\n # self.ticketed_event payment due date 2015/2/11 23:59\n\n unpaid_no_warning = baker.make(\n TicketBooking,\n ticketed_event=self.ticketed_event,\n paid=False,\n date_booked=datetime(2015, 2, 10, 5, 30, tzinfo=dt_timezone.utc),\n warning_sent=False\n )\n unpaid_warning_within_2_hrs = baker.make(\n TicketBooking,\n ticketed_event=self.ticketed_event,\n paid=False,\n date_booked=datetime(2015, 2, 10, 5, 30, tzinfo=dt_timezone.utc),\n warning_sent=True,\n date_warning_sent=datetime(2015, 2, 11, 10, 30, tzinfo=dt_timezone.utc),\n )\n unpaid_warning_more_than_2_hrs_ago = baker.make(\n TicketBooking,\n ticketed_event=self.ticketed_event,\n paid=False,\n date_booked=datetime(2015, 2, 10, 5, 30, tzinfo=dt_timezone.utc),\n warning_sent=True,\n date_warning_sent=datetime(2015, 2, 11, 9, 30, tzinfo=dt_timezone.utc),\n )\n\n self.assertFalse(unpaid_no_warning.cancelled)\n self.assertFalse(unpaid_warning_within_2_hrs.cancelled)\n self.assertFalse(unpaid_warning_more_than_2_hrs_ago.cancelled)\n\n management.call_command('cancel_unpaid_ticket_bookings')\n unpaid_no_warning.refresh_from_db()\n unpaid_warning_within_2_hrs.refresh_from_db()\n unpaid_warning_more_than_2_hrs_ago.refresh_from_db()\n self.assertFalse(unpaid_no_warning.cancelled)\n self.assertFalse(unpaid_warning_within_2_hrs.cancelled)\n self.assertTrue(unpaid_warning_more_than_2_hrs_ago.cancelled)", "def test_case_01(self):\n if True:\n self.fail()", "def this_needs_work_test_ensure_our_presence(self):\n self.do_test_ensure_our_presence()", "def issue_locked_warning() -> None:\n print(\"\\n[red]Warning:[/red] Your bank account has been completely \"\n \"locked out for exceeding 2 or more categories!\")", "def test_email_warnings(self, mock_tz):\n mock_tz.now.return_value = datetime(\n 2015, 2, 10, 10, 0, tzinfo=dt_timezone.utc\n )\n\n # cancellation period starts 2015/2/14 17:00\n # payment_due_date 2015/2/11 23:59\n event = baker.make_recipe(\n 'booking.future_EV',\n date=datetime(2015, 2, 14, 18, 0, tzinfo=dt_timezone.utc),\n payment_open=True,\n cost=10,\n payment_due_date=datetime(2015, 2, 11, tzinfo=dt_timezone.utc),\n cancellation_period=1)\n # cancellation period starts 2015/2/14 17:00\n # payment_due_date 2015/2/12 23:59\n event1 = baker.make_recipe(\n 'booking.future_EV',\n date=datetime(2015, 2, 14, 18, 0, tzinfo=dt_timezone.utc),\n payment_open=True,\n cost=10,\n payment_due_date=datetime(2015, 2, 12, tzinfo=dt_timezone.utc),\n cancellation_period=1)\n # no cost, no warnings sent\n event2 = baker.make_recipe(\n 'booking.future_EV',\n date=datetime(2015, 2, 14, 18, 0, tzinfo=dt_timezone.utc),\n payment_open=True,\n cost=0,\n cancellation_period=1)\n baker.make_recipe(\n 'booking.booking', event=event, paid=False,\n payment_confirmed=False,\n date_booked=datetime(2015, 2, 9, 19, 30, tzinfo=dt_timezone.utc),\n _quantity=5,\n )\n baker.make_recipe(\n 'booking.booking', event=event1, paid=False,\n payment_confirmed=False,\n date_booked=datetime(2015, 2, 9, 19, 30, tzinfo=dt_timezone.utc),\n _quantity=5,\n )\n baker.make_recipe(\n 'booking.booking', event=event2, paid=False,\n payment_confirmed=False,\n date_booked=datetime(2015, 2, 9, 21, 00, tzinfo=dt_timezone.utc),\n _quantity=5,\n )\n _add_user_email_addresses(Booking)\n management.call_command('email_warnings')\n self.assertEqual(len(mail.outbox), 10)", 
"def test_wip(self):\n self.assertTrue(not return_true())", "def test_warning(self):\n self.p.compute_termination_criteria = True\n self.set_parameter_and_step(\"max_iter\", True, 5, \"ignore\")", "def noCheck():\n dislin.nochek()", "def test_block_bad_consensus(self):\n pass", "def test_require_now_silently_succeeds_for_available_tests(self, test_generator):\n feature = test_generator()\n with mock_availability_test(feature) as check:\n check.assert_not_called()\n feature.require_now(\"no message\")\n check.assert_called_once()", "def test_out_of_date(self):\n self.assertTrue(update_available(0.0))", "def confirm_harvest_pickle(before, after):\n assert after.strategy.balanceOf > before.strategy.balanceOf\n assert after.strategy.pickleBalance == 0\n assert after.strategy.stakingRewards.stakedPickle == 0\n if before.sett.pricePerFullShare:\n assert after.sett.pricePerFullShare > before.sett.pricePerFullShare", "def test_checkFlags(self):\n self.failUnlessEqual(self.nice.opts['aflag'], 1)\n self.failUnlessEqual(self.nice.opts['flout'], 0)", "def test_readiness(self):\n self.command.package = self.input_ovf\n ready, reason = self.command.ready_to_run()\n self.assertFalse(ready)\n self.assertRegex(reason, \"No file information\")\n self.assertRaises(InvalidInputError, self.command.run)\n\n self.command.file_path = \"input.vmdk\"\n ready, reason = self.command.ready_to_run()\n self.assertTrue(ready)\n\n self.command.file_path = None\n self.command.file_id = \"file1\"\n ready, reason = self.command.ready_to_run()\n self.assertTrue(ready)", "def warn():\n pass", "def test_check_buoy2():\n with pytest.raises(AssertionError) as err_info:\n Check_BuoyDC.check_buoy(buoy_fail_2)\n assert str(err_info.value) == 'Input file requires character input'", "def test_for_unknown_chemical():\n try:\n verify_atomic_weight_for_substance(\",.!\", 1.0)\n except periodic_table.InvalidFormula as e:\n return\n\n raise AssertionError(\"test_for_unknown_chemical() didn't fail\")", "def test_checkCustoms(self):\n self.failUnlessEqual(self.nice.opts['myflag'], \"PONY!\")\n self.failUnlessEqual(self.nice.opts['myparam'], \"Tofu WITH A PONY!\")", "def test_dont_cancel_rebookings_within_cancellation_period_without_warning_sent(self, mock_tz):\n mock_tz.now.return_value = datetime(2015, 2, 10, 18, 0, tzinfo=dt_timezone.utc)\n # cancel booking to reset warning flags\n self.unpaid.status = \"CANCELLED\"\n self.unpaid.save()\n # rebook\n self.unpaid.status = \"OPEN\"\n self.unpaid.date_rebooked = datetime(2015, 2, 10, 12, 30, tzinfo=dt_timezone.utc)\n self.unpaid.save()\n\n self.assertEqual(self.unpaid.status, 'OPEN')\n self.assertFalse(self.unpaid.warning_sent)\n self.assertIsNone(self.unpaid.date_warning_sent)\n management.call_command('cancel_unpaid_bookings')\n self.unpaid.refresh_from_db()\n # still open\n self.assertEqual(self.unpaid.status, 'OPEN')\n\n # set the warning sent flag to < 2hrs ago\n self.unpaid.warning_sent = True\n self.unpaid.date_warning_sent = datetime(2015, 2, 10, 17, 0, tzinfo=dt_timezone.utc)\n self.unpaid.save()\n management.call_command('cancel_unpaid_bookings')\n self.unpaid.refresh_from_db()\n # still open\n self.assertEqual(self.unpaid.status, 'OPEN')\n\n # set the warning sent flag to > 2hrs ago\n self.unpaid.warning_sent = True\n self.unpaid.date_warning_sent = datetime(2015, 2, 10, 15, 0, tzinfo=dt_timezone.utc)\n self.unpaid.save()\n management.call_command('cancel_unpaid_bookings')\n self.unpaid.refresh_from_db()\n # now cancelled\n self.assertEqual(self.unpaid.status, 'CANCELLED')", "def 
test_unpaid_penalty_prevents_borrow(self):\n ten_days_ago = timezone.now() - timezone.timedelta(days=10)\n Borrow.objects.create(\n book_id=1,\n student=self.students[0],\n requested_at=ten_days_ago,\n borrowed_at=ten_days_ago,\n duration=6,\n )\n client1 = APIClient()\n client1.login(username=self.manager.username, password=\"salam*123\")\n client1.post(\"/borrows/1/terminate/\")\n client2 = APIClient()\n client2.login(username=self.students[0].username, password=\"salam*123\")\n response = client2.post(\"/borrows/\", data={\"book\": 5})\n self.assertEqual(response.status_code, 400)", "def test_handle__no_confirm(self):\n err = StringIO()\n call_command(\"load_county_limits\", stderr=err)\n self.assertNotIn(\"Successfully loaded data from\", err.getvalue())", "def test_service_initiated():\n assert \"ready\" in bkt_outcome_unwind.index()", "def busy(self, flag, message=\"\"): \n return None", "def _warn_and_lock_if_needed(self, transaction: Transaction) -> None:\n budget = self.budget_manager.get_budget(transaction.budget_category)\n exceeded_ratio = budget.exceeded_ratio\n if exceeded_ratio > 1:\n self._notify_exceeded_budget(budget)\n self.print_transactions_for_review(budget)\n elif exceeded_ratio > 0.9:\n self._warn_nearing_exceed_budget(budget, 90)\n self.print_transactions_for_review(budget)", "def test_xfail_with_run_false_and_with_reason():\n pass", "def check_requirements(self): # pylint: disable=no-self-use\n self.is_skipped = False", "def has_warnings_active(self) -> bool:", "def can_dry_run(self):\r\n return False", "def test_long_run_case_that_we_want_to_skip():\n time.sleep(30)\n assert 0", "def is_code_good(safe_from_bugs, ready_for_change, easy_to_understand):\n pass # your code here!", "def check():", "def test_check_freq_crashed(self):\n self.assertEqual(check_freq(self.jobset2), 'fcrashed')", "def test_large_state_warning(self, monkeypatch):\n circuit = hadamard_circuit(17, shots=2)\n bits, recipes = circuit()\n shadow = ClassicalShadow(bits, recipes)\n\n msg = \"Querying density matrices for n_wires > 16 is not recommended, operation will take a long time\"\n\n with monkeypatch.context() as m:\n # don't run the actual state computation since we only want the warning\n m.setattr(np, \"einsum\", lambda *args, **kwargs: None)\n m.setattr(np, \"reshape\", lambda *args, **kwargs: None)\n\n with pytest.warns(UserWarning, match=msg):\n shadow.global_snapshots()", "async def test_warn_upgrade_new_install(config: Config, time: Time):\n cache = DataCache(config, time)\n assert not cache.notifyForIgnoreUpgrades\n assert cache._config.get(Setting.IGNORE_UPGRADE_BACKUPS)", "def testInsufficientCash(self):\n\n bid_move = self._move()\n context = self._context()\n context.players[0].cash = 200\n bfpc = BiddingForPrivateCompany()\n\n self.assertFalse(bfpc.run(bid_move, context), bfpc.errors())", "async def test_sensor_backup_reserve_unavailable(hass: HomeAssistant) -> None:\n\n mock_powerwall = await _mock_powerwall_with_fixtures(hass)\n mock_powerwall.get_backup_reserve_percentage = Mock(\n side_effect=MissingAttributeError(Mock(), \"backup_reserve_percent\", \"operation\")\n )\n\n config_entry = MockConfigEntry(domain=DOMAIN, data={CONF_IP_ADDRESS: \"1.2.3.4\"})\n config_entry.add_to_hass(hass)\n with patch(\n \"homeassistant.components.powerwall.config_flow.Powerwall\",\n return_value=mock_powerwall,\n ), patch(\n \"homeassistant.components.powerwall.Powerwall\", return_value=mock_powerwall\n ):\n assert await hass.config_entries.async_setup(config_entry.entry_id)\n await 
hass.async_block_till_done()\n\n state = hass.states.get(\"sensor.powerwall_backup_reserve\")\n assert state is None", "def unstable_test(reason):\n def decor(f):\n @functools.wraps(f)\n def inner(self, *args, **kwargs):\n try:\n return f(self, *args, **kwargs)\n except Exception as e:\n msg = (\"%s was marked as unstable because of %s, \"\n \"failure was: %s\") % (self.id(), reason, e)\n raise self.skipTest(msg)\n return inner\n return decor", "def test_cancellations(self):\n self.assertEqual(self.meter * self.imeter, 1)\n self.assertEqual(self.second * self.isecond, 1)\n self.assertEqual(self.kgram * self.ikgram, 1)", "def test_dont_cancel_bookings_within_cancellation_period_without_warning_sent(self, mock_tz):\n mock_tz.now.return_value = datetime(2015, 2, 10, 18, 0, tzinfo=dt_timezone.utc)\n # reset warning flags\n self.unpaid.warning_sent = False\n self.unpaid.date_warning_sent = None\n self.unpaid.save()\n\n self.assertEqual(self.unpaid.status, 'OPEN')\n self.assertFalse(self.unpaid.warning_sent)\n self.assertIsNone(self.unpaid.date_warning_sent)\n management.call_command('cancel_unpaid_bookings')\n self.unpaid.refresh_from_db()\n # still open\n self.assertEqual(self.unpaid.status, 'OPEN')\n\n # set the warning sent flag to < 2hrs ago\n self.unpaid.warning_sent = True\n self.unpaid.date_warning_sent = datetime(2015, 2, 10, 17, 0, tzinfo=dt_timezone.utc)\n self.unpaid.save()\n management.call_command('cancel_unpaid_bookings')\n self.unpaid.refresh_from_db()\n # still open\n self.assertEqual(self.unpaid.status, 'OPEN')\n\n # set the warning sent flag to > 2hrs ago\n self.unpaid.warning_sent = True\n self.unpaid.date_warning_sent = datetime(2015, 2, 10, 15, 0, tzinfo=dt_timezone.utc)\n self.unpaid.save()\n management.call_command('cancel_unpaid_bookings')\n self.unpaid.refresh_from_db()\n # now cancelled\n self.assertEqual(self.unpaid.status, 'CANCELLED')", "def test_change_provisioned_throughput_usual_case():", "def test_fleur_relax_continue_converged(self, run_with_cache, mock_code_factory):\n assert False", "def has_warnings(self) -> bool:", "def test_too_far_scenario():\n start_too_far_scenario(config.MERAKI_CAMERAS[0][\"serial\"])\n time.sleep(WARN_EVENT_THRESHOLD)\n start_too_far_scenario(config.MERAKI_CAMERAS[0][\"serial\"])\n return \"ok\"", "def test_email_warnings_only_sent_for_open_bookings(self, mock_tz):\n mock_tz.now.return_value = datetime(\n 2015, 2, 10, 10, tzinfo=dt_timezone.utc\n )\n event = baker.make_recipe(\n 'booking.future_EV',\n date=datetime(2015, 2, 13, 18, 0, tzinfo=dt_timezone.utc),\n payment_open=True,\n cost=10,\n payment_due_date=datetime(2015, 2, 11, tzinfo=dt_timezone.utc),\n cancellation_period=1)\n baker.make_recipe(\n 'booking.booking', event=event, paid=False,\n payment_confirmed=False, status='OPEN',\n date_booked=datetime(2015, 2, 9, 19, 30, tzinfo=dt_timezone.utc),\n _quantity=3,\n )\n baker.make_recipe(\n 'booking.booking', event=event, paid=False,\n payment_confirmed=False, status='CANCELLED',\n date_booked=datetime(2015, 2, 9, 19, 30, tzinfo=dt_timezone.utc),\n _quantity=3,\n )\n _add_user_email_addresses(Booking)\n \n management.call_command('email_warnings')\n self.assertEqual(len(mail.outbox), 3)\n for booking in Booking.objects.filter(status='OPEN'):\n self.assertTrue(booking.warning_sent)\n for booking in Booking.objects.filter(status='CANCELLED'):\n self.assertFalse(booking.warning_sent)", "def test_version_check_outdated(self):\n output = self.run_command(\"selfupdate --check bennr01:selfupdate_test_outdated\", exitcode=0)\n 
self.assertIn(\"Target: bennr01:selfupdate_test_outdated\", output)\n self.assertNotIn(\"Target: ywangd:master\", output)\n self.assertIn(\"Already at latest version\", output)\n self.assertNotIn(\"New version available\", output)\n self.assertNotIn(\"Error: \", output)", "def test_can_not_book_running_block(self):\n date = datetime.now().replace(minute=0, second=0, microsecond=0)\n\n response = self.client.post(\n reverse('bookings', kwargs={'facility': 'g'}), {'book': str(date.timestamp())})\n\n context = response.context\n bookings = context[\"bookings\"]\n\n self.assertEqual(response.status_code, 403)\n self.assertEqual(context[\"quota\"], settings.BOOKINGS_QUOTA)", "def testHealthAssessWorkMissed(self):\n attr = self.session.create_visit_attr()\n\n self.util.intTypeTest(self, attr, \"work_missed\")\n\n self.util.intPropertyTest(self, attr, \"work_missed\")", "def _warn_and_lock_if_needed(self, transaction: Transaction) -> None:\n budget = self.budget_manager.get_budget(transaction.budget_category)\n exceeded_ratio = budget.exceeded_ratio\n if exceeded_ratio > 1:\n self._notify_exceeded_budget(budget)\n self._lock_budget(budget)\n self.print_transactions_for_review(budget)\n if self.budget_manager.no_locked_budgets >= 2:\n self._locked = True\n print('YOUR BANK ACCOUNT HAS BEEN LOCKED!')\n elif exceeded_ratio > 0.5:\n self._warn_nearing_exceed_budget(budget, 50)\n self.print_transactions_for_review(budget)", "def test_condition_keep(self):\n self.write_contents(\n 'external/wpt/pass.html.ini', \"\"\"\\\n [pass.html]\n [subtest]\n expected:\n if (product == \"content_shell\") and (os == \"win\"): PASS\n FAIL\n \"\"\")\n self.update(\n {\n 'run_info': {\n 'product': 'content_shell',\n 'os': 'win'\n },\n 'results': [{\n 'test':\n '/pass.html',\n 'status':\n 'TIMEOUT',\n 'expected':\n 'OK',\n 'subtests': [{\n 'name': 'subtest',\n 'status': 'TIMEOUT',\n 'expected': 'PASS',\n }],\n }],\n }, {\n 'run_info': {\n 'product': 'content_shell',\n 'os': 'mac'\n },\n 'results': [],\n }, {\n 'run_info': {\n 'product': 'chrome',\n 'os': 'linux'\n },\n 'results': [],\n })\n # Without result replay, the `FAIL` expectation is erroneously deleted,\n # which will give either:\n # expected: TIMEOUT\n #\n # with a full update alone (i.e., `--overwrite-conditions=yes`), or\n # expected:\n # if os == \"win\": TIMEOUT\n #\n # without a full update (i.e., `--overwrite-conditions=no`).\n self.assert_contents(\n 'external/wpt/pass.html.ini', \"\"\"\\\n [pass.html]\n expected:\n if (product == \"content_shell\") and (os == \"win\"): TIMEOUT\n [subtest]\n expected:\n if (product == \"content_shell\") and (os == \"win\"): TIMEOUT\n FAIL\n \"\"\")", "def _warn_and_lock_if_needed(self, transaction: Transaction) -> None:\n budget = self.budget_manager.get_budget(transaction.budget_category)\n exceeded_ratio = budget.exceeded_ratio\n if exceeded_ratio > 1.2:\n self._lock_budget(budget)\n self.print_transactions_for_review(budget)\n elif exceeded_ratio > 1:\n self._notify_exceeded_budget(budget)\n self.print_transactions_for_review(budget)\n elif exceeded_ratio > 0.75:\n self._warn_nearing_exceed_budget(budget, 75)\n self.print_transactions_for_review(budget)", "def test_may_certify(self):\r\n self.assertTrue(self.past_show_certs.may_certify())\r\n self.assertTrue(self.past_noshow_certs.may_certify())\r\n self.assertTrue(self.future_show_certs.may_certify())\r\n self.assertFalse(self.future_noshow_certs.may_certify())", "def test_not_ready_if_insufficient_output_space(self):\n self.command.package = self.input_ovf\n\n 
self.command.ui.default_confirm_response = False\n # Make working directory requirements negligible but output huge\n with mock.patch.object(self.command,\n \"working_dir_disk_space_required\",\n return_value=0), \\\n mock.patch.object(self.command.vm,\n 'predicted_output_size',\n return_value=(1 << 60)):\n ready, reason = self.command.ready_to_run()\n\n self.assertFalse(ready)\n self.assertRegex(reason, \"Insufficient disk space available\"\n \" to guarantee successful output\")\n\n # User can opt to continue anyway\n self.command.ui.default_confirm_response = True\n self.command._cached_disk_requirements.clear()\n with mock.patch.object(self.command,\n \"working_dir_disk_space_required\",\n return_value=0), \\\n mock.patch.object(self.command.vm,\n 'predicted_output_size',\n return_value=(1 << 60)):\n ready, reason = self.command.ready_to_run()\n\n self.assertTrue(ready)", "def is_yummy(self):\n return False", "def _nonforce_drop(self) -> bool:\n if self.closed:\n return True\n if self.zero_failures():\n return False\n return random.random() < self.failurerate", "def _set_insufficient_confidence_warning(\n self): # pragma: no cover\n self.failed_initial_confidence = True\n self.surface_result('LO_INIT_CONF')\n self.warnings.append(\n 'Bisect failed to reproduce the regression with enough confidence.')", "def test_negative_is_active_of_homework():\n assert not expired_hw.is_active()", "def test_email_warnings_not_sent_within_2_hrs_of_booking(self, mock_tz):\n mock_tz.now.return_value = datetime(\n 2015, 2, 9, 21, 45, tzinfo=dt_timezone.utc\n )\n event = baker.make_recipe(\n 'booking.future_EV',\n date=datetime(2015, 2, 13, 18, 0, tzinfo=dt_timezone.utc),\n payment_open=True,\n cost=10,\n payment_due_date=datetime(2015, 2, 11, tzinfo=dt_timezone.utc),\n cancellation_period=1)\n booking1 = baker.make_recipe(\n 'booking.booking', event=event, paid=False,\n payment_confirmed=False, status='OPEN',\n date_booked=datetime(2015, 2, 9, 19, 30, tzinfo=dt_timezone.utc)\n )\n booking2 = baker.make_recipe(\n 'booking.booking', event=event, paid=False,\n payment_confirmed=False, status='OPEN',\n date_booked=datetime(2015, 2, 9, 20, 30, tzinfo=dt_timezone.utc)\n )\n _add_user_email_addresses(Booking)\n management.call_command('email_warnings')\n self.assertEqual(len(mail.outbox), 1)\n booking1.refresh_from_db()\n booking2.refresh_from_db()\n self.assertTrue(booking1.warning_sent)\n self.assertFalse(booking2.warning_sent)", "def skipTest(self, text, *args, **kw):\n reason = compact(text, *args, **kw)\n try:\n super(RsyncSystemBackupsTestCase, self).skipTest(reason)\n except AttributeError:\n # unittest.TestCase.skipTest() isn't available in Python 2.6.\n logger.warning(\"%s\", reason)", "def test_available(self):\n feature_guard = _make_requires(True, \"Error text\")\n results = []\n\n @feature_guard\n def inner():\n results.append(True)\n return True\n\n assert inner() is True\n assert [True] == results", "async def check(self):\n if await self.is_water_level_critical():\n _LOGGER.debug(\"Water level critical - pump should be off\")\n else:\n for run in self._runs:\n if run.run_now(self._now):\n _LOGGER.debug(\"Pool pump should be on now: %s\", run)\n await self._switch_pool_pump(STATE_ON)\n return\n # If we arrive here, the pool pump should be off.\n _LOGGER.debug(\"Pool pump should be off\")\n await self._switch_pool_pump(STATE_OFF)", "def test_check_cds_7(self):\n self.cds1.translation = Seq(\"MB\", IUPAC.protein)\n import_genome.check_cds(self.cds1, self.eval_flags)\n count = count_status(self.cds1, 
\"error\", \"warning\")\n self.assertEqual(count, 2)", "def testValidDescriptions(self):\n self.assertTrue(self.app._ignore_jobs(\"\"))\n self.assertTrue(self.app._ignore_jobs(\"This is valid\"))\n self.assertTrue(self.app._ignore_jobs(\"you can telecommute\"))", "def test_stress_scu(self):\n proc = subprocess.Popen(['ectool', 'stress'], stdout=subprocess.PIPE)\n time.sleep(5)\n proc.send_signal(subprocess.signal.SIGINT)\n stdout, _ = proc.communicate()\n self.assertIn(b'Total failures: 0\\n', stdout)", "def check_stability(self):", "def test_running_measure_failing_checks(processor, measure_with_tools):\n measure_with_tools.pre_hooks['dummy'].fail_check = True\n processor.start_measure(measure_with_tools)\n\n process_and_assert(getattr, (processor, 'active'))\n\n process_and_join_thread(processor._thread)\n assert measure_with_tools.status == 'FAILED'\n assert 'checks' in measure_with_tools.infos\n m = processor.plugin.workbench.get_manifest('test.measure')\n assert not m.find('runtime_dummy1').collected\n assert not m.find('runtime_dummy2').collected", "def test_ensure_not_ts_pass(self):\n self.assertEqual(ensure_not_ts(self.jobset1), 'completed')", "async def test_warn_upgrade_old_install_explicit_ignore_others(config: Config, time: Time, cleandir: str):\n with open(config.get(Setting.DATA_CACHE_FILE_PATH), \"w\") as f:\n data = {\n \"upgrades\": [\n {\n \"prev_version\": str(Version.default()),\n \"new_version\": \"0.108.1\",\n \"date\": time.now().isoformat()\n }\n ]\n }\n json.dump(data, f)\n config_path = join(cleandir, \"config.json\")\n with open(config_path, \"w\") as f:\n data = {\n Setting.IGNORE_OTHER_BACKUPS.value: True,\n Setting.DATA_CACHE_FILE_PATH.value: config.get(Setting.DATA_CACHE_FILE_PATH)\n }\n json.dump(data, f)\n cache = DataCache(Config.fromFile(config_path), time)\n assert not cache.notifyForIgnoreUpgrades", "def check(self):\n pass", "def check(self):\n pass", "def check(self):\n pass", "def check(self):\n pass", "def check_alive(cw: CustomWidget) -> NoReturn:\r\n ...", "def test_check_cds_21(self):\n import_genome.check_cds(self.cds1, self.eval_flags,\n description_field = \"function\")\n count = count_status(self.cds1, \"error\", \"warning\")\n self.assertEqual(count, 1)", "def test_work_without_activity(human):\n with pytest.raises(AttributeError):\n human.work()", "def test_read_not_interested(self):\n try:\n self.reader.read(self.books[2], 0, 0)\n self.fail(\"Readed book not interested\")\n except AssertionError:\n pass", "def test_reject_proposal_demand(self):\n pass", "def violated(self) -> bool:\n ...", "def test_alerts_when_no_breath(app, events, data):\n time_intervals = 1 / DriverFactory.MOCK_SAMPLE_RATE_HZ\n num_of_samples = int(NO_BREATH_TIME / time_intervals)\n app.run_iterations(num_of_samples)\n assert alerts.AlertCodes.NO_BREATH in events.alerts_queue.active_alerts, \\\n f\"NO_BREATH missing from: {events.alerts_queue.active_alerts}\"", "def has_warn(self):\r\n return self._arm.has_warn", "def test_require_now_raises_for_unavailable_tests(self, test_generator):\n feature = test_generator()\n with mock_availability_test(feature) as check:\n check.assert_not_called()\n with self.assertRaisesRegex(MissingOptionalLibraryError, \"sentinel message\"):\n feature.require_now(\"sentinel message\")\n check.assert_called_once()", "def test_watch_bad_argument(self):\n self.assertFalse(\n reloading.refresh(datetime, force=True),\n Message('Should not reload not a module')\n )", "def busy(self):\n pass" ]
[ "0.65008634", "0.625711", "0.620796", "0.58594835", "0.5785558", "0.5728688", "0.57037574", "0.57026243", "0.5623727", "0.56204015", "0.5618192", "0.56137985", "0.5599557", "0.5592458", "0.5572111", "0.55385923", "0.5527014", "0.551451", "0.551242", "0.55018896", "0.5490242", "0.5488431", "0.54782844", "0.5456323", "0.5447276", "0.54462975", "0.5437933", "0.54363036", "0.5432498", "0.54274404", "0.54263306", "0.54261446", "0.54255146", "0.54235524", "0.5415568", "0.54017967", "0.54014874", "0.5385889", "0.5385483", "0.5385363", "0.5384663", "0.53767455", "0.53759366", "0.5371214", "0.53670466", "0.5347165", "0.53419507", "0.5337768", "0.5333578", "0.533356", "0.5330822", "0.53202355", "0.5313521", "0.53125507", "0.5306952", "0.53030336", "0.52852666", "0.52806836", "0.52804023", "0.5280232", "0.52673227", "0.52662677", "0.5265321", "0.5263729", "0.5262893", "0.5255934", "0.5247116", "0.52459955", "0.524557", "0.52454245", "0.5242472", "0.5238744", "0.5237985", "0.5234628", "0.52285504", "0.52264816", "0.52227145", "0.5219148", "0.5215732", "0.5211488", "0.5209838", "0.5205581", "0.5204385", "0.52014446", "0.5193539", "0.51925737", "0.51925737", "0.51925737", "0.51925737", "0.5191532", "0.5190667", "0.5183383", "0.51821846", "0.51815575", "0.5174227", "0.51721966", "0.51715183", "0.5162049", "0.51607203", "0.5153738" ]
0.7838216
0
Test of changing state of cook. Busy cook set to free and then tries to cook the dish. Cooking should be successful (product.get_need_cook_status should be False)
def test_cook_set_free(cook_busy, product_for_cook): cook_busy.set_free(True) # if product needs to be cooked assert product_for_cook.get_need_cook_status() is True cook_busy.cook_dish(product_for_cook) assert product_for_cook.get_need_cook_status() is False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_cook_twice(cook_not_busy, product_for_cook):\n\n cook_not_busy.cook_dish(product_for_cook)\n with pytest.raises(ValueError):\n cook_not_busy.cook_dish(product_for_cook)", "def test_update_state1(self):\n pass", "def test_update_state(self):\n pass", "def test_update_state2(self):\n pass", "def testSettled(self):\n self.injectEvent(safe.Settling.SETTLED)\n self.assertCurrentState(safe.Grabbing)", "def test_busy_cook(cook_busy, product_for_cook):\n with pytest.raises(CustomWarning):\n assert cook_busy.cook_dish(product_for_cook)", "def test_update_state3(self):\n pass", "def Cook(self, env, customer, cooking_time_type = 'fixed', manual_cook_time = None):\n with self.resource.request() as req:\n yield req #resource를 점유 해야 함.\n now_time = round(env.now , 1)\n req.info = [customer.name, now_time]\n if cooking_time_type == 'fixed':\n cooking_time = self.order_ready_time\n elif cooking_time_type == 'random':\n cooking_time = random.randrange(1,self.order_ready_time)\n elif cooking_time_type == 'uncertainty':\n cooking_time = customer.cook_time\n else:\n cooking_time = 0.001\n print('T :{} 가게 {}, {} 분 후 주문 {} 조리 완료'.format(int(env.now),self.name,cooking_time,customer.name))\n if manual_cook_time == None:\n yield env.timeout(cooking_time)\n else:\n yield env.timeout(manual_cook_time)\n print('T :{} 가게 {} 주문 {} 완료'.format(int(env.now),self.name,customer.name))\n customer.food_ready = True\n customer.ready_time = env.now\n self.ready_order.append(customer)", "def updateState(self):\n\n if ('cutting' in self.step_ops) and (self.cut_state.user_cutting):\n self.step_ops['cutting'] = True\n \n if ('cooking' in self.step_ops) and (self.cut_state.user_cooking):\n self.step_ops['cooking'] = True\n\n # TODO: add the rest of the operations\n\n advance = True\n\n # Check if ALL operations are complete\n for op in self.step_ops:\n if self.step_ops[op] == False:\n advance = False\n break\n\n if advance:\n self.nextStep()", "def test_update_state4(self):\n pass", "def change_availability():\n artwork_sold = get_artwork_name()\n if not controls_utils.artwork_exists(artwork_sold):\n print('No record of that piece of art. ')\n else:\n artist = controls_utils.name_of_artist(artwork_sold)\n if not controls_utils.artwork_available(artwork_sold, artist):\n print('Sorry that piece has already been sold. ')\n else:\n response = input('Mark ' + artwork_sold + ' as sold? Y or N ')\n if response.upper() == 'Y':\n mark_as_sold(artwork_sold)\n while not controls_utils.response_affirmative(response):\n response = input('Are you sure you want to mark '\n + artwork_sold + ' by ' + artist + ' as sold? 
Y or N or press X to escape ')\n if response.upper() == 'X':\n break\n elif response.upper() == 'N':\n break", "def test_charge_correct_for_fiction_after_close(self):\n rental = create_test_rental(\n book=self.book2,\n customer=self.user1,\n date_borrowed=\"2019-05-22 00:00:00.400952+00:00\",\n )\n close_rental_url = reverse(\"close_rental\", kwargs={\"pk\": rental.pk})\n\n data = {\"date_returned\": \"2019-05-25 13:46:57.249145+03:00\"}\n response = self.client.put(close_rental_url, data=data, format=\"json\")\n\n self.assertEqual(response.data[\"amount_charged\"], \"9.00\")\n self.assertEqual(response.data[\"rental_status\"], \"Closed\")\n self.assertEqual(response.data[\"currency\"], CURRENCY)", "def test_ensure_state_change_if_needed(self, setState, commit):\n advisory = errata.Advisory(errata_id=123, errata_state='QE')\n advisory.ensure_state('NEW_FILES')\n setState.assert_called()", "async def test_state_update(hass: HomeAssistant) -> None:\n await init_integration(hass)\n\n state = hass.states.get(\"sensor.home_cloud_ceiling\")\n assert state\n assert state.state != STATE_UNAVAILABLE\n assert state.state == \"3200.0\"\n\n future = utcnow() + timedelta(minutes=60)\n\n current_condition = load_json_object_fixture(\n \"accuweather/current_conditions_data.json\"\n )\n current_condition[\"Ceiling\"][\"Metric\"][\"Value\"] = 3300\n\n with patch(\n \"homeassistant.components.accuweather.AccuWeather.async_get_current_conditions\",\n return_value=current_condition,\n ), patch(\n \"homeassistant.components.accuweather.AccuWeather.requests_remaining\",\n new_callable=PropertyMock,\n return_value=10,\n ):\n async_fire_time_changed(hass, future)\n await hass.async_block_till_done()\n\n state = hass.states.get(\"sensor.home_cloud_ceiling\")\n assert state\n assert state.state != STATE_UNAVAILABLE\n assert state.state == \"3300\"", "def test_is_active(self):\n\n self.sold.health = 0\n self.assertFalse(self.sold.is_active)", "async def test_fan_read_state(hass: HomeAssistant, utcnow) -> None:\n helper = await setup_test_component(hass, create_fan_service)\n\n state = await helper.async_update(\n ServicesTypes.FAN, {CharacteristicsTypes.ON: False}\n )\n assert state.state == \"off\"\n\n state = await helper.async_update(\n ServicesTypes.FAN, {CharacteristicsTypes.ON: True}\n )\n assert state.state == \"on\"", "def test_updating_item_status(self):\n #test original quantity\n self.assertEqual(self.supply1.quantity, 10)\n self.assertEqual(self.supply2.quantity, 10)\n \n modified_po = copy.deepcopy(base_purchase_order)\n modified_po['status'] = 'Received'\n modified_po['items'][0]['id'] = 1\n modified_po['items'][0]['status'] = 'Receieved'\n \n resp = self.client.put('/api/v1/purchase-order/1/',\n format='json',\n data=modified_po)\n \n self.assertEqual(resp.status_code, 200, msg=resp)\n \n po = resp.data\n \n self.assertEqual(Supply.objects.get(pk=1).quantity, 20)", "def test_consume_status(self):\n # Arrange\n player = Character.objects.get(pk=1)\n target = Character.objects.get(pk=2)\n\n object_to_test = Combat(player=player,\n target=target,\n player_attack_type=\"disrupt\",\n target_attack_type=\"block\",\n player_enhanced=True)\n\n # Inflict a status effect\n _ = object_to_test.do_combat_round()\n\n check_status_before_apply = StatusEffects.objects.filter(character_id=target.pk)\n self.assertTrue(check_status_before_apply.exists())\n\n # Act\n # Check and apply the status effect\n _ = object_to_test.check_and_apply_status()\n\n check_status_after_apply = 
StatusEffects.objects.filter(character_id=target.pk)\n self.assertFalse(check_status_after_apply.exists())", "async def test_fanv2_read_state(hass: HomeAssistant, utcnow) -> None:\n helper = await setup_test_component(hass, create_fanv2_service)\n\n state = await helper.async_update(\n ServicesTypes.FAN_V2, {CharacteristicsTypes.ACTIVE: False}\n )\n assert state.state == \"off\"\n\n state = await helper.async_update(\n ServicesTypes.FAN_V2, {CharacteristicsTypes.ACTIVE: True}\n )\n assert state.state == \"on\"", "def test_02_product_update(self):\n # Update new product state2 from default draft to sellable\n new_product = self.create_product()\n self.assertEqual(new_product.state2, 'draft')\n new_product.state2 = 'sellable'\n self.assertEqual(new_product.state2, 'sellable')\n\n # Same but to an existing demo product.\n demo_product = self.product_obj.browse(\n self.ref('product_lifecycle.product_product_4g'))\n self.assertEqual(demo_product.state2, 'sellable')\n demo_product.state2 = 'draft'\n self.assertEqual(demo_product.state2, 'draft')\n\n # Update new product invividual field (field defined in product.product\n # model).\n self.assertEqual(new_product.default_code, 'A2330')\n new_product.default_code = 'A2330-1'\n self.assertEqual(new_product.default_code, 'A2330-1')\n\n # Same but to an existing demo product.\n self.assertEqual(demo_product.default_code, 'A2329')\n demo_product.default_code = 'A2329-1'\n self.assertEqual(demo_product.default_code, 'A2329-1')\n\n # Update new product commom characteristic (field defined in\n # product.template) and check that affects the another product\n # variants\n self.assertFalse(new_product.description)\n new_product.description = 'This is a New Product'\n self.assertEqual(new_product.description, 'This is a New Product')\n self.assertEqual(demo_product.description, 'This is a New Product')\n demo_product.description = False\n self.assertFalse(demo_product.description)", "def test(self, state):\n\n # manual dice should have been typed in by this point, if they don't\n # exist exit\n if state.dice == \"manual\" and (\n state.rolls is None or state.rolls == []):\n return state\n\n test_dict = {\"attr\": self._test_1dice,\n \"fight_talent\": self._test_1dice,\n \"advantage\": self._test_1dice,\n \"skill\": self._test_3dice,\n \"spell\": self._test_3dice,\n \"misc\": self._test_misc}\n\n state = test_dict[state.selection.category](state)\n\n return state", "def test_update_station_status(self):\n\t\t# Seed the db and initialize all counts to 0\n\t\tget_info.seed_station_information()\n\n\t\t# Save number of bikes/docks before update\n\t\tE40th_5thave = db.session.query(Station).filter(Station.id == 153).one()\n\t\tbikes_before = E40th_5thave.num_bikes_available\n\t\tdocks_before = E40th_5thave.num_docks_available\n\n\t\t# Update bike/dock numbers\n\t\tget_info.update_station_status()\n\n\t\tE40th_5thave = db.session.query(Station).filter(Station.id == 153).one()\n\t\tbikes_after = E40th_5thave.num_bikes_available\n\t\tdocks_after = E40th_5thave.num_docks_available\n\n\t\tself.assertNotEqual(bikes_before + docks_before, bikes_after + docks_after, 'Bikes did not update, or station is disabled.')", "def test_give_correct_change(self):\n item, change, _ = give_item_and_change('coke', 1)\n self.assertEqual(item, 'coke')\n self.assertEqual(change, [.20, .05, .02])", "def test_multiple_states(self):\n\n # Prepare.\n app = self.factory()\n request = self.getRequest(app)\n context = model.factory()\n\n # Create a dummy event and get it back.\n event_id = 
boilerplate.createEvent(context)\n event = repo.LookupActivityEvent()(event_id)\n\n # Cancel when created.\n state_changer = request.state_changer\n # We have to use a transaction manager because perform creates\n # a new event on state change.\n with transaction.manager:\n bm.Session.add(event)\n bm.Session.add(context)\n state_changer.perform(context, a.CANCEL, event)\n s1 = context.work_status.value\n self.assertEqual(s1, s.CANCELLED)\n\n # Cancel when started.\n c2 = model.factory(initial_state=s.STARTED)\n # We have to use a transaction manager because perform creates\n # a new event on state change.\n with transaction.manager:\n bm.Session.add(event)\n state_changer.perform(c2, a.CANCEL, event)\n s2 = c2.work_status.value\n self.assertEqual(s2, s.CANCELLED)", "def test_single_quant_confirm(self):\n pick = self.quant_1.create_picking(self.picking_type_pick, confirm=True)\n # Check it is confirmed\n self.assertEqual(pick.state, \"confirmed\")", "def test_update_condition_true(self):\n original_alt_info = getattr(self.form, 'alt_field_info', None)\n expected_label = 'alt_test_feature'\n test_method = getattr(self.form, 'condition_' + expected_label, None)\n alt_info = getattr(self, 'alt_field_info', None)\n expected = alt_info.get(expected_label, None)\n self.form.alt_field_info = alt_info\n self.form.test_condition_response = True\n actual = self.form.get_alt_field_info()\n\n self.assertIsNotNone(alt_info)\n self.assertIsNotNone(test_method)\n self.assertTrue(test_method())\n self.assertIsNotNone(expected)\n self.assertIn(expected_label, alt_info)\n self.assertEqual(expected, actual)\n\n self.form.test_condition_response = False\n self.form.alt_field_info = original_alt_info\n if original_alt_info is None:\n del self.form.alt_field_info", "def test_ensure_state_untouched_if_not_necessary(self, setState):\n advisory = errata.Advisory(errata_id=123, errata_state='QE')\n advisory.ensure_state('QE')\n setState.assert_not_called()", "def test_update_cupcake(self):\n\n response = self.client.patch(\"/cupcakes/10000\",\n json={\"flavor\": \"chocolate\",\n \"size\": \"giant\",\n \"rating\": 11,\n \"id\": 10000,\n \"image\": None})\n\n self.assertEqual(response.json['response']['flavor'], \"chocolate\")\n self.assertEqual(response.json['response']['size'], 'giant')\n self.assertEqual(response.json['response']['rating'], 11)\n self.assertEqual(response.status_code, 200)\n\n all_cupcakes = self.client.get('/cupcakes')\n all_cupcakes_data = all_cupcakes.json['response']\n self.assertEqual(len(all_cupcakes_data), 1)", "def check(self):\n os.system(\"clear\")\n self.print()\n print(\"\\n\")\n _, response = _num_select(\"Would you like to make any changes?\",\n [\"Name\", \"Ingredients\", \"Steps\", \"Description\", \"Tags\", \"No\"])\n if response == \"Name\":\n self.name = input(\"Recipe Name: \")\n print(\"New name is: \" + self.name)\n return False\n elif response == \"Ingredients\":\n self.edit_ingredients()\n return False\n elif response == \"Steps\":\n self.edit_steps()\n return False\n elif response == \"Description\":\n self.description = input(\"Description: \")\n return False\n elif response == \"Tags\":\n self.edit_tags()\n return False\n return True", "def the_changed_brightness_should_be_reflected_in_the_state_10():\n assert web_app.get_state()\n assert web_app.check_value_in_state(\"brightness\",\"10\")", "def test_review_is_done(self):\n self.assertFalse(processes.review_is_done(None))\n self.assertFalse(processes.review_is_done(0))\n 
self.assertFalse(processes.review_is_done(models.REVIEW_PENDING))\n self.assertFalse(processes.review_is_done(models.REVIEW_ISSUES_OPEN))\n self.assertTrue(processes.review_is_done(models.REVIEW_ISSUES_ADDRESSED))\n self.assertTrue(processes.review_is_done(models.REVIEW_NA))", "def test_service_initiated():\n assert \"ready\" in bkt_outcome_unwind.index()", "def test_turn_on(power_supply):\n power_supply.Init()\n assert power_supply.state() != tango.DevState.ON\n power_supply.current = 5.0\n power_supply.turn_on()\n assert power_supply.state() == tango.DevState.ON", "async def test_stretch_switch_changes(opp, mock_stretch):\n entry = await async_init_integration(opp, mock_stretch)\n assert entry.state is ConfigEntryState.LOADED\n\n await opp.services.async_call(\n \"switch\",\n \"turn_off\",\n {\"entity_id\": \"switch.koelkast_92c4a\"},\n blocking=True,\n )\n\n state = opp.states.get(\"switch.koelkast_92c4a\")\n assert str(state.state) == \"off\"\n\n await opp.services.async_call(\n \"switch\",\n \"toggle\",\n {\"entity_id\": \"switch.droger_52559\"},\n blocking=True,\n )\n state = opp.states.get(\"switch.droger_52559\")\n assert str(state.state) == \"off\"\n\n await opp.services.async_call(\n \"switch\",\n \"toggle\",\n {\"entity_id\": \"switch.droger_52559\"},\n blocking=True,\n )\n state = opp.states.get(\"switch.droger_52559\")\n assert str(state.state) == \"on\"", "def test_edit_recipe_category(self):\n self.signup('Bo', 'Theo', 'Bo_theo5@example.com', 'Bo1995', 'Bo1995')\n self.login('Bo_theo5@example.com', 'Bo1995')\n self.dashboard()\n self.category('JunkFood')\n self.dashboard()\n self.recipe_dashboard()\n self.create_recipe('cakes', 'blah, blah, blah....mix ingredient, heat')\n rv = self.edit_recipe('edited cakes', 'edited blah blah blah spoon , heat')\n self.assertIn(b'Recipe successfully updated', rv.data)", "def test_order_update_status_function(self):\n order = OrderInfo.objects.create(user=self.create_test_user())\n self.assertIsInstance(order.ordered, datetime)\n self.assertIsNone(order.cooked)\n self.assertIsNone(order.delivered)\n\n order.update_current_state()\n self.assertIsInstance(order.cooked, datetime)\n self.assertIsNone(order.delivered)\n\n order.update_current_state()\n self.assertIsInstance(order.cooked, datetime)\n self.assertIsInstance(order.delivered, datetime)", "def test_gain(self):\n self.plr.piles[Piles.DECK].set(\"Duchy\")\n self.plr.test_input = [\"Get Estate\"]\n self.plr.gain_card(\"Cursed Village\")\n self.assertNotIn(\"Curse\", self.plr.piles[Piles.DISCARD])\n self.assertIsNotNone(self.plr.piles[Piles.DISCARD][\"Estate\"])\n self.assertIn(\"Duchy\", self.g.trashpile)", "def test_should_flip():\n\n # Function: should_flip(bag_state, has_red, has_blue, has_green)\n\n # Test state and serial number:\n test_state = {\n 'suspicion level': 0,\n 'indicators': {},\n }\n test_state['serial number'] = 'JQXX7e3652'\n test_state['indicators']['check engine'] = False\n test_state['indicators']['everything ok'] = True\n\n # Label D, All off, return False.\n assert(should_flip(test_state, False, False, False) is False)\n\n # Label C, Red and blue on, green off, False.\n assert(should_flip(test_state, True, True, False) is False)\n\n # Label E, green and red on, blue off, True.\n assert(should_flip(test_state, True, False, True) is True)\n\n # Label J, green on, red and blue off, True (J in serial)\n assert(should_flip(test_state, False, False, True) is True)\n\n # Label Q, all lights on, True (Q in serial)\n assert(should_flip(test_state, True, True, True) is 
True)\n\n # Label Y, only blue light on. False (No Y in serial).\n assert(should_flip(test_state, False, True, False) is False)", "def the_changed_brightness_should_be_reflected_in_the_state_5():\n assert web_app.get_state()\n assert web_app.check_value_in_state(\"brightness\",\"5\")", "def test_market_state(self):\n\n result = self.client.get('/market/ca')\n self.assertEqual(200, result.status_code)", "def test_state(self, mock_req):\n self.setup_api(MOCK_DATA, mock_req)\n now = datetime(1970, month=1, day=1)\n with patch(\"homeassistant.util.dt.now\", return_value=now):\n for name in self.sensor_dict:\n sensor = self.sensor_dict[name][\"sensor\"]\n self.fake_delay(2)\n sensor.update()\n if name == google_wifi.ATTR_LAST_RESTART:\n assert \"1969-12-31 00:00:00\" == sensor.state\n elif name == google_wifi.ATTR_UPTIME:\n assert 1 == sensor.state\n elif name == google_wifi.ATTR_STATUS:\n assert \"Online\" == sensor.state\n else:\n assert \"initial\" == sensor.state", "def test_editing_supplies_user(self):\n id = self.testsupply.id\n oldstate = self.testsupply.state\n request = self.factory.put(\n '/api/supplies/%s/' % id, {'name': '3d printer', 'state': 'aaa'})\n force_authenticate(request, user=self.testuser1)\n response = SupplyDetailsView.as_view()(request, pk=id)\n # normal user should get forbidden error\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n # data should not change\n self.assertEqual(Supply.objects.get(id=id).state, oldstate)", "def test_shoppingcart_must_not_update_if_closed(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we create a product\n id_prod = self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])\n if id_prod:\n # then we can create the closed shoppingcart\n data = self.shoppingcart_data\n data[\"customer_id\"] = id\n data[\"product_id\"] = id\n data[\"is_closed\"] = True\n id_cart = self._create_model(\"shoppingcart\", data, [\"quantity\", \"discount_value\", \"is_closed\"])\n if id_cart:\n # then check for fail in update shoppingcart\n self.url = reverse(\"update-shoppingcart\")\n data[\"id\"] = id_cart\n response = self.client.post(self.url, data, **self.auth_headers)\n self.assertNotEqual(response.status_code, status.HTTP_200_OK)\n self.assertIsNotNone(id_cart)\n self.assertIsNotNone(id_prod)\n self.assertIsNotNone(id)", "def test_off_reboot_on(self):\n self.openstack('baremetal node power off {0}'\n .format(self.node['uuid']))\n show_prop = self.node_show(self.node['uuid'], ['power_state'])\n self.assertEqual('power off', show_prop['power_state'])\n\n self.openstack('baremetal node reboot {0}'.format(self.node['uuid']))\n show_prop = self.node_show(self.node['uuid'], ['power_state'])\n self.assertEqual('power on', show_prop['power_state'])", "async def test_power_state(hass, hk_driver, cls):\n entity_id = 'climate.test'\n\n # SUPPORT_ON_OFF = True\n hass.states.async_set(entity_id, STATE_HEAT,\n {ATTR_SUPPORTED_FEATURES: 4096,\n ATTR_OPERATION_MODE: STATE_HEAT,\n ATTR_TEMPERATURE: 23.0,\n ATTR_CURRENT_TEMPERATURE: 18.0})\n await hass.async_block_till_done()\n acc = cls.thermostat(hass, hk_driver, 'Climate', entity_id, 2, None)\n await hass.async_add_job(acc.run)\n await hass.async_block_till_done()\n assert acc.support_power_state is True\n\n assert acc.char_current_heat_cool.value == 1\n assert acc.char_target_heat_cool.value == 1\n\n hass.states.async_set(entity_id, STATE_OFF,\n 
{ATTR_OPERATION_MODE: STATE_HEAT,\n ATTR_TEMPERATURE: 23.0,\n ATTR_CURRENT_TEMPERATURE: 18.0})\n await hass.async_block_till_done()\n assert acc.char_current_heat_cool.value == 0\n assert acc.char_target_heat_cool.value == 0\n\n hass.states.async_set(entity_id, STATE_OFF,\n {ATTR_OPERATION_MODE: STATE_OFF,\n ATTR_TEMPERATURE: 23.0,\n ATTR_CURRENT_TEMPERATURE: 18.0})\n await hass.async_block_till_done()\n assert acc.char_current_heat_cool.value == 0\n assert acc.char_target_heat_cool.value == 0\n\n # Set from HomeKit\n call_turn_on = async_mock_service(hass, DOMAIN, 'turn_on')\n call_turn_off = async_mock_service(hass, DOMAIN, 'turn_off')\n call_set_operation_mode = async_mock_service(hass, DOMAIN,\n 'set_operation_mode')\n\n await hass.async_add_job(acc.char_target_heat_cool.client_update_value, 1)\n await hass.async_block_till_done()\n assert call_turn_on\n assert call_turn_on[0].data[ATTR_ENTITY_ID] == entity_id\n assert call_set_operation_mode\n assert call_set_operation_mode[0].data[ATTR_ENTITY_ID] == entity_id\n assert call_set_operation_mode[0].data[ATTR_OPERATION_MODE] == STATE_HEAT\n assert acc.char_target_heat_cool.value == 1\n\n await hass.async_add_job(acc.char_target_heat_cool.client_update_value, 0)\n await hass.async_block_till_done()\n assert call_turn_off\n assert call_turn_off[0].data[ATTR_ENTITY_ID] == entity_id\n assert acc.char_target_heat_cool.value == 0", "async def test_api_state_change(\n hass: HomeAssistant, mock_api_client: TestClient\n) -> None:\n hass.states.async_set(\"test.test\", \"not_to_be_set\")\n\n await mock_api_client.post(\n \"/api/states/test.test\", json={\"state\": \"debug_state_change2\"}\n )\n\n assert hass.states.get(\"test.test\").state == \"debug_state_change2\"", "def _check_state(self):\n if (self.stock_checker.isChecked() or self.future_checker.isChecked()) and self.name.buddy.text():\n self.btn_ok.setEnabled(True)\n self.btn_ok.setDefault(True)\n else:\n self.btn_ok.setEnabled(False)", "def mark_completed(self,status):\r\n if status == \"r\":\r\n self.status = \"c\"#It is to test the mark complete function in the test_book.py, otherwise this program works fine in the main.py\r\n return True\r\n elif status == \"c\":\r\n return False", "def testConditionChecking(self):\n\n state = State.from_problem(self.prob)\n \n drive = self.dom.get_action(\"drive\")\n with drive.instantiate([\"agent\", \"tru1\", \"apt1\"], self.prob):\n self.assert_(state.is_satisfied(drive.precondition))\n\n with drive.instantiate([\"agent\", \"tru1\", \"apt2\"], self.prob):\n self.assertFalse(state.is_satisfied(drive.precondition))", "def test_on_reboot_on(self):\n self.openstack('baremetal node power on {0}'.format(self.node['uuid']))\n show_prop = self.node_show(self.node['uuid'], ['power_state'])\n self.assertEqual('power on', show_prop['power_state'])\n\n self.openstack('baremetal node reboot {0}'.format(self.node['uuid']))\n show_prop = self.node_show(self.node['uuid'], ['power_state'])\n self.assertEqual('power on', show_prop['power_state'])", "def test_reset_computer(self):\n computer1 = computer.Computer(1)\n computer1.reset_computer()\n res = computer1.greediness == 7 and computer1.rolls == 0\n self.assertTrue(res)", "def test_update_deployment_state(self):\n pass", "def test_successReset(self):\n for i in range(3):\n self.circuit_breaker.failure()\n self.circuit_breaker.success()\n available0 = self.circuit_breaker.available()\n self.circuit_breaker.failure()\n available1 = self.circuit_breaker.available()\n self.circuit_breaker.failure()\n available2 = 
self.circuit_breaker.available()\n self.circuit_breaker.failure()\n available3 = self.circuit_breaker.available()\n available4 = self.circuit_breaker.available()\n self.assertEqual((available0, available1, available2, available3, available4),\n (True, True, True, False, False))", "def test_change_provisioned_throughput_usual_case():", "def _verify(\n hass,\n expected_state,\n expected_percentage,\n expected_oscillating,\n expected_direction,\n expected_preset_mode,\n):\n state = hass.states.get(_TEST_FAN)\n attributes = state.attributes\n assert state.state == str(expected_state)\n assert attributes.get(ATTR_PERCENTAGE) == expected_percentage\n assert attributes.get(ATTR_OSCILLATING) == expected_oscillating\n assert attributes.get(ATTR_DIRECTION) == expected_direction\n assert attributes.get(ATTR_PRESET_MODE) == expected_preset_mode", "def test_lots_of_coins_given(self):\n item, change, _ = give_item_and_change('apple', '1.00 0.5 0.2 0.1 0.1 0.05 0.02 0.02 0.01')\n self.assertEqual(item, 'apple')\n self.assertEqual(change, [1.0, 0.5, 0.05, 0.02])", "async def test_change_state(\n hass: HomeAssistant,\n aioclient_mock: AiohttpClientMocker,\n caplog: pytest.LogCaptureFixture,\n) -> None:\n await init_integration(hass, aioclient_mock)\n\n with patch(\"aiomodernforms.ModernFormsDevice.light\") as light_mock:\n await hass.services.async_call(\n LIGHT_DOMAIN,\n SERVICE_TURN_OFF,\n {ATTR_ENTITY_ID: \"light.modernformsfan_light\"},\n blocking=True,\n )\n await hass.async_block_till_done()\n light_mock.assert_called_once_with(\n on=False,\n )\n\n with patch(\"aiomodernforms.ModernFormsDevice.light\") as light_mock:\n await hass.services.async_call(\n LIGHT_DOMAIN,\n SERVICE_TURN_ON,\n {ATTR_ENTITY_ID: \"light.modernformsfan_light\", ATTR_BRIGHTNESS: 255},\n blocking=True,\n )\n await hass.async_block_till_done()\n light_mock.assert_called_once_with(on=True, brightness=100)", "def test_update_with_target_state(self):\n self.switch._target_state = True\n self.port.data = {}\n self.port.data[\"output\"] = \"stale\"\n self.switch.update()\n assert 1.0 == self.port.data[\"output\"]\n assert self.switch._target_state is None\n self.port.data[\"output\"] = \"untouched\"\n self.switch.update()\n assert \"untouched\" == self.port.data[\"output\"]", "def test_wait_tx_settled_ok(self, is_transaction_settled_mock):\n wait_tx_settled(\"some\", \"some\", timeout=4)", "def test_charge_correct_for_regular_after_close_1_day(self):\n rental = create_test_rental(\n book=self.book3,\n customer=self.user1,\n date_borrowed=\"2019-05-24 00:00:00.400952+00:00\",\n )\n close_rental_url = reverse(\"close_rental\", kwargs={\"pk\": rental.pk})\n\n data = {\"date_returned\": \"2019-05-25 13:46:57.249145+03:00\"}\n response = self.client.put(close_rental_url, data=data, format=\"json\")\n\n self.assertEqual(response.data[\"amount_charged\"], \"2.00\")\n self.assertEqual(response.data[\"rental_status\"], \"Closed\")\n self.assertEqual(response.data[\"currency\"], CURRENCY)", "def test_turn_off(power_supply):\n power_supply.Init()\n assert power_supply.state() != tango.DevState.OFF\n power_supply.turn_off()\n assert power_supply.state() == tango.DevState.OFF", "def process_recipe(self, recipe: int):\n\n recipe = CoffeeMachine.recipes.get_recipe(recipe)\n\n if (self.water - recipe.water) < 0:\n print('Sorry, not enough water!')\n return\n\n if (self.milk - recipe.milk) < 0:\n print('Sorry, not enough milk!')\n return\n\n if (self.beans - recipe.beans) < 0:\n print('Sorry, not enough coffee beans!')\n return\n\n if self.cups == 
0:\n print('Sorry, not enough coffee disposable cups!')\n return\n\n print('I have enough resources, making you a coffee!')\n\n # Charging\n self.water -= recipe.water\n self.milk -= recipe.milk\n self.beans -= recipe.beans\n self.money += recipe.cost\n self.cups -= 1", "def updateTamagochiState():\n\n global tamagochiState # Do not delete this part\n global nutrition # or this part\n\n pass\n\n nutrition = nutrition - 3 #decrease nutrition each turn\n\n if game.seeFood() > 0 nutrition < 100: #eat above all else\n print \"Tomagochi is eating!\"\n elif nutrition <= 25:\n print \"Tomagochi is hungry!\"\n else:\n if game.seeToy(): #play\n print \"Tomagochi is playing!\"\n else: #bored is default state\n print \"Tomogachi is bored.\"", "async def test_switch_read_lock_state(hass: HomeAssistant, utcnow) -> None:\n helper = await setup_test_component(hass, create_lock_service)\n\n state = await helper.async_update(\n ServicesTypes.LOCK_MECHANISM,\n {\n CharacteristicsTypes.LOCK_MECHANISM_CURRENT_STATE: 0,\n CharacteristicsTypes.LOCK_MECHANISM_TARGET_STATE: 0,\n },\n )\n assert state.state == \"unlocked\"\n assert state.attributes[\"battery_level\"] == 50\n\n state = await helper.async_update(\n ServicesTypes.LOCK_MECHANISM,\n {\n CharacteristicsTypes.LOCK_MECHANISM_CURRENT_STATE: 1,\n CharacteristicsTypes.LOCK_MECHANISM_TARGET_STATE: 1,\n },\n )\n assert state.state == \"locked\"\n\n await helper.async_update(\n ServicesTypes.LOCK_MECHANISM,\n {\n CharacteristicsTypes.LOCK_MECHANISM_CURRENT_STATE: 2,\n CharacteristicsTypes.LOCK_MECHANISM_TARGET_STATE: 1,\n },\n )\n state = await helper.poll_and_get_state()\n assert state.state == \"jammed\"\n\n await helper.async_update(\n ServicesTypes.LOCK_MECHANISM,\n {\n CharacteristicsTypes.LOCK_MECHANISM_CURRENT_STATE: 3,\n CharacteristicsTypes.LOCK_MECHANISM_TARGET_STATE: 1,\n },\n )\n state = await helper.poll_and_get_state()\n assert state.state == \"unknown\"\n\n await helper.async_update(\n ServicesTypes.LOCK_MECHANISM,\n {\n CharacteristicsTypes.LOCK_MECHANISM_CURRENT_STATE: 0,\n CharacteristicsTypes.LOCK_MECHANISM_TARGET_STATE: 1,\n },\n )\n state = await helper.poll_and_get_state()\n assert state.state == \"locking\"\n\n await helper.async_update(\n ServicesTypes.LOCK_MECHANISM,\n {\n CharacteristicsTypes.LOCK_MECHANISM_CURRENT_STATE: 1,\n CharacteristicsTypes.LOCK_MECHANISM_TARGET_STATE: 0,\n },\n )\n state = await helper.poll_and_get_state()\n assert state.state == \"unlocking\"", "def test_change_volume_status(self, volume, volumes_steps):\n volumes_steps.change_volume_status(volume.name, 'Error')\n volumes_steps.change_volume_status(volume.name, 'Available')", "def test_set_state(self):\n\n self.pump.set_state = MagicMock(return_value=True)\n self.pump.set_state('PUMP_IN')\n self.pump.set_state.assert_called_with('PUMP_IN')", "async def test_set_hvac_bad_attr_and_state(opp):\n state = opp.states.get(ENTITY_CLIMATE)\n assert state.attributes.get(ATTR_HVAC_ACTION) == CURRENT_HVAC_COOL\n assert state.state == HVAC_MODE_COOL\n\n with pytest.raises(vol.Invalid):\n await common.async_set_hvac_mode(opp, None, ENTITY_CLIMATE)\n await opp.async_block_till_done()\n\n state = opp.states.get(ENTITY_CLIMATE)\n assert state.attributes.get(ATTR_HVAC_ACTION) == CURRENT_HVAC_COOL\n assert state.state == HVAC_MODE_COOL", "def check_state(self):\n pass", "def test_update_condition_false(self):\n original_alt_info = getattr(self.form, 'alt_field_info', None)\n expected_label = 'alt_test_feature'\n test_method = getattr(self.form, 'condition_' + expected_label, 
None)\n alt_info = getattr(self, 'alt_field_info', None)\n expected = {}\n self.form.alt_field_info = alt_info\n self.form.test_condition_response = False\n actual = self.form.get_alt_field_info()\n\n self.assertIsNotNone(alt_info)\n self.assertIsNotNone(test_method)\n self.assertFalse(test_method())\n self.assertIsNotNone(expected)\n self.assertIn(expected_label, alt_info)\n self.assertEqual(expected, actual)\n\n self.form.test_condition_response = False\n self.form.alt_field_info = original_alt_info\n if original_alt_info is None:\n del self.form.alt_field_info", "def test_ensure_coverage_changes_status(self):\n always = AlwaysSuccessfulCoverageProvider(self._db)\n persistent = NeverSuccessfulCoverageProvider(self._db)\n transient = TransientFailureCoverageProvider(self._db)\n\n # Cover the same identifier multiple times, simulating all\n # possible states of a CoverageRecord. The same CoverageRecord\n # is used every time and the status is changed appropriately\n # after every run.\n c1 = persistent.ensure_coverage(self.identifier, force=True)\n assert CoverageRecord.PERSISTENT_FAILURE == c1.status\n\n c2 = transient.ensure_coverage(self.identifier, force=True)\n assert c2 == c1\n assert CoverageRecord.TRANSIENT_FAILURE == c1.status\n\n c3 = always.ensure_coverage(self.identifier, force=True)\n assert c3 == c1\n assert CoverageRecord.SUCCESS == c1.status\n\n c4 = persistent.ensure_coverage(self.identifier, force=True)\n assert c4 == c1\n assert CoverageRecord.PERSISTENT_FAILURE == c1.status", "async def test_switch_change_lock_state(hass: HomeAssistant, utcnow) -> None:\n helper = await setup_test_component(hass, create_lock_service)\n\n await hass.services.async_call(\n \"lock\", \"lock\", {\"entity_id\": \"lock.testdevice\"}, blocking=True\n )\n helper.async_assert_service_values(\n ServicesTypes.LOCK_MECHANISM,\n {\n CharacteristicsTypes.LOCK_MECHANISM_TARGET_STATE: 1,\n },\n )\n\n await hass.services.async_call(\n \"lock\", \"unlock\", {\"entity_id\": \"lock.testdevice\"}, blocking=True\n )\n helper.async_assert_service_values(\n ServicesTypes.LOCK_MECHANISM,\n {\n CharacteristicsTypes.LOCK_MECHANISM_TARGET_STATE: 0,\n },\n )", "def testEffects(self):\n\n state = State.from_problem(self.prob)\n \n fold = Fact(StateVariable(self.prob.functions[\"location-of\"][0], [self.prob[\"tru1\"]]), self.prob[\"pos1\"])\n fnew = Fact(StateVariable(self.prob.functions[\"location-of\"][0], [self.prob[\"tru1\"]]), self.prob[\"apt1\"])\n self.assert_(fold in state)\n\n drive = self.dom.get_action(\"drive\")\n with drive.instantiate([\"agent\", \"tru1\", \"apt1\"], self.prob):\n state.apply_effect(drive.effect)\n\n self.assert_(fnew in state)\n self.assertFalse(fold in state)", "async def test_state_update(hass):\n assert await setup_multimatic(hass)\n assert_entities_count(hass, 11)\n\n assert hass.states.is_state(\"binary_sensor.dhw_circulation\", \"off\")\n assert hass.states.is_state(\"binary_sensor.room_1_window\", \"off\")\n assert hass.states.is_state(\"binary_sensor.123456789_lock\", \"on\")\n assert hass.states.is_state(\"binary_sensor.123456789_battery\", \"off\")\n assert hass.states.is_state(\"binary_sensor.boiler\", \"off\")\n assert hass.states.is_state(\"binary_sensor.123456789_connectivity\", \"on\")\n assert hass.states.is_state(\"binary_sensor.multimatic_system_update\", \"off\")\n assert hass.states.is_state(\"binary_sensor.multimatic_system_online\", \"on\")\n assert hass.states.is_state(\"binary_sensor.multimatic_holiday\", \"off\")\n assert 
hass.states.is_state(\"binary_sensor.multimatic_errors\", \"off\")\n state = hass.states.get(\"binary_sensor.multimatic_holiday\")\n assert state.attributes.get(\"start_date\") is None\n assert state.attributes.get(\"end_date\") is None\n assert state.attributes.get(\"temperature\") is None\n assert hass.states.is_state(\"binary_sensor.multimatic_quick_mode\", \"off\")\n\n dhw = SystemManagerMock.data[\"get_dhw\"]\n dhw.circulation.time_program = time_program(SettingModes.ON, None)\n dhw.circulation.operating_mode = OperatingModes.AUTO\n\n hvac_status = SystemManagerMock.data[\"get_hvac_status\"]\n hvac_status.boiler_status.status_code = \"F11\"\n hvac_status.online = \"OFFLINE\"\n hvac_status.update = \"UPDATE_PENDING\"\n hvac_status.errors = [\n Error(\"device\", \"title\", \"status_code\", \"descr\", datetime.datetime.now())\n ]\n\n rooms = SystemManagerMock.data[\"get_rooms\"]\n rooms[0].devices = [Device(\"Device 1\", \"123456789\", \"VALVE\", True, True)]\n rooms[0].time_program = time_program(None, 20)\n rooms[0].temperature = 22\n rooms[0].target_high = 24\n rooms[0].operating_mode = OperatingModes.AUTO\n rooms[0].child_lock = True\n rooms[0].window_open = True\n\n new_holiday_mode = active_holiday_mode()\n SystemManagerMock.data[\"get_holiday_mode\"] = new_holiday_mode\n SystemManagerMock.data[\"get_quick_mode\"] = QuickModes.HOTWATER_BOOST\n\n await goto_future(hass)\n\n assert_entities_count(hass, 11)\n assert hass.states.is_state(\"binary_sensor.room_1_window\", \"on\")\n assert hass.states.is_state(\"binary_sensor.123456789_lock\", \"off\")\n assert hass.states.is_state(\"binary_sensor.123456789_battery\", \"on\")\n assert hass.states.is_state(\"binary_sensor.boiler\", \"on\")\n assert hass.states.is_state(\"binary_sensor.123456789_connectivity\", \"off\")\n assert hass.states.is_state(\"binary_sensor.multimatic_system_update\", \"on\")\n assert hass.states.is_state(\"binary_sensor.multimatic_system_online\", \"off\")\n assert hass.states.is_state(\"binary_sensor.multimatic_errors\", \"on\")\n assert hass.states.is_state(\"binary_sensor.multimatic_holiday\", \"on\")\n state = hass.states.get(\"binary_sensor.multimatic_holiday\")\n assert state.attributes[\"start_date\"] == new_holiday_mode.start_date.isoformat()\n assert state.attributes[\"end_date\"] == new_holiday_mode.end_date.isoformat()\n assert state.attributes[\"temperature\"] == new_holiday_mode.target\n assert hass.states.is_state(\"binary_sensor.multimatic_quick_mode\", \"on\")\n state = hass.states.get(\"binary_sensor.multimatic_quick_mode\")\n assert state.attributes[\"quick_mode\"] == QuickModes.HOTWATER_BOOST.name\n assert hass.states.is_state(\"binary_sensor.dhw_circulation\", \"off\")\n\n SystemManagerMock.data[\"get_holiday_mode\"] = HolidayMode(False)\n\n await goto_future(hass)\n assert hass.states.is_state(\"binary_sensor.dhw_circulation\", \"on\")", "def test_unavailabe_items(self):\n item, change, _ = give_item_and_change('crisps', .50)\n self.assertIsNone(item)\n self.assertEqual(change, 0.5)", "def state_wait_do(cfg, app, win, events):", "def _sun_chaged(hass, entity_id=None, old_state=None, new_state=None):\n PERSIST['states'][1] = new_state.state == 'off'\n _eval_state(hass)", "def test_discard_buy(self):\n self.plr.test_input = [\"finish selecting\", \"discard gold\"]\n self.plr.play_card(self.card)\n self.assertEqual(self.plr.piles[Piles.HAND].size(), 2)\n self.assertEqual(self.plr.actions.get(), 1)\n self.assertEqual(self.plr.buys.get(), 2)\n self.assertNotIn(\"Gold\", self.plr.piles[Piles.HAND])", 
"def test_update_from_pending_change_with_rich_text_reset(self):\n review_request = ReviewRequest.objects.create(self.user,\n self.repository)\n draft = ReviewRequestDraft.create(review_request)\n\n draft.description_rich_text = True\n draft.testing_done_rich_text = True\n\n changeset = ChangeSet()\n changeset.changenum = 4\n changeset.summary = '* This is a summary'\n changeset.description = '* This is a description.'\n changeset.testing_done = '* This is some testing.'\n draft.update_from_pending_change(4, changeset)\n\n self.assertEqual(draft.summary, '* This is a summary')\n self.assertEqual(draft.description, '* This is a description.')\n self.assertFalse(draft.description_rich_text)\n self.assertEqual(draft.testing_done, '* This is some testing.')\n self.assertFalse(draft.testing_done_rich_text)", "def test_multiple_change_review_status(self):\n runid = self._runid\n logging.debug('Get all run results from the db for runid: ' +\n str(runid))\n\n run_results = get_all_run_results(self._cc_client, runid)\n self.assertIsNotNone(run_results)\n self.assertNotEqual(len(run_results), 0)\n\n bug = run_results[0]\n\n # There are no system comments for this bug.\n comments = self.__get_system_comments(bug.reportId)\n self.assertEqual(len(comments), 0)\n\n # Change review status to confirmed bug.\n review_comment = 'This is really a bug'\n status = ReviewStatus.CONFIRMED\n success = self._cc_client.changeReviewStatus(\n bug.reportId, status, review_comment)\n\n self.assertTrue(success)\n logging.debug('Bug review status changed successfully')\n\n report = self._cc_client.getReport(bug.reportId)\n self.assertEqual(report.reviewData.comment, review_comment)\n self.assertEqual(report.reviewData.status, status)\n\n # There is one system comment for this bug.\n comments = self.__get_system_comments(bug.reportId)\n self.assertEqual(len(comments), 1)\n\n # Try to update the review status again with the same data and check\n # that no new system comment entry will be created.\n success = self._cc_client.changeReviewStatus(\n bug.reportId, status, review_comment)\n comments = self.__get_system_comments(bug.reportId)\n self.assertEqual(len(comments), 1)\n\n # Test that updating only the review status message a new system\n # comment will be created.\n success = self._cc_client.changeReviewStatus(\n bug.reportId, status, \"test system comment change\")\n self.assertTrue(success)\n comments = self.__get_system_comments(bug.reportId)\n self.assertEqual(len(comments), 2)\n\n # Try to change review status back to unreviewed.\n status = ReviewStatus.UNREVIEWED\n success = self._cc_client.changeReviewStatus(\n bug.reportId,\n status,\n None)\n\n self.assertTrue(success)\n logging.debug(\"Bug review status changed successfully\")\n\n report = self._cc_client.getReport(bug.reportId)\n self.assertEqual(report.reviewData.comment, '')\n self.assertEqual(report.reviewData.status, status)\n\n # Change review status to false positive.\n review_comment = 'This is not a bug'\n status = ReviewStatus.FALSE_POSITIVE\n success = self._cc_client.changeReviewStatus(\n bug.reportId, status, review_comment)\n\n self.assertTrue(success)\n logging.debug('Bug review status changed successfully')\n\n report = self._cc_client.getReport(bug.reportId)\n self.assertEqual(report.reviewData.comment, review_comment)\n self.assertEqual(report.reviewData.status, status)\n\n # Change review status to intentional.\n review_comment = ''\n status = ReviewStatus.INTENTIONAL\n success = self._cc_client.changeReviewStatus(\n bug.reportId, status, 
review_comment)\n\n self.assertTrue(success)\n logging.debug('Bug review status changed successfully')\n\n report = self._cc_client.getReport(bug.reportId)\n self.assertEqual(report.reviewData.comment, review_comment)\n self.assertEqual(report.reviewData.status, status)", "def test_charge_correct_for_regular_after_close_2_days(self):\n rental = create_test_rental(\n book=self.book3,\n customer=self.user1,\n date_borrowed=\"2019-05-23 00:00:00.400952+00:00\",\n )\n close_rental_url = reverse(\"close_rental\", kwargs={\"pk\": rental.pk})\n\n data = {\"date_returned\": \"2019-05-25 13:46:57.249145+03:00\"}\n response = self.client.put(close_rental_url, data=data, format=\"json\")\n\n self.assertEqual(response.data[\"amount_charged\"], \"2.00\")\n self.assertEqual(response.data[\"rental_status\"], \"Closed\")\n self.assertEqual(response.data[\"currency\"], CURRENCY)", "def test_update_state(self):\n # add a task\n self.add(title=\"Sample task doing\", description=\"for sample\", state=\"doing\")\n task = Task.query.filter_by(title='Sample task doing').first()\n\n # change task to todo\n old_id = task.id\n self.update_state(id=old_id, state='todo')\n task = Task.query.filter_by(title='Sample task doing').first()\n self.assertEqual(task.id, old_id)\n self.assertEqual(task.state, 'todo')\n\n # change task to done\n old_id = task.id\n self.update_state(id=old_id, state='done')\n task = Task.query.filter_by(title='Sample task doing').first()\n self.assertEqual(task.id, old_id)\n self.assertEqual(task.state, 'done')", "def test_update_shopping_cart(self):\n food_cost = self.browser.find_element_by_id('food-cost')\n old_food_cost = int(food_cost.text)\n\n items = self.get_list_of_items()\n index = randint(1, len(items) - 1)\n list_item = self.get_item_dict(items[index])\n item_price = self.expected_contents[index]['price']\n old_cost = self.expected_contents[index]['cost']\n\n increase_by = randint(5, 10)\n directions = [\n {\n 'action': 'increase',\n 'range': range(1, increase_by + 1)\n },\n {\n 'action': 'decrease',\n 'range': range(increase_by - 1, - 1, -1)\n }\n ]\n for direction in directions:\n for i in direction['range']:\n list_item[direction['action']].click()\n sleep(0.1)\n new_cost = int(list_item['cost'].text)\n new_food_cost = int(food_cost.text)\n self.assertTrue(new_food_cost - old_food_cost ==\n new_cost - old_cost == item_price * i)", "def test_statemachine_confirm():\n # Create state machine\n model_data = {\n 'url': 'test',\n 'state': statemachine.PaymentChannelState.CONFIRMING_DEPOSIT,\n 'creation_time': 42,\n 'deposit_tx': bitcoin.Transaction.from_hex(\"010000000119de54dd7043927219cca4c06cc8b94c7c862b6486b0f989ea4c6569fb34383d010000006b483045022100c45e5bd8d00caa1cd3ad46e078ec132c9c505b3168d1d1ffe6285cf054f54ed302203ea12c4203ccee8a9de616cc22f081eed47a78660ce0a01cb3a97e302178a573012103ee071c95cb772e57a6d8f4f987e9c61b857e63d9f3b5be7a84bdba0b5847099dffffffff0198b101000000000017a9149bc3354ccfd998cf16628449b940e6914210f1098700000000\"), # nopep8\n 'refund_tx': bitcoin.Transaction.from_hex(\"0100000001ef513a66dd5f79c0b6cac9b74192b6d405724a7f559979f5aad5ab848c551a7e000000009c47304402207c866a5d8d46c767975c95b9fa65051578898445c85f367c4d6b56c6b795491102202db45315bfd27aa19bd7156aa70aed48ebe331c88297711ff675da5ff069f7b90101004c5063210316f5d704b828c3252432886a843649730e08ae01bbbd5c6bde63756d7f54f961ad670432a77056b175682103ee071c95cb772e57a6d8f4f987e9c61b857e63d9f3b5be7a84bdba0b5847099dac0000000001888a0100000000001976a914b42fb00f78266bba89feee86036df44401320fba88ac32a77056\"), # nopep8\n 
'payment_tx': None,\n 'spend_tx': None,\n 'spend_txid': None,\n 'min_output_amount': 1000,\n }\n wallet = walletwrapper.Two1WalletWrapper(mock.MockTwo1Wallet(), mock.MockBlockchain())\n model = statemachine.PaymentChannelModel(**model_data)\n sm = statemachine.PaymentChannelStateMachine(model, wallet)\n\n # Assert state machine state\n expected_state = {}\n expected_state['state'] = statemachine.PaymentChannelState.CONFIRMING_DEPOSIT\n expected_state['balance_amount'] = 100000\n expected_state['deposit_amount'] = 100000\n expected_state['fee_amount'] = 10000\n expected_state['creation_time'] = lambda sm: sm.creation_time > 0\n expected_state['expiration_time'] = 1450223410\n expected_state['deposit_tx_utxo_index'] = 0\n expected_state['deposit_tx'] = \"010000000119de54dd7043927219cca4c06cc8b94c7c862b6486b0f989ea4c6569fb34383d010000006b483045022100c45e5bd8d00caa1cd3ad46e078ec132c9c505b3168d1d1ffe6285cf054f54ed302203ea12c4203ccee8a9de616cc22f081eed47a78660ce0a01cb3a97e302178a573012103ee071c95cb772e57a6d8f4f987e9c61b857e63d9f3b5be7a84bdba0b5847099dffffffff0198b101000000000017a9149bc3354ccfd998cf16628449b940e6914210f1098700000000\" # nopep8\n expected_state['deposit_txid'] = \"7e1a558c84abd5aaf57999557f4a7205d4b69241b7c9cab6c0795fdd663a51ef\"\n expected_state['deposit_txid_signature'] = \"30450221008f51b6565a8ee67c32529ed840116c44e1f60a628c51ac59720cc8c6df1b5eab02204ccc32c89f81425f483c64c6f8dd77e57eefd3b6a5b7548d1875f5ef3f86cf27\" # nopep8\n expected_state['refund_tx'] = \"0100000001ef513a66dd5f79c0b6cac9b74192b6d405724a7f559979f5aad5ab848c551a7e000000009c47304402207c866a5d8d46c767975c95b9fa65051578898445c85f367c4d6b56c6b795491102202db45315bfd27aa19bd7156aa70aed48ebe331c88297711ff675da5ff069f7b90101004c5063210316f5d704b828c3252432886a843649730e08ae01bbbd5c6bde63756d7f54f961ad670432a77056b175682103ee071c95cb772e57a6d8f4f987e9c61b857e63d9f3b5be7a84bdba0b5847099dac0000000001888a0100000000001976a914b42fb00f78266bba89feee86036df44401320fba88ac32a77056\" # nopep8\n expected_state['refund_txid'] = \"e49cef2fbaf7b6590eb502e4b143f24d5d95ca2e255b166f3b40bef786a32bba\"\n expected_state['payment_tx'] = None\n expected_state['spend_tx'] = None\n expected_state['spend_txid'] = None\n assert_statemachine_state(expected_state, sm)\n\n # Check invalid transition CONFIRMING_DEPOSIT -> OUTSTANDING via pay()\n with pytest.raises(statemachine.StateTransitionError):\n sm.pay(1)\n # Check invalid transition CONFIRMING_DEPOSIT -> READY via pay_ack()\n with pytest.raises(statemachine.StateTransitionError):\n sm.pay_ack()\n # Check invalid transition CONFIRMING_DEPOSIT -> READY via pay_nack()\n with pytest.raises(statemachine.StateTransitionError):\n sm.pay_nack()\n\n # Check valid transition CONFIRMING_DEPOSIT -> READY via confirm()\n sm.confirm()\n expected_state['state'] = statemachine.PaymentChannelState.READY\n assert_statemachine_state(expected_state, sm)\n\n # Reset state machine\n model = statemachine.PaymentChannelModel(**model_data)\n sm = statemachine.PaymentChannelStateMachine(model, wallet)\n\n # Check valid transition CONFIRMING_DEPOSIT -> CONFIRMING_SPEND via close()\n sm.close(\"2654e56291a542e99d26e1d2ba34d455031517453b6c7ae256c62e151ddc41cc\")\n expected_state['spend_txid'] = \"2654e56291a542e99d26e1d2ba34d455031517453b6c7ae256c62e151ddc41cc\"\n expected_state['state'] = statemachine.PaymentChannelState.CONFIRMING_SPEND\n assert_statemachine_state(expected_state, sm)\n\n # Reset state machine\n model = statemachine.PaymentChannelModel(**model_data)\n sm = statemachine.PaymentChannelStateMachine(model, 
wallet)\n\n # Check valid transition OPENING -> CLOSED via finalize()\n sm.finalize(\"0100000001ef513a66dd5f79c0b6cac9b74192b6d405724a7f559979f5aad5ab848c551a7e000000009c47304402207c866a5d8d46c767975c95b9fa65051578898445c85f367c4d6b56c6b795491102202db45315bfd27aa19bd7156aa70aed48ebe331c88297711ff675da5ff069f7b90101004c5063210316f5d704b828c3252432886a843649730e08ae01bbbd5c6bde63756d7f54f961ad670432a77056b175682103ee071c95cb772e57a6d8f4f987e9c61b857e63d9f3b5be7a84bdba0b5847099dac0000000001888a0100000000001976a914b42fb00f78266bba89feee86036df44401320fba88ac32a77056\") # nopep8\n expected_state['spend_tx'] = \"0100000001ef513a66dd5f79c0b6cac9b74192b6d405724a7f559979f5aad5ab848c551a7e000000009c47304402207c866a5d8d46c767975c95b9fa65051578898445c85f367c4d6b56c6b795491102202db45315bfd27aa19bd7156aa70aed48ebe331c88297711ff675da5ff069f7b90101004c5063210316f5d704b828c3252432886a843649730e08ae01bbbd5c6bde63756d7f54f961ad670432a77056b175682103ee071c95cb772e57a6d8f4f987e9c61b857e63d9f3b5be7a84bdba0b5847099dac0000000001888a0100000000001976a914b42fb00f78266bba89feee86036df44401320fba88ac32a77056\" # nopep8\n expected_state['spend_txid'] = \"e49cef2fbaf7b6590eb502e4b143f24d5d95ca2e255b166f3b40bef786a32bba\"\n expected_state['state'] = statemachine.PaymentChannelState.CLOSED\n assert_statemachine_state(expected_state, sm)", "def test_init(power_supply):\n power_supply.Init()\n assert power_supply.state() == tango.DevState.STANDBY", "def ComputeChangeInStateOfCharge(self):\r\n pass", "def test_a_renew_non_active_license(self):\n self.assertTrue(self.status.is_ready(), \"The license is active, non active state awaited\")\n with self.assertRaisesRegexp(IOError, 'PUT .* HTTP error 4[0-9][0-9]$'):\n self.status.renew(self.status.DEVICEID1, self.status.DEVICENAME1, self.end+2*self.ADAY)", "def test_out_of_date(self):\n self.assertTrue(update_available(0.0))", "def test_manager_change_order_status(self):\n self.client.force_authenticate(self.user)\n cancel = \"CA\"\n url = reverse('order-set_status', args=[self.order.id])\n resp = self.client.patch(url, data={\n \"status\": cancel\n })\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n new_status = Order.objects.values(\"status\").get(pk=self.order.id)\n self.assertEqual(new_status[\"status\"], cancel)\n\n with self.subTest('customer can not change order status'):\n self.user.role = get_user_model().CUSTOMER\n self.client.force_authenticate(self.user)\n resp = self.client.patch(url, data={\n \"status\": cancel\n })\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)", "async def test_attribute_no_state(hass):\n calls_1 = async_mock_service(hass, DOMAIN, SERVICE_TURN_ON)\n calls_2 = async_mock_service(hass, DOMAIN, SERVICE_TURN_OFF)\n calls_3 = async_mock_service(hass, DOMAIN, SERVICE_SET_OPERATION_MODE)\n\n value = \"dummy\"\n\n await async_reproduce_states(hass, [\n State(ENTITY_1, None,\n {ATTR_OPERATION_MODE: value})\n ])\n\n await hass.async_block_till_done()\n\n assert len(calls_1) == 0\n assert len(calls_2) == 0\n assert len(calls_3) == 1\n assert calls_3[0].data == {'entity_id': ENTITY_1,\n ATTR_OPERATION_MODE: value}", "def runTest(self):\n false_change = EtcProposalsChangeStub(False)\n true_change = EtcProposalsChangeStub()\n true_type = EtcProposalChangeType(true_change)\n false_type = EtcProposalChangeType(false_change)\n self.testbox.pack_start(false_type, False, False, 1)\n self.testbox.pack_start(true_type, False, False, 1)\n gtk.main()\n self.failIf(self.Failed, 'Test failed.')", "def 
test_decision_maker_handle_update_apply(self):\n good_holdings = {\"good_id\": 2}\n currency_holdings = {\"FET\": 100}\n currency_deltas = {\"FET\": -10}\n good_deltas = {\"good_id\": 1}\n state_update_message = StateUpdateMessage(\n performative=StateUpdateMessage.Performative.APPLY,\n amount_by_currency_id=currency_deltas,\n quantities_by_good_id=good_deltas,\n )\n self.decision_maker.handle(state_update_message)\n expected_amount_by_currency_id = {\n key: currency_holdings.get(key, 0) + currency_deltas.get(key, 0)\n for key in set(currency_holdings) | set(currency_deltas)\n }\n expected_quantities_by_good_id = {\n key: good_holdings.get(key, 0) + good_deltas.get(key, 0)\n for key in set(good_holdings) | set(good_deltas)\n }\n assert (\n self.decision_maker.ownership_state.amount_by_currency_id\n == expected_amount_by_currency_id\n ), \"The amount_by_currency_id must be equal with the expected amount.\"\n assert (\n self.decision_maker.ownership_state.quantities_by_good_id\n == expected_quantities_by_good_id\n )", "def test_state_after_failure(self):\n pass", "def test_done_value_can_be_set_to_True(self):\n item = Item(name = \"A test item\", done = True)\n self.assertEqual(item.name, \"A test item\")\n self.assertTrue(item.done)", "def test_book_return_makes_book_available_for_borrow(self):\n book = Book.objects.get(copies=1)\n client1 = APIClient()\n client1.login(username=self.students[0].username, password=\"salam*123\")\n client1.post(\"/borrows/\", data={\"book\": book.id})\n client2 = APIClient()\n client2.login(username=self.manager.username, password=\"salam*123\")\n client2.post(\"/borrows/1/start/\", data={\"duration\": 5})\n response = client2.post(\"/borrows/1/terminate/\")\n self.assertIsNotNone(response.json()[\"returned_at\"])\n response = client1.post(\"/borrows/\", data={\"book\": book.id})\n self.assertEqual(response.status_code, 201)", "def test_checkbox_attr_change(self):\n custom_attribute_values = [{\n \"custom_attribute_id\": self.cad3.id,\n \"attribute_value\": \"1\",\n }]\n response = self.api.put(self.assessment, {\n \"custom_attribute_values\": custom_attribute_values\n })\n self.assert200(response)\n\n notifs, notif_data = common.get_daily_notifications()\n updated = notif_data[\"user@example.com\"][\"assessment_updated\"]\n self.assertEqual(len(notifs), 1)\n self.assertEqual(\n updated[self.assessment.id][\"updated_data\"].keys(), [\"CA3\"])", "def test_charge_correct_for_novel_after_close_2_days(self):\n rental = create_test_rental(\n book=self.book1,\n customer=self.user1,\n date_borrowed=\"2019-05-23 00:00:00.400952+00:00\",\n )\n close_rental_url = reverse(\"close_rental\", kwargs={\"pk\": rental.pk})\n\n data = {\"date_returned\": \"2019-05-25 13:46:57.249145+03:00\"}\n response = self.client.put(close_rental_url, data=data, format=\"json\")\n\n self.assertEqual(response.data[\"amount_charged\"], \"4.50\")\n self.assertEqual(response.data[\"rental_status\"], \"Closed\")\n self.assertEqual(response.data[\"currency\"], CURRENCY)", "def testCheck(self):\n change = ChangeState(self.config, \"changestate_t\")\n\n # Run through all good state transitions and assert that they work\n for state in self.transitions:\n for dest in self.transitions[state]:\n change.check(dest, state)\n dummystates = ['dummy1', 'dummy2', 'dummy3', 'dummy4']\n\n # Then run through some bad state transistions and assertRaises(AssertionError)\n for state in self.transitions:\n for dest in dummystates:\n self.assertRaises(AssertionError, change.check, dest, state)\n return", "def 
unit_state_change_cb (unit, state) :\n\n print \"[Callback]: ComputeUnit '%s' state: %s.\" % (unit.uid, state)\n\n if state == rp.FAILED :\n sys.exit (1)", "async def test_turn_on_with_mode(hass):\n calls_1 = async_mock_service(hass, DOMAIN, SERVICE_TURN_ON)\n calls_2 = async_mock_service(hass, DOMAIN, SERVICE_SET_OPERATION_MODE)\n\n await async_reproduce_states(hass, [\n State(ENTITY_1, 'on',\n {ATTR_OPERATION_MODE: STATE_HEAT})\n ])\n\n await hass.async_block_till_done()\n\n assert len(calls_1) == 1\n assert calls_1[0].data == {'entity_id': ENTITY_1}\n\n assert len(calls_2) == 1\n assert calls_2[0].data == {'entity_id': ENTITY_1,\n ATTR_OPERATION_MODE: STATE_HEAT}", "def _checkModeChange(self, expected, target=None):\n result = self._parseModeChange(self.client.calls, target)\n self.assertEqual(result, expected)\n self.client.calls = []" ]
[ "0.61100876", "0.5935458", "0.59113294", "0.5875447", "0.58063436", "0.57781756", "0.57589597", "0.5701074", "0.5680897", "0.5638473", "0.5637342", "0.56042886", "0.5569046", "0.5534861", "0.5534524", "0.54650354", "0.545623", "0.54499906", "0.542222", "0.5383806", "0.5364995", "0.5354965", "0.5350797", "0.5349044", "0.5345805", "0.5343979", "0.5336155", "0.53216034", "0.53191954", "0.5310999", "0.5292194", "0.5288272", "0.5272192", "0.5270483", "0.52659553", "0.5240362", "0.5236071", "0.52345365", "0.5224133", "0.5219883", "0.52154905", "0.5213587", "0.5194478", "0.5188375", "0.5178882", "0.51709145", "0.51661175", "0.5165207", "0.51611567", "0.51503265", "0.51490134", "0.5132696", "0.5131972", "0.51264405", "0.51227176", "0.50947374", "0.50924176", "0.5091732", "0.50895625", "0.5087764", "0.5083171", "0.5082223", "0.50756687", "0.50677896", "0.50666434", "0.5066158", "0.506559", "0.50649077", "0.5060626", "0.5047897", "0.50443685", "0.5042115", "0.50402266", "0.5038407", "0.50359106", "0.50305945", "0.5028998", "0.50221664", "0.5016988", "0.5014889", "0.5014862", "0.50142163", "0.50117445", "0.50116247", "0.5011114", "0.50108826", "0.50047636", "0.5004591", "0.50021815", "0.4996907", "0.4993887", "0.49932545", "0.49811682", "0.49806514", "0.4977767", "0.4974481", "0.49723077", "0.49721757", "0.49631333", "0.49562392" ]
0.75115085
0
Formats the output of a transaction receipt to its proper values
def output_transaction_receipt_formatter(receipt):
    if receipt is None:
        return None

    logs_formatter = compose(functools.partial(map, outputLogFormatter), list)

    formatters = {
        'blockNumber': to_decimal,
        'transactionIndex': to_decimal,
        'cumulativeGasUsed': to_decimal,
        'gasUsed': to_decimal,
        'logs': lambda l: logs_formatter(l) if is_array(l) else l,
    }

    return {
        key: formatters.get(key, identity)(value)
        for key, value in receipt.items()
    }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_receipt(self) -> typing.List[str]:\n lines = []\n euro_total=0\n usd_total=0\n gbp_total=0\n\n for item in self._items.items():\n euro_price = self._get_product_price(item[0]) * item[1]\n usd_price = self.get_price_in_currency(euro_price,\"USD\")\n gbp_price = self.get_price_in_currency(euro_price,\"GBP\")\n\n euro_total += euro_price\n usd_total += usd_price\n gbp_total += gbp_price\n\n euro_price_string = \"€%.2f\" % euro_price\n usd_price_string = \"$%.2f\" % usd_price\n gbp_price_string = \"£%.2f\" % gbp_price\n \n lines.append(item[0] + \" - \" + str(item[1]) + ' - ' + euro_price_string + ' - ' + \\\n usd_price_string + ' - ' + gbp_price_string)\n \n euro_total_str=\"€%.2f\" % euro_total\n usd_total_str=\"$%.2f\" % usd_total\n gbp_total_str=\"£%.2f\" % gbp_total\n\n lines.append(\"Total = \"+euro_total_str+ ' - ' + usd_total_str + ' - ' + gbp_total_str)\n logging.info(str(datetime.now())+': Receipt =' +str(lines))\n return lines", "def convert_trans_to_string(self, transaction):\r\n #note, repr will not work because it doesn't remove curly brackets and colons\r\n record_list = []\r\n for mode, trans in transaction.iteritems():\r\n record_list.append(str(\"mode: \" + mode + \" \"))\r\n for product,quantity in trans.iteritems():\r\n record_list.append(str(product + \":\"))\r\n record_list.append(str(quantity) + \" \")\r\n \r\n record_string = \"\".join(record_list) + \"\\n\"\r\n return record_string", "def receipt(basket):\n\n cprint(\"\"\"\\n\\n Item Price Discount Final Price\n------------------------------------------------------------------\"\"\")\n sigma_all = sum([e[1] for e in basket])\n sigma_discount = 0\n for name, price, discount in basket:\n discounted_price = (100 - discount) / 100 * price\n cprint(\"| %16s | £%10.2f | %3d\" % (name, price, discount) + \"%\" + f\" | £%10.2f |\" % discounted_price)\n sigma_discount += discounted_price\n cprint(\"|________________________________________________________________|\")\n\n cprint(\"\\n\\nTotal Price: £%.2f\" % sigma_all)\n cprint(\"Total Discount: £%.2f\" % (sigma_all - sigma_discount))\n cprint(\"Final Price: £%.2f\" % sigma_discount)\n\n cprint(\"\\nThank you for shopping at \" + SHOP_NAME)", "def output(self):\n \n str_title_len = 50\n str_date_len = 40\n str_purpose_len = 30\n str_price_len = 10\n str_payer_len = 20\n #str_comment_len =\n \n if len(self.title) > (str_title_len - 2):\n out_title = self.title[:str_title_len - 2] + \" |\"\n else:\n out_title = self.title + (\" \" * (str_title_len - len(self.title) - 2)) + \" |\"\n \n # if date is presented with <datetime> object, then\n # then output it in format %d.%m.%y (31.12.99)\n if type(self.date) is datetime.datetime:\n out_date = \" \" + datetime.datetime.strftime(\"%d.%m.%y\") + \" |\"\n # or output as string otherwise\n else:\n if len(self.date) > (str_date_len - 4):\n out_date = \" \" + self.date[:str_date_len - 4] + \" |\"\n else:\n out_date = \" \" + self.date + (\" \" * (str_date_len - len(self.date) - 4)) + \" |\"\n \n if len(self.purpose) > (str_purpose_len - 4):\n out_purpose = \" \" + self.purpose[:str_purpose_len - 4] + \" |\"\n else:\n out_purpose = \" \" + self.purpose + (\" \" * (str_purpose_len - len(self.purpose) - 4)) + \" |\"\n \n # enormous sums aren't supported (over 9999999 at the moment)\n if len(str(self.price)) > (str_price_len - 4):\n raise Exception\n out_price = (' ' * (str_price_len - len(str(self.price)) - 4) ) + str(self.price) + ' |'\n \n if len(self.payer) > (str_payer_len - 2):\n out_payer = \" \" + self.payer[:str_payer_len 
- 2]\n else:\n out_payer = \" \" + self.payer + (\" \" * (str_payer_len - len(self.payer) - 2))\n \n out_line = out_title + out_date + out_purpose + out_price + out_payer\n return out_line", "def pp_entry(self, entry):\n self.separator()\n print('Type: {}'.format(self.TRANSACTION_CODES[entry['Transaction Code']]))\n for item in entry:\n print(item.ljust(25, ' ') + ': {}'.format(entry[item]))\n self.separator()", "def print_invoice(request, invoice_number):\n\n data = Invoice.objects.get(number=invoice_number)\n\n sub_total = sum([a.get(\"total_cost\") for a in data.items])\n s_gst_val = float(sub_total) * (float(data.s_gst) / 100)\n c_gst_val = float(sub_total) * (float(data.c_gst) / 100)\n\n data.addressed_to = data.addressed_to.replace(\"\\n\", \"<br>\")\n\n return render(request,\n \"invoice/invoice_print.html\",\n {\n \"data\": data,\n \"sub_total\": sub_total,\n \"s_gst_value\": s_gst_val,\n \"c_gst_value\": c_gst_val\n })", "def printPayment(self):\n print self.output()", "def format(self, data):", "def receipt_text(self, **kw):\n return self._text(self._receipt_template, **kw)", "def _format_output(selected_number, raw_data):\n tmp_data = {}\n data = collections.defaultdict(lambda: 0)\n balance = raw_data.pop('balance')\n for number in raw_data.keys():\n tmp_data = dict([(k, int(v) if v is not None else \"No limit\")\n for k, v in raw_data[number].items()])\n tmp_data['number'] = number\n if selected_number is None or selected_number == number:\n data[number] = tmp_data\n\n output = (\"\"\"Account Balance\n=======\n\nBalance: {:.2f} $\n\"\"\")\n print(output.format(balance))\n for number_data in data.values():\n _print_number(number_data)", "def format_coin_output(coin):\n coin_output1 = \"Grabbing latest data for *\" + coin['name'] + \"*\\n\"\n coin_output2 = \"```{:20s}\\t${:.2f}\\n\".format(\"Price USD\",float(coin['price_usd']))\n coin_output3 = \"{:20s}\\t{:.8f}\\n\".format(\"Price BTC\",float(coin['price_btc']))\n coin_output4 = \"{:20s}\\t${:.2f}\\n\".format(\"Market Cap\",float(coin['market_cap_usd']))\n coin_output5 = \"{:20s}\\t{:.2f}%\\n\".format(\"Change 1hr\",float(coin['percent_change_1h']))\n coin_output6 = \"{:20s}\\t{:.2f}%\\n\".format(\"Change 24hr\",float(coin['percent_change_24h']))\n coin_output7 = \"{:20s}\\t{:.2f}%\\n```\".format(\"Change 7d\",float(coin['percent_change_7d']))\n return (coin_output1+coin_output2+coin_output3+coin_output4+coin_output5+coin_output6+coin_output7)", "def __repr__(self):\n\n output = list()\n output.append('{resonance_id:6s}'.format(**self.par))\n output.append('{h_larmor_frq:6.1f}'.format(**self.par))\n output.append('{temperature:4.1f}'.format(**self.par))\n output.append('{:10.5f}'.format(self.val))\n output.append('{:10.5f}'.format(self.err))\n\n if self.cal:\n output.append('{:10.5f}'.format(self.cal))\n\n return ' '.join(output)", "def __str__(self):\n string = \"\"\n for i in range(len(self.book[Trade.WAY_SELL])-1, -1, -1):\n string = string + \"%.10f\\t\\t%.8f\\n\" % (self.book[Trade.WAY_SELL][i].get_price(),\n self.book[Trade.WAY_SELL][i].get_quote_amount())\n string = string + \"-----------------------------------\\n\"\n for i in range(len(self.book[Trade.WAY_BUY])):\n string = string +\"%.10f\\t\\t%.8f\\n\" % (self.book[Trade.WAY_BUY][i].get_price(),\n self.book[Trade.WAY_BUY][i].get_quote_amount())\n return string", "def format_step(self):\n if self.terminal:\n totrwdstr = \" %6.3f\" % self.total_reward\n else:\n totrwdstr = \"\"\n \n logging.info(\" %3d %1.0f => %7.1f %4.1f %7.1f %7.1f %4.1f %4.1f = %i %6.3f%s\" % (\n 
self.nsteps,\n self.action['heater_on'],\n self.state['heat_cost'],\n self.state['set_temp'],\n self.state['room_temp'],\n self.state['room_temp_change'],\n self.state['outside_temp'],\n self.state['outside_temp_change'],\n self.terminal,\n self.reward,\n totrwdstr,\n ))", "def pay_formatter(self, pay_item):\n return {\n \"payment_id\": pay_item[0],\n \"amount_paid\": pay_item[1],\n \"payment_info\": pay_item[2],\n \"approved\": pay_item[3],\n \"pay_date\": pay_item[4],\n \"loan_id\": pay_item[5],\n \"farmer_id\": pay_item[6]\n }", "def format(self) -> str:", "def format_result(self, order):\n return u\"%s\" % (order)", "def format_data(self, data):", "def format_tuition(self, data):\n d = u'$%.2f' % data\n return d.replace('.00','')", "def _format_remittance_information_70(self, val):\n if not self.use_operations_xml:\n val = self.format_MT103_field70(val, 35, 4)\n else:\n val = val.replace('newline', '\\n')\n return str(val)", "def _massage_raw_pg_output_vals(self):\n pass", "def format(self):\n ...", "def final_info_printing(self, title_string, amount_to_display):\n self.__string_to_print = f\"{title_string} \" \\\n f\"{'.' * (40 - len(title_string))} \" \\\n f\"$ {'.' * (11 - len('{:0,.2f}'.format(amount_to_display)))}\" \\\n f\"{amount_to_display:0,.2f}\"\n return self.__string_to_print", "def format_item(self, order):\n return unicode(order)", "def outputBlockFormatter(block):\n\n # Transform to number\n block[\"gasLimit\"] = to_decimal(block[\"gasLimit\"])\n block[\"gasUsed\"] = to_decimal(block[\"gasUsed\"])\n block[\"size\"] = to_decimal(block[\"size\"])\n block[\"timestamp\"] = to_decimal(block[\"timestamp\"])\n\n if block.get(\"number\"):\n block[\"number\"] = to_decimal(block[\"number\"])\n\n block[\"difficulty\"] = to_decimal(block[\"difficulty\"])\n block[\"totalDifficulty\"] = to_decimal(block[\"totalDifficulty\"])\n\n if is_array(block.get(\"transactions\")):\n for item in block[\"transactions\"]:\n if not is_string(item):\n item = output_transaction_formatter(item)\n\n return block", "def formatResult(self, result):\r\n return str(result)", "def toQif(self):\n out=list();\n if 'date' in self:\n out.append(\"D{}\".format(self['date']));\n if 'amount' in self:\n out.append(\"T{}\".format(self['amount']));\n if 'memo' in self and len(self['memo'])>3:\n out.append(\"M{}\".format(self['memo']));\n if 'payee' in self and len(self['payee'])>3:\n out.append(\"P{}\".format(self['payee']));\n out.append(\"^\");\n return \"\\n\".join(out);", "def __format__(self, format_spec: str) -> str:\n\n return format(self.balance, format_spec)", "def create_invoice(self):\n sales_tax = 0.06\n item_sum = 0\n inv = f'Invoice#: {self.invoice_id}\\n'\n for key, value in self.items_with_price.items():\n item_sum += value\n inv += f'{key}.....${value:.2f}\\n'\n\n tax = item_sum * sales_tax\n inv += f'Tax.....${tax:.2f}\\n'\n inv += f'Total.....${tax + item_sum:.2f}'\n # print(inv)\n # returning for unit testing purposes\n return inv", "def to_transfac(self):\n m = \"%s\\t%s\\t%s\\n\" % (\"DE\", self.id, \"unknown\")\n for i, (row, cons) in enumerate(zip(self.pfm, self.to_consensus())):\n m += \"%i\\t%s\\t%s\\n\" % (i, \"\\t\".join([str(int(x)) for x in row]), cons)\n m += \"XX\"\n return m", "def __repr__(self):\n return encode_as_str([self.header(), \"!\".join([str(tx) for tx in self.transactions])], sep=\"`\")", "def __repr__(self):\n return encode_as_str([self.header(), \"!\".join([str(tx) for tx in self.transactions])], sep=\"`\")", "def formatResult(self, result):\r\n return '\\t'.join(map(str, 
result))", "def print_receipt(Student):\n\n pdf = FPDF('P', 'mm', 'A4')\n pdf.add_page('P')\n pdf.set_font('Times', 'B', 14)\n\n pdf.multi_cell(0, 5, 'Student Dues Payment Receipt')\n pdf.ln()\n pdf.multi_cell(0, 5, ('Student ID: %s' % Student.student_ID))\n pdf.ln()\n pdf.multi_cell(0, 5, ('Name: %s' % Student.name))\n pdf.ln()\n pdf.multi_cell(0, 5, ('Mess Fees: %s' % Student.mess_charge))\n pdf.ln()\n\n if Student.room_type == \"S\":\n room_rent = db.get(\"hall\", Student.hall_ID, \"single_room_rent\")[0]\n elif Student.room_type == \"D\":\n room_rent = db.get(\"hall\", Student.hall_ID, \"double_room_rent\")[0]\n\n pdf.multi_cell(0, 5, ('Room Rent: %s' % room_rent))\n pdf.ln()\n\n pdf.multi_cell(0, 5, ('Amenities Charge: %s' % str(db.get(\"hall\", Student.hall_ID, \"amenities_charge\")[0])))\n pdf.ln()\n\n pdf.multi_cell(0, 5, ('Total Amount Paid: %s' % str(Student.total_dues)))\n pdf.ln()\n\n # Write generated output file to PDF\n pdf.output(('receipt_%s.pdf' % Student.hall_ID), 'F')", "def display_record_purchase_form_return_data(trx_type: str) -> Dict:\n artist = st.text_input(\"Artist (separate multiple artists with ';')\")\n artist_country = st.text_input(\n \"(Artist) Country (one for each artist, separate with ';')\"\n )\n title = st.text_input(\"Title\")\n genre = st.selectbox(\"Genre\", app_utils.genre_list, 3)\n label = st.text_input(\"Label (separate multiple labels with ';')\")\n year = st.number_input(\"Year\", value=dt.date.today().year, format=\"%d\")\n record_format = st.selectbox(\"Format\", app_utils.record_format_list, 4)\n vinyl_color = st.text_input(\"Vinyl Color\", value=\"black\").lower()\n lim_edition = st.text_input(\"Lim Edition\")\n number = st.text_input(\"Number\")\n remarks = st.text_input(\"Remarks\")\n purchase_date = st.date_input(\"Purchase Date\", value=dt.date.today())\n price = st.number_input(\n \"Price\", value=20.00, min_value=0.00, step=5.00, format=\"%f\"\n )\n rating = st.text_input(\"Rating\")\n is_digitized = st.number_input(\n \"is digitized\", value=0, min_value=0, max_value=1, step=1, format=\"%i\",\n )\n is_active = st.number_input(\n \"is active\", value=1, min_value=0, max_value=1, step=1, format=\"%d\"\n )\n credit_value = st.number_input(\n \"Credits\", value=1, min_value=0, max_value=1, step=1, format=\"%d\"\n )\n\n record_data = {\n \"trx_type\": trx_type,\n \"artist\": artist if artist != \"\" else \"NA\",\n \"artist_country\": artist_country if artist_country != \"\" else \"NA\",\n \"title\": title if title != \"\" else None,\n \"genre\": genre,\n \"label\": label if label != \"\" else \"NA\",\n \"year\": year,\n \"record_format\": record_format,\n \"vinyl_color\": vinyl_color if vinyl_color != \"\" else None,\n \"lim_edition\": lim_edition if lim_edition != \"\" else None,\n \"number\": number if number != \"\" else None,\n \"remarks\": remarks if remarks != \"\" else None,\n \"purchase_date\": purchase_date,\n \"price\": price,\n \"rating\": rating\n if rating not in [\"\", \"None\"]\n else None, # TODO Check if that has solved the None problem\n \"is_digitized\": is_digitized,\n \"is_active\": is_active,\n \"credit_value\": credit_value,\n }\n return record_data", "def formatted(self) -> str:\r\n ...", "def report(self):\n print(f\"Money: {self.CURRENCY}{self.profit}\")", "def format_output(output, case_number, status):\n output.append(\"Case #%s: %s\" % (case_number, status))", "def format_data(self)->float: \n try:\n formatted = chr(self.data[0])\n for i in range(1, len(self.data)): \n formatted = formatted + 
(chr(self.data[i])) \n return str(formatted)\n except ValueError as e:\n print(e)\n except TypeError as e:\n print(e)", "def __repr__(self):\n return u'<Transaction id={i}, amount={a}>'.format(\n i=self.id,\n a=self.amount\n )", "def __str__(self):\n number_stars = (30-len(self.name))//2\n title_line = '*'*number_stars+self.name+'*'*number_stars\n corpus = ''\n for i in range(len(self.ledger)):\n corpus += (((self.ledger[i])['description']))[0:min(23, len((self.ledger[i])['description']))].ljust(23)+(\n str(\"{:.2f}\".format(round(float((self.ledger[i])['amount']), 2)))).rjust(7)+'\\n'\n Total = 'Total: '+str(\"{:.2f}\".format((round(float(self.get_balance()), 2))))\n return title_line+'\\n'+corpus+Total", "def print_data():\n print \"quantity1.value %f\" % 10.0\n return 0", "def get_transaction_value():\n #Get the user input, tranform it from a string to afloat and store it\n tx_recipient=input('Enter the recipient of the transaction: ')\n tx_amount = float(input('your transaction amount please: '))\n return tx_recipient, tx_amount", "def CreateZipOutputString(trans_median, trans_total, trans_number, id, zipcode):\n return( '%s|%s|%d|%d|%d\\n' %(id, zipcode, round(trans_median), trans_number, trans_total))", "def _format_details_of_charges_71A(self, val):\n return val", "def outputLogFormatter(log):\n if log.get(\"blockNumber\"):\n log[\"blockNumber\"] = to_decimal(log[\"blockNumber\"])\n if log.get(\"transactionIndex\"):\n log[\"transactionIndex\"] = to_decimal(log[\"transactionIndex\"])\n if log.get(\"logIndex\"):\n log[\"logIndex\"] = to_decimal(log[\"logIndex\"])\n\n return log", "def _ticket_repr(self, ticket):\n rep = '%s (%s)' % (ticket['summary'], ticket['status'])\n return rep", "def process_receipts(self):\n receipts = open('receipts.txt','r')\n rec_temp = []\n rec = []\n for line in receipts:\n rec_temp.append(line)\n for i in rec_temp:\n rec.append(i.split(','))\n for i in rec:\n for q in range(len(i)):\n if q == 0:\n pass\n else:\n i[q] = float(i[q])\n\n for i in rec:\n for q in range(len(i)):\n self.receipts[i[0]] = i[1:]\n #print(self.receipts)\n return self.receipts", "def asformat(self, format):", "def get_trans_tostring(response_dict):\n try:\n if response_dict['dattyp'] == 'robtarget':\n # Formatting the robtarget to check if it is valid.\n value = response_dict['value']\n # Converts from unicode to normalized string\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')\n value = value.translate(None, \"[]\")\n value_list = value.split(',')\n # Robtarget should consist of 17 numbers.\n if len(value_list) == 17:\n res = 'Trans: [X,Y,Z] = [%s,%s,%s]' % (value_list[0], value_list[1], value_list[2])\n return res\n else:\n err = 'Something wrong with the robtarget: ' + response_dict['value']\n return err\n else:\n err = 'DataType is ' + response_dict['dattyp'] + ' and not robtarget.'\n return err\n except Exception, err:\n return err", "def prod(): \n query = \"SELECT * FROM ProducedMsg;\"\n tablestr = dbwrapper._query_pretty(query)\n result = string.replace(str(tablestr),'\\n','<br>')\n return result", "def softm_to_invoice(rechnungsnr):\n from pprint import pprint\n\n if str(rechnungsnr).startswith('RG'):\n rechnungsnr = str(rechnungsnr)[2:]\n rg, orderlines = get_rechnung('RG833645')\n hint = {}\n for attr in 'skontobetrag'.split():\n hint[attr] = rg[attr]\n out = {'hint': hint}\n for attr in '''kundenauftragsnr auftragsnr versandkosten rechnung_steuranteil rechnungsnr\n zu_zahlen'''.split():\n out[attr] = rg[attr]\n\n out['leistungsdatum'] = 
rg['versand_date']\n out['kundennr'] = rg['kundennr_rechnungsempfaenger']\n out['erfasst_von'] = rg['sachbearbeiternr']\n out['abschlag_prozent'] = rg['auftragsrabatt1p'] + rg['auftragsrabatt2p']\n out['auftragsrabatt'] = rg['auftragsrabatt']\n out['rechungsdatum'] = rg['druck_date']\n rabatttext = ' und '.join([x for x in [rg['rabatttext1'].strip(), rg['rabatttext2'].strip()] if x])\n rabatttext = \"\"\n if rabatttext:\n rabatttext = \"%s: %f\" % (rabatttext, out['abschlag_prozent'])\n elif out['abschlag_prozent']:\n rabatttext = u\"Ab/Zuschläge: %f\" % (out['abschlag_prozent'])\n\n out['infotext_kunde'] = '\\n'.join([rabatttext])\n\n out['orderlines'] = []\n for ol in get_connection().query(['AFU00'], condition=\"FURGNR=%s\" % sql_escape(rechnungsnr)):\n pprint(ol)\n outol = {}\n for attr in '''menge artnr abschlag rechungsbetrag warenwert'''.split(): # zu_zahlen\n outol[attr] = ol[attr]\n out['orderlines'].append(outol)\n\n #line = dict(\n # guid=p.guid,\n # menge=int(p.menge),\n # artnr=p.artnr,\n # #kundenartnr=f3.artnr_kunde,\n # #name=f3.artikelbezeichnung.strip(),\n # infotext_kunde=p.text\n # #einzelpreis=int(abs(f3.verkaufspreis)*100),\n # #warenwert=int(p.wert_netto*100),\n # #zu_zahlen=int(abs(f3.wert_brutto)*100),\n # #abschlag=int(f4.positionsrabatt_gesamt*100)\n # )\n\n #if f3.ean and int(f3.ean):\n # line['ean'] = f3.ean", "def final_receipt(self):\n return self._final_receipt", "def string(self):\n table = Table(2)\n table.add_row(['Name:', f'{self._name}'])\n table.add_row(['Type:', f'{self.account_type}'])\n total = sum([asset.adjusted_value()\n for asset in self._assets.values()])\n table.add_row(['Total:', utils.format_money(total)])\n if self._cash:\n table.add_row(['Available Cash:',\n utils.format_money_delta(self._cash)])\n return table.string(tablefmt='plain')", "def pretty_print(self, indent=8):\n formatted_lines = []\n for line in self.coefficients:\n formatted_items = []\n for item in line:\n formatted_items.append(str(item).ljust(indent, \" \"))\n formatted_lines.append(u\"(\" + \", \".join(formatted_items) + u\")\")\n return u\"(\" + u\",\\n \".join(formatted_lines) + u\")\"", "def __str__(self) -> str:\n return f'{self.amount}{self.currency}'", "def _format_beneficiary_customer_59F(self, val):\n account = val.get('ACCOUNT')\n name = val.get('NAME')\n address = val.get('ADDRESS')\n country_code = val.get('COUNTRY_CODE')\n town = val.get('TOWN')\n name_list, address_list, country_and_town_list = [], [], []\n\n if name:\n name_list = FSwiftWriterUtils.split_text_and_prefix(str(name), 33, '1/')\n if address:\n address_list = FSwiftWriterUtils.split_text_and_prefix(str(address), 33, '2/')\n if country_code:\n additional_details = str(country_code)\n if town:\n additional_details = str(additional_details) + '/' + str(town)\n country_and_town_list = FSwiftWriterUtils.split_text_and_prefix(\n str(additional_details), 33, '3/')\n value = FSwiftWriterUtils.allocate_space_for_name_and_address_with_constraint(name_list, address_list,\n country_and_town_list)\n if account:\n account = '/' + str(account)\n value = account + '\\n' + value\n return value", "def __str__(self):\n if self.filename:\n filename = self.filename\n else:\n filename = 'Unknown'\n if self.endian == '<':\n endian = 'Little Endian'\n else:\n endian = 'Big Endian'\n ret_val = ('FILE: %s\\nRecord Offset: %i byte\\n' +\n 'Header Endianness: %s\\n\\n') % \\\n (filename, self.record_offset, endian)\n ret_val += 'FIXED SECTION OF DATA HEADER\\n'\n for key in self.fixed_header.keys():\n ret_val += '\\t%s: 
%s\\n' % (key, self.fixed_header[key])\n ret_val += '\\nBLOCKETTES\\n'\n for key in self.blockettes.keys():\n ret_val += '\\t%i:' % key\n if not len(self.blockettes[key]):\n ret_val += '\\tNOT YET IMPLEMENTED\\n'\n for _i, blkt_key in enumerate(self.blockettes[key].keys()):\n if _i == 0:\n tabs = '\\t'\n else:\n tabs = '\\t\\t'\n ret_val += '%s%s: %s\\n' % (tabs, blkt_key,\n self.blockettes[key][blkt_key])\n ret_val += '\\nCALCULATED VALUES\\n'\n ret_val += '\\tCorrected Starttime: %s\\n' % self.corrected_starttime\n return ret_val", "def get_trans_dict(self):\n translated = dict([(k,v) for (k,v) in self._trans_dict.items() if k is not v])\n frm = \" \".join([ c + ' |' for c in translated.keys()])\n to = \" \".join([ c + ' |' for c in translated.values()])\n\n return \"code: \\t{}\\nactual:\\t{}\".format(frm, to)", "def format(self, message):", "def total_calculator(description, receipt_input, fees_input, tax_input, tip_input):\n rf = receiptFormat()\n # a dictionary of name(s) and sum of amount\n raw_pairs = [(\n rf.parse_alpha(alpha),\n sum([float(i) for i in rf.parse_numbers(numbers)])\n ) for (alpha, numbers) in re.findall(rf.pattern, receipt_input)]\n # combine all split costs with the people involved\n data = {}\n for (people, amount) in raw_pairs:\n for person in [person.capitalize() for person in people]:\n if not person in data:\n data[person] = round(amount/len(people),2)\n else:\n data[person] += round(amount/len(people),2)\n\n precheck_sum = sum(data.values())\n total_value = round(precheck_sum+tax_input+tip_input+fees_input,2) # prefill the total\n total_input = st.number_input(\"Calculated Total*\",step=10.0,value=total_value) \n return total_input, data", "def show_receipt(context):\n return({'Store': settings.SITE_NAME,\n 'order': context['order']})", "def _format_beneficiary_customer_no_option_59(self, val):\n account = val.get('ACCOUNT')\n name = val.get('NAME')\n address = val.get('ADDRESS')\n\n temp_name = name\n temp_address = address\n char_set = ''\n lookup_temp = lookup\n try:\n char_set = str(self.acm_obj.Counterparty().AdditionalInfo().TraditionalChinese())\n except Exception as e:\n notifier.WARN(\"Could not find Additional Info 'TraditionalChinese'.\")\n\n if char_set == 'True':\n lookup_temp = CCC_traditional_writer\n elif char_set == 'False':\n lookup_temp = CCC_simplified_writer\n\n for key in list(lookup_temp.keys()):\n temp_name = temp_name.replace(str(key), lookup_temp[key] + \" \")\n temp_address = temp_address.replace(str(key), lookup_temp[key] + \" \")\n if name == temp_name:\n if name and address:\n name = FSwiftWriterUtils.split_text_and_prefix(name, 35)\n address = FSwiftWriterUtils.split_text_and_prefix(address, 35)\n val = FSwiftWriterUtils.allocate_space_for_name_address_without_constraint(name, address)\n if account:\n val = \"/\" + str(account) + \"\\n\" + str(val)\n return val\n else:\n name = temp_name\n address = 'ADD. 
' + temp_address\n name_and_address = name + address\n split_name_and_address = FSwiftWriterUtils.split_text_logically_on_character_limit(name_and_address, 35)\n val = ('\\n').join(split_name_and_address)\n if account:\n val = \"/\" + str(account) + \"\\n\" + str(val)\n return val", "def __str__(self) -> str:\n return self.customer.name + ' completes checkout at ' + \\\n str(self.timestamp) + ' line ' + str(self.line_number)", "def nice_output(self):\n return self.des", "def nice_output(self):\n return self.des", "def format_package_output(self):\n if self.package_type == \"TCP\":\n\n content_format = \"[{package_type}]\\t[{timestamp_num}\\t{timestamp}]\\t{src_ip}:{src_port}({src_mac}) ----->\" \\\n \"{dst_ip}:{dst_port}({dst_mac})\\t\" \\\n \"SEQ={tcp_seq}\\tACK={tcp_ack}\\tFLAGS={tcp_flags_list}\\tWIN={tcp_win}\\t\" \\\n \"DATA={tcpudp_data}\\t\" \\\n \"ttl={ttl}\\tDATA_BINARY={tcpudp_data_binary}\\tLEN={len_tcpudp_data}\"\n content = content_format.format(\n package_type=self.package_type,\n timestamp_num=self.timestamp_num,\n timestamp=self.timestamp,\n src_ip=self.src_ip,\n src_port=self.src_port,\n src_mac=self.src_mac,\n dst_ip=self.dst_ip,\n dst_port=self.dst_port,\n dst_mac=self.dst_mac,\n tcp_seq=self.tcp_seq,\n tcp_ack=self.tcp_ack,\n tcp_flags_list=self.tcp_flags_list,\n tcp_win=self.tcp_win,\n tcpudp_data=self.tcpudp_data,\n tcpudp_data_binary=self.tcpudp_data_binary,\n len_tcpudp_data=self.len_tcpudp_data,\n ttl=self.ttl\n )\n elif self.package_type == \"UDP\":\n content_format = \"[{package_type}]\\t[{timestamp_num}\\t{timestamp}]\\t{src_ip}:{src_port}({src_mac}) ----->\" \\\n \"{dst_ip}:{dst_port}({dst_mac})\\t\" \\\n \"ttl={ttl}\\tDATA_BINARY={tcpudp_data_binary}\\tLEN={len_tcpudp_data}\"\n content = content_format.format(\n package_type=self.package_type,\n timestamp_num=self.timestamp_num,\n timestamp=self.timestamp,\n src_ip=self.src_ip,\n src_port=self.src_port,\n src_mac=self.src_mac,\n dst_ip=self.dst_ip,\n dst_port=self.dst_port,\n dst_mac=self.dst_mac,\n tcpudp_data_binary=self.tcpudp_data_binary,\n len_tcpudp_data=self.len_tcpudp_data,\n ttl=self.ttl\n )\n elif self.package_type.startswith(\"ICMP\"):\n content_format = \"[{package_type}]\\t[{timestamp_num}\\t{timestamp}]\\t{src_ip}:{src_port}({src_mac}) ----->\" \\\n \"{dst_ip}:{dst_port}({dst_mac})\\t{icmp_type}:{icmp_code}[{icmp_message}]\\t\" \\\n \"ttl={ttl}\\tDATA_BINARY={tcpudp_data_binary}\\tLEN={len_tcpudp_data}\"\n content = content_format.format(\n package_type=self.package_type,\n timestamp_num=self.timestamp_num,\n timestamp=self.timestamp,\n src_ip=self.src_ip,\n src_port=self.src_port,\n src_mac=self.src_mac,\n dst_ip=self.dst_ip,\n dst_port=self.dst_port,\n dst_mac=self.dst_mac,\n tcpudp_data_binary=self.tcpudp_data_binary,\n len_tcpudp_data=self.len_tcpudp_data,\n icmp_type=self.icmp_type,\n icmp_code=self.icmp_code,\n icmp_message=self.icmp_message,\n ttl=self.ttl\n )\n return content", "def generate_txt(self):\n txt_string = ''\n rp_obj = self.env['res.partner']\n for txt in self:\n vat = rp_obj._find_accounting_partner(\n txt.company_id.partner_id).vat[2:]\n vat = vat\n for txt_line in txt.txt_ids:\n vendor, buyer = self.get_buyer_vendor(txt, txt_line)\n period = txt.period_id.name.split('/')\n period2 = period[0] + period[1]\n # TODO: use the start date of the period to get the period2\n # with the 'YYYYmm'\n operation_type = ('V' if txt_line.invoice_id.type in\n ['out_invoice', 'out_refund'] else 'C')\n document_type = self.get_type_document(txt_line)\n document_number = 
self.get_document_number(\n txt_line, 'inv_number')\n control_number = self.get_number(\n txt_line.invoice_id.nro_ctrl, 'inv_ctrl', 20)\n document_affected = self.get_document_affected(txt_line)\n voucher_number = self.get_number(\n txt_line.voucher_id.number, 'vou_number', 14)\n amount_exempt, amount_untaxed = \\\n self.get_amount_exempt_document(txt_line)\n amount_untaxed = amount_untaxed\n alicuota = self.get_alicuota(txt_line)\n amount_total, amount_exempt = self.get_amount_line(\n txt_line, amount_exempt)\n\n txt_string = (\n txt_string + buyer + '\\t' + period2.strip() + '\\t' +\n txt_line.invoice_id.date_invoice + '\\t' + operation_type +\n '\\t' + document_type + '\\t' + vendor + '\\t' +\n document_number + '\\t' + control_number + '\\t' +\n str(round(amount_total, 2)) + '\\t' +\n str(round(txt_line.untaxed, 2)) + '\\t' +\n str(round(txt_line.amount_withheld, 2)) + '\\t' +\n document_affected + '\\t' + voucher_number + '\\t' +\n str(round(amount_exempt, 2)) + '\\t' + str(alicuota) +\n '\\t' + '0' + '\\n')\n return txt_string", "def _printable(self) -> str:\n \n if self.type_of_second_operand == self.TYPE_REF_ID:\n operand_type = \"RefID\"\n else:\n operand_type = \"Value\"\n\n # parenthesis to concatenate the string over multiple lines\n return (\n \"CQC IF header. RefID=\" + str(self.first_operand)\n + \" | Operator=\" + str(self.operator)\n + \" | \" + operand_type + \"=\" + str(self.second_operand)\n + \" | Second_operand_type=\" + operand_type\n + \" | Body_length=\" + str(self.length)\n )", "def format(self, record):\n row = [self.formatTime(record, self.datefmt), record.name, record.levelname]\n keys = filter(self.filterer, record.__dict__)\n extra = [record.__dict__[k] for k in keys]\n\n self.writer.writerow(row + extra + [record.getMessage()])\n data = self.output.getvalue()\n self.output.truncate(0)\n self.output.seek(0)\n return data.strip()", "def __format__(self, format_spec):\n # This calls the compiled regex stored on ANSIString's class to analyze the format spec.\n # It returns a dictionary.\n format_data = self.re_format.match(format_spec).groupdict()\n clean = self.clean()\n base_output = ANSIString(self.raw())\n align = format_data.get(\"align\", \"<\")\n fill = format_data.get(\"fill\", \" \")\n\n # Need to coerce width into an integer. 
We can be certain that it's numeric thanks to regex.\n width = format_data.get(\"width\", None)\n if width is None:\n width = len(clean)\n else:\n width = int(width)\n\n if align == \"<\":\n base_output = self.ljust(width, fill)\n elif align == \">\":\n base_output = self.rjust(width, fill)\n elif align == \"^\":\n base_output = self.center(width, fill)\n elif align == \"=\":\n pass\n\n # Return the raw string with ANSI markup, ready to be displayed.\n return base_output.raw()", "def outputPostFormatter(post):\n\n post[\"expiry\"] = to_decimal(post[\"expiry\"])\n post[\"sent\"] = to_decimal(post[\"sent\"])\n post[\"ttl\"] = to_decimal(post[\"ttl\"])\n post[\"workProved\"] = to_decimal(post[\"workProved\"])\n\n if not post.get(\"topics\"):\n post[\"topics\"] = []\n\n post[\"topics\"] = [decode_hex(topic) for topic in post[\"topics\"]]\n\n return post", "def get_formatted_line(self) -> str:\n\t\tmount_point_fs_space = \"\"\n\t\tfs_device_space = \"\"\n\t\tdevice_flags_space = \"\"\n\t\tmount_point_fs_space_int = default_mount_point_fs_space - len(self.mount_point)\n\t\tfs_device_space_int = default_fs_device_space - len(self.fstype)\n\t\tdevice_flags_space_int = default_device_flags_space - len(self.device)\n\t\tfor _ in repeat(None, mount_point_fs_space_int):\n\t\t\tmount_point_fs_space += \" \"\n\t\tfor _ in repeat(None, fs_device_space_int):\n\t\t\tfs_device_space += \" \"\n\t\tfor _ in repeat(None, device_flags_space_int):\n\t\t\tdevice_flags_space += \" \"\n\t\treadable_flags = \"flags=\"\n\t\tfor flag in self.flags:\n\t\t\treadable_flags += f\"{flag};\"\n\t\treturn f\"{self.mount_point}{mount_point_fs_space}{self.fstype}{fs_device_space}{self.device}{device_flags_space}{readable_flags}\\n\"", "def encode(self):\n\n packet = (\n\n str(self.pos_number) + # 2 octets\n\n str(self.transaction_result) + # 1 octet\n\n ('%.0f' % (self.amount * 100)).zfill(8) + # 8 octets\n\n str(self.payment_mode) + # 1 octet\n\n str(self.repport) + # 55 octets\n\n str(self.currency_numeric) + # 3 octets\n\n str(self.private) # 10 octets\n\n )\n\n packet_len = len(packet)\n\n if packet_len not in [TERMINAL_ANSWER_COMPLETE_SIZE - 3, TERMINAL_ANSWER_LIMITED_SIZE - 3]:\n raise SequenceDoesNotMatchLengthException(\n 'Cannot create response payment sequence with len != {0} or {1} octet(s) '\n 'Currently have {2} octet(s).'\n .format(TERMINAL_ANSWER_COMPLETE_SIZE - 3, TERMINAL_ANSWER_LIMITED_SIZE - 3, packet_len))\n\n return TeliumData.framing(packet)", "def format_value(text):\n return text.encode('utf8').replace('\\n', ' ').replace('\\r', ' ')", "def format_line_lean(self):\n\t\tline = \"<tr>\\\n\t\t\t\t\t<td>\\\n\t\t\t\t\t\t<font size='2'>\" + self.tag + \"</font>\\\n\t\t\t\t\t</td>\\\n\t\t\t\t\t<td>\\\n\t\t\t\t\t\t<font size='2'>\" + self.value + \"</font>\\\n\t\t\t\t\t</td>\\\n\t\t\t\t</tr>\"\n\t\treturn line", "def _format_output(predictions, descriptors):\n # type: (list,dict) -> list\n\n output = []\n for item in predictions:\n output.append(' - '.join([descriptors.get(item[1]), str(item[0]).format('0.00f')]))\n\n return output", "def display(self):\r\n return str((self.last_name + \", \" + self.first_name+\": \" + self.phone_number + \"\\n\" + self.address + \"\\nStart Date: \" +\r\n self.start_date.strftime(\"%m\") + \"/\" + self.start_date.strftime(\"%d\") +\r\n \"/\" + self.start_date.strftime(\"%Y\")+\"\\nHourly Pay $\" + str('%.2f' % self.hourly_pay)))", "def __str__(self):\n return f'Order: {self.size} {self.drink_name} from {self.shop}\\n' \\\n f'Details: {self.details}\\n' \\\n f'Location: 
{self.location}\\n' \\\n f'Contact Info: {self.customer_name}, {self.customer_number}'", "def formatRecvdData(data_recvd):\n\n\t##############\tADD YOUR CODE HERE\t##############\n\t\n\tx = PrettyTable()\n\t\t\n\tdict2 = eval(data_recvd)\n\t\t\n\tx.field_names=[\"OPTION_NUMBER\",\"OPTIONS\",\"ADDITIONAL INFO\"]\n\tfor key,value in dict2.items():\n\t\tvalue = str(value)\n\t\tif '{' in value:\n\t\t\t\td = eval(value)\n\t\t\t\tfor i,j in d.items():\n\n\t\t\t\t\tx.add_row([key,i,j])\n\t\t\t\t\n\t\telse:\n\t\t\t\t\n\t\t\tx.add_row([key,value,\"-\"])\n\tprint(x)\n\t\n\n\t##################################################", "def displayReport(trade_history):\n stock_history = dict()\n if len(trade_history) > 0:\n for action in trade_history:\n buysell = action.split()\n if stock_history.get(buysell[3]) != None: \n if buysell[2] == 'BUY':\n stock_history[buysell[3]][0] += 1\n else:\n stock_history[buysell[3]][1] += 1\n stock_history[buysell[3]][2] += float(buysell[6])\n else:\n if buysell[2] == 'BUY':\n stock_history[buysell[3]] = [1,0,0]\n else:\n stock_history[buysell[3]] = [0,1,float(buysell[6])]\n\n print(\"Stock: Buys Sells Total Return\")\n for k, v in stock_history.items():\n print(f\"{k}: {v[0]} {v[1]} {v[2] : .2f}\")", "def __str__(self):\n return_string = \"Block Number: {}\\nTimestamp: {}\\nDifficulty: {}\\nNonce: {}\\nPrevious Hash: {}\\nMerkle Root \" \\\n \"Hash: {}\\n\".format(self.block_number, self.timestamp, self.difficulty, self.nonce,\n self.prev_hash, self.merkle_root_hash)\n\n for t in self.transactions:\n return_string += \"Transaction:\\n \" + t.__str__() + \"\\n\"\n\n return_string += \"Self Hash: {}\\n\".format(self.self_hash)\n return return_string", "def assign_format(self):\n if self.is_output or self.is_req_output:\n if self.pname in self.tool_data[self.tool_name]['output_fmt']:\n return self.tool_data[self.tool_name]['output_fmt'][self.pname]\n elif self.pname in self.gen_out_fmt:\n return self.gen_out_fmt[self.pname]\n elif self.is_input:\n if self.pname in self.tool_data[self.tool_name]['input_fmt']:\n print(self.tool_data[self.tool_name])\n return self.tool_data[self.tool_name]['input_fmt'][self.pname]\n elif self.pname in self.gen_in_fmt:\n return self.gen_in_fmt[self.pname]\n else:\n # Not sure yet what this will be used for, but I think we need it.\n return ''", "def _format_output(**values):\r\n return WEATHER_TEXT.format(**values)", "def _format_beneficiary_customer_59A(self, val):\n beneficiary_customer_account = val.get('ACCOUNT')\n beneficiary_customer_bic = val.get('BIC')\n if beneficiary_customer_bic:\n if beneficiary_customer_account:\n val = \"/\" + str(beneficiary_customer_account) + \"\\n\" + str(beneficiary_customer_bic)\n else:\n val = str(beneficiary_customer_bic)\n return val\n else:\n notifier.ERROR(\"Inappropriate option selected. 
Option A is invalid.\")", "def get_transaction_value():\n # Get the user input, transform it from a string to a float and store it\n tx_recipient = input('Enter the recipient of the transaction: ')\n tx_amount = float(input('Enter the transaction amount, please: '))\n return tx_recipient, tx_amount", "def printAsTextTable(self, format_type, text, template=False):\n\n # the order is defined by header list\n col_paddings = []\n message = \"\"\n\n if format_type == \"text\":\n col = rcol = lcol = ecol = tbcol = tecol = bcol = tcol = \"|\"\n row = \"+\"\n space = \"\"\n for name in self.table_header:\n pad = self.getWidth(text[name] + [name, ])\n col_paddings.append(pad)\n for i in range(pad):\n row = \"%s-\" % (row)\n row = \"%s-+\" % (row)\n ecol = \"%s\\n%s\" % (ecol, row)\n tecol = \"%s\\n%s\" % (tecol, row)\n message = \"%s\\n\" % (row,)\n else:\n for name in self.table_header:\n col_paddings.append(0)\n if format_type == \"csv\":\n col = \",\"\n bcol = ecol = tecol = tbcol = \"\"\n tcol = rcol = lcol = \",\"\n row = \"\"\n space = \"\"\n if format_type == \"html\":\n col = \"</td>\\n<td align=center>\"\n tbcol = \"<tr><th align=center>\"\n tecol = \"</th></tr>\"\n tcol = \"</th><th align=center>\"\n rcol = \"</td>\\n<td align=right>\"\n lcol = \"</td>\\n<td align=left>\"\n bcol = \"<tr><td align=left>\"\n ecol = \"</td></tr>\"\n space = \"&nbsp;\"\n\n if not template and format_type != \"html\":\n line = \"\"\n for i in range(len(self.table_header)):\n pad = col_paddings[i]\n column = self.table_header[i].center(pad + 1)\n if i == 0:\n line = column\n else:\n line = \"%s%s%s\" % (line, tcol, column)\n message = \"%s%s%s%s\\n\" % (message, tbcol, line, tecol)\n\n for count in range(0, self.getLength(text)):\n index = 0\n line = bcol\n for key in self.table_header:\n item = text[key][count]\n separator = lcol\n if format_type != \"csv\" and (\n type(item) == type(0) or type(item) == type(0.0)):\n separator = rcol\n nv = NiceNum.niceNum(item, 1)\n value = nv.rjust(col_paddings[index] + 1)\n else:\n if type(item) == type(0) or type(item) == type(0.0):\n value = repr(item).rjust(col_paddings[index] + 1)\n else:\n value = item.ljust(col_paddings[index] + 1)\n if format_type == \"html\" and len(item.strip()) == 0:\n value = space\n if line == bcol:\n line = \"%s%s\" % (line, value)\n else:\n line = \"%s%s%s\" % (line, separator, value)\n index += 1\n line = \"%s%s\" % (line, ecol)\n message = \"%s%s\\n\" % (message, line)\n\n return message", "def formatter(cls, obj, **kwargs):\n if not obj.data:\n return \"\"\n if kwargs and kwargs.get('of') == 'xm':\n return legacy_export_as_marc(hep2marc.do(obj.data))\n return render_template(\n 'inspirehep_theme/format/record/Holding_Pen_HTML_detailed.tpl',\n record=obj.data\n )", "def format_element(bfo, style='eu', markup='html', separator=', ', suffix=''):\n\n latexperiod = ''\n displayouts = []\n errata = []\n displaycnt = 0\n backup_out = ''\n\n publication_infos = bfo.fields('773__')\n\n for publication_info in publication_infos:\n out = ''\n journal_source = cgi.escape(publication_info.get('p', ''))\n volume = cgi.escape(publication_info.get('v', ''))\n year = cgi.escape(publication_info.get('y', ''))\n number = cgi.escape(publication_info.get('n', ''))\n pages = cgi.escape(publication_info.get('c', ''))\n erratum = cgi.escape(publication_info.get('m', ''))\n doi = bfo.field('0247_a') or publication_info.get('a', '')\n conf_code = publication_info.get('w')\n latex_p = markup.lower() == 'latex'\n eu_style_p = style.lower() == 'eu'\n\n if 
journal_source:\n if not (volume or number or pages or doi):\n out += \"Submitted to: \"\n if latex_p:\n out = '%' + out\n out += journal_source\n\n else:\n if latex_p:\n journal_source = journal_source.replace(\".\", '.\\\\ ')\n ## but some journal names end in '.', and subsequent steps\n ## assume we do not end with '\\ ', so take it off:\n ##if journal_source[-2:] == '\\ ':\n ## journal_source = journal_source[:-2]\n out += journal_source\n\n if volume: # preparing volume and appending it\n if latex_p and not journal_source == 'Conf.Proc.':\n # XXX: Special case for Conference Proceedings, which have\n # letters in their conf #, but no volumes\n char_i = 0\n for char in volume:\n if char.isalpha():\n char_i += 1\n else:\n break\n journal_letter = volume[:char_i]\n ##if journal_letter:\n ##journal_letter = '\\\\ ' + journal_letter\n if journal_letter and out[-1] != ' ':\n journal_letter = ' ' + journal_letter\n volume_number = volume[char_i:]\n out += journal_letter + ' {\\\\bf ' + volume_number + '}'\n else:\n out += ' ' + volume\n\n if year: # preparing year; it's appended below\n if eu_style_p or latex_p:\n year = ' (' + year + ')'\n else:\n year = ', ' + year\n\n if number: # preparing number; it's appended below\n if eu_style_p:\n number = ' no.' + number + ', '\n else:\n number = ', no. ' + number\n\n if latex_p:\n if pages:\n dashpos = pages.find('-')\n if dashpos > -1:\n pages = pages[:dashpos]\n\n if eu_style_p: # EU style reference\n out += year\n out += number\n if pages:\n out += ' ' + pages\n else: # US style reference\n out += number\n if pages:\n if latex_p:\n out += ', ' + pages\n else:\n out += ': ' + pages\n ##out += ' ' + year\n out += year\n displaycnt += 1\n if displaycnt > 1:\n if latex_p:\n out = '[' + out + ']'\n if len(publication_infos) > 1 and \\\n erratum.lower() in ['erratum', 'addendum', 'corrigendum']:\n if suffix == '</b>':\n errata.append(\"</b>%s<b>: %s\" % (erratum.capitalize(), out,))\n else:\n errata.append(\"%s: %s\" % (erratum.capitalize(), out,))\n else:\n displayouts.append(out)\n\n elif conf_code:\n pass\n # Do nothing, instead use bfe_INSPIRE_conference\n # if conf_type:\n # conf_type = bfo.kb('talktype', conf_type)\n # out += cgi.escape(conf_type)\n # conf_code = get_kbd_values('conferences', conf_code)\n # out += \" \" + cgi.escape(conf_code)\n else:\n # no journal source and not a conference, we should do our best if\n # there is nothing else\n backup_out = publication_info.get('x')\n\n displayouts += errata\n\n if displayouts:\n # determine if there's an arxiv number to decide whether to put a\n # period after the pub-note. 
There should be a period after either\n # a pub-note or an Arxiv number, but not two periods, and no period if no pub/arxiv info.\n if latex_p:\n if not get_arxiv(bfo, category=\"no\"):\n latexperiod = '.'\n\n return separator.join(displayouts) + latexperiod\n elif backup_out:\n if backup_out.startswith('#DONE:'):\n backup_out = backup_out[6:]\n return backup_out", "def formatOutput(output):\n assert output is not None, \"Output is None\"\n return \"\\n\" + \" \".join(output)", "def prescription(self):\n prescription = \"\\n{0:>10}\\t{1:>10}\\t{2:>10}\\t{3:>10}\\n\".format(\"R\",\"Material\",\"d\",\"diameter\")\n for surface in self.lensSurfaces():\n prescription += \"{0:>10.2f}\\t{1:>10}\\t{2:>10.2f}\\t{3:>10.2f}\\n\".format(surface.R, str(surface.mat), surface.spacing, surface.diameter)\n return prescription", "def raw_out(self):\n\t\treturn '\\t'.join(self.raw_out_tab)", "def renter_accounting_report_gen(sid, start, end):\n results = renter_accounting(sid, start, end)\n print(\"Name: \" + results[0])\n sum_value = 0\n row_title = [\"Date\", \"Boat\", \"Rent\", \"Payment\", \"Sum\"]\n row_format = \"{:>15}\" * len(row_title)\n print(row_format.format(*row_title))\n for result in results[1]:\n temp = list(result.keys()) + [value for key, value in list(result.values())[0].items()]\n if temp[2]:\n sum_value += temp[3]\n temp[2] = \"\"\n else:\n sum_value -= temp[3]\n temp[2] = temp[3]\n temp[3] = \"\"\n temp.append(sum_value)\n print(row_format.format(*[str(x) for x in temp]))", "def __str__(self):\n return (\n f'{self.quantity}x {self.item.name} '\n f'({self.shopping_cart.user.email})'\n )", "def _prepare_output(self, full_encrypted_value, **options):\n\n return full_encrypted_value.decode(self._encoding)", "def _printable(self):\n to_print = \"EPR Request Header.\"\n to_print += \"Remote IP: {}\".format(self.remote_ip)\n to_print += \"Remote port: {}\".format(self.remote_port)\n to_print += \"Min Fidelity: {}\".format(self.min_fidelity)\n to_print += \"Max Time: {}\".format(self.max_time)\n to_print += \"Num Pairs: {}\".format(self.num_pairs)\n to_print += \"Priority: {}\".format(self.priority)\n to_print += \"Store: {}\".format(self.store)\n to_print += \"Atomic: {}\".format(self.atomic)\n to_print += \"Measure Directly: {}\".format(self.measure_directly)\n\n return to_print", "def format_result(result):\n output = ''\n for item in result:\n text = item['text']\n date = item['date']\n status = item['status']\n author = item['author_name']\n line = (\n f'Кейс: {text}\\n'\n f'<code>Дата: {date}</code>\\n'\n f'Статус: <b>{status}</b>\\n'\n f'Автор: {author}\\n'\n '\\n'\n )\n output += line\n return output", "def format(self, item):\n raise NotImplementedError()", "def __str__(self):\n return f\"Order Number: {self._order_number} \" \\\n f\"Product ID: {self._product_id} \" \\\n f\"Item: {self._item_type} \" \\\n f\"Name: {self._name} \" \\\n f\"Quantity: {self._quantity} \" \\\n f\"Product details: {self._product_details} \"", "def getReceipt(self):\n return self._Receipt" ]
[ "0.6561996", "0.6189167", "0.60160506", "0.59889376", "0.5762017", "0.57275635", "0.56979066", "0.5656792", "0.56265295", "0.5626407", "0.55991745", "0.55775195", "0.5537981", "0.55367833", "0.5487953", "0.5431081", "0.5430332", "0.5380763", "0.5369978", "0.53524035", "0.5349362", "0.53448206", "0.5334473", "0.53053594", "0.5294763", "0.52895546", "0.52810216", "0.52768064", "0.5259357", "0.5255496", "0.52431303", "0.52431303", "0.5226645", "0.5222827", "0.5214352", "0.5212606", "0.5211262", "0.5176179", "0.51588917", "0.51535", "0.51469743", "0.51414555", "0.5138925", "0.5128277", "0.5125952", "0.5112174", "0.51120216", "0.5092712", "0.5089299", "0.508922", "0.50818056", "0.50743127", "0.5073393", "0.5069644", "0.50659037", "0.5058638", "0.50574726", "0.5056463", "0.5048735", "0.5037436", "0.5033307", "0.5029445", "0.5025813", "0.50234044", "0.50229484", "0.50229484", "0.50195265", "0.50014526", "0.4996441", "0.4994335", "0.4991514", "0.4989537", "0.49888936", "0.49887764", "0.49780446", "0.4969261", "0.49586636", "0.49576828", "0.49570718", "0.49445245", "0.49349624", "0.49325663", "0.49199542", "0.49153396", "0.49054077", "0.49049044", "0.49046022", "0.49030027", "0.49009755", "0.49006492", "0.4896109", "0.48920432", "0.48864326", "0.48860073", "0.48814473", "0.48791808", "0.48766083", "0.48765707", "0.48717707", "0.48674116" ]
0.676757
0
Formats the output of a block to its proper values
def outputBlockFormatter(block):
    # Transform to number
    block["gasLimit"] = to_decimal(block["gasLimit"])
    block["gasUsed"] = to_decimal(block["gasUsed"])
    block["size"] = to_decimal(block["size"])
    block["timestamp"] = to_decimal(block["timestamp"])

    if block.get("number"):
        block["number"] = to_decimal(block["number"])

    block["difficulty"] = to_decimal(block["difficulty"])
    block["totalDifficulty"] = to_decimal(block["totalDifficulty"])

    if is_array(block.get("transactions")):
        for item in block["transactions"]:
            if not is_string(item):
                item = output_transaction_formatter(item)

    return block
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reformat_block(specline, values):\n data = reformat_spec_line(specline)\n desc = '\\n'.join(values)\n data.append(desc)\n return data", "def verbose(self, block: Block):\n print('\\n\\n==============================')\n print('Hash:\\t\\t', block.hash.hexdigest())\n print('Previous Hash:\\t', block.previous_hash.hexdigest())\n print('Nounce:\\t\\t', block.nonce)\n print('Data:\\t\\t', block.data)\n print('\\n\\n==============================')", "def format(self):\n ...", "def format(self, data):", "def format_blocks(self):\n\n block_text = []\n for el, text in self._block_text.items():\n self.soft_break(el, text)\n content = ''.join(text)\n if content:\n block_text.append((content, self.additional_context + self.construct_selector(el)))\n return block_text", "def format_data(self, data):", "def _format(self):\n output = f\"\\n{color('>>> DUMP')} from {self.filename}: {color(f'L{self.line}')} in {color(f'{self.method}()')}\"\n\n for name, obj in self.objects.items():\n output += f\"\\n\\n{color(f' - {name}:')}\\n\"\n output += f\" {pformat(obj, width=110, indent=4)}\"\n\n output += color(\"\\n\\n<<< END\")\n return output", "def display_blocks(self):\n buf = \"\"\n cpt = 0\n\n for block in self.blocks:\n buf += \"Block N. %d\\n\" % cpt\n buf += \"H \\t%s\\n\" % getHashBlock(block)\n buf += \"Header \\t%s \\n\\n\" % str(block.header)\n cpt += 1\n\n buf += \"Is chain valid ? %r\" % self.chainIsValid()\n print buf", "def __repr__(self):\r\n rep = '\\n======= PRINT BLOCK HEADER =======\\n'\r\n rep += f'VersionNumber: {self.version_num}\\n'\r\n rep += f'hashPrevBlock: {self.hash_prev_block_header}\\n'\r\n rep += f'hashMerkleRoot: {self.hash_merkle_root}\\n'\r\n rep += f'Timestamp: {self.timestamp}\\n'\r\n rep += f'Bits: {self.bits}\\n'\r\n rep += f'Nonce: {self.nonce}\\n'\r\n rep += '======= END OF BLOCK HEADER ======='\r\n return rep", "def format_coin_output(coin):\n coin_output1 = \"Grabbing latest data for *\" + coin['name'] + \"*\\n\"\n coin_output2 = \"```{:20s}\\t${:.2f}\\n\".format(\"Price USD\",float(coin['price_usd']))\n coin_output3 = \"{:20s}\\t{:.8f}\\n\".format(\"Price BTC\",float(coin['price_btc']))\n coin_output4 = \"{:20s}\\t${:.2f}\\n\".format(\"Market Cap\",float(coin['market_cap_usd']))\n coin_output5 = \"{:20s}\\t{:.2f}%\\n\".format(\"Change 1hr\",float(coin['percent_change_1h']))\n coin_output6 = \"{:20s}\\t{:.2f}%\\n\".format(\"Change 24hr\",float(coin['percent_change_24h']))\n coin_output7 = \"{:20s}\\t{:.2f}%\\n```\".format(\"Change 7d\",float(coin['percent_change_7d']))\n return (coin_output1+coin_output2+coin_output3+coin_output4+coin_output5+coin_output6+coin_output7)", "def __str__(self):\n if self.filename:\n filename = self.filename\n else:\n filename = 'Unknown'\n if self.endian == '<':\n endian = 'Little Endian'\n else:\n endian = 'Big Endian'\n ret_val = ('FILE: %s\\nRecord Offset: %i byte\\n' +\n 'Header Endianness: %s\\n\\n') % \\\n (filename, self.record_offset, endian)\n ret_val += 'FIXED SECTION OF DATA HEADER\\n'\n for key in self.fixed_header.keys():\n ret_val += '\\t%s: %s\\n' % (key, self.fixed_header[key])\n ret_val += '\\nBLOCKETTES\\n'\n for key in self.blockettes.keys():\n ret_val += '\\t%i:' % key\n if not len(self.blockettes[key]):\n ret_val += '\\tNOT YET IMPLEMENTED\\n'\n for _i, blkt_key in enumerate(self.blockettes[key].keys()):\n if _i == 0:\n tabs = '\\t'\n else:\n tabs = '\\t\\t'\n ret_val += '%s%s: %s\\n' % (tabs, blkt_key,\n self.blockettes[key][blkt_key])\n ret_val += '\\nCALCULATED VALUES\\n'\n ret_val += '\\tCorrected 
Starttime: %s\\n' % self.corrected_starttime\n return ret_val", "def output(self):\n to_write = 'S '\n \"\"\"\n print self.def_field\n for key in self.def_field:\n print key,\"=\", self.def_field[key]\n \"\"\"\n to_write += str(self.offset[0] + self.def_field['x1'])+' '\n to_write += str(self.offset[1] + self.def_field['y1'])+' '\n to_write += str(self.offset[0] + self.def_field['x2'])+' '\n to_write += str(self.offset[1] + self.def_field['y2'])+' '\n to_write += str(self.def_field['part'])+' '\n to_write += str(self.def_field['dmg'])+' '\n to_write += str(self.def_field['pen'])+' '\n to_write += self.def_field['fill']+'\\n'\n return to_write", "def _format_output(selected_number, raw_data):\n tmp_data = {}\n data = collections.defaultdict(lambda: 0)\n balance = raw_data.pop('balance')\n for number in raw_data.keys():\n tmp_data = dict([(k, int(v) if v is not None else \"No limit\")\n for k, v in raw_data[number].items()])\n tmp_data['number'] = number\n if selected_number is None or selected_number == number:\n data[number] = tmp_data\n\n output = (\"\"\"Account Balance\n=======\n\nBalance: {:.2f} $\n\"\"\")\n print(output.format(balance))\n for number_data in data.values():\n _print_number(number_data)", "def _massage_raw_pg_output_vals(self):\n pass", "def outputLogFormatter(log):\n if log.get(\"blockNumber\"):\n log[\"blockNumber\"] = to_decimal(log[\"blockNumber\"])\n if log.get(\"transactionIndex\"):\n log[\"transactionIndex\"] = to_decimal(log[\"transactionIndex\"])\n if log.get(\"logIndex\"):\n log[\"logIndex\"] = to_decimal(log[\"logIndex\"])\n\n return log", "def asformat(self, format):", "def output(self):\n to_write = 'C '\n \"\"\"\n print self.def_field\n for key in self.def_field:\n print key,\"=\", self.def_field[key]\n \"\"\"\n to_write += str(self.offset[0] + self.def_field['x'])+' '\n to_write += str(self.offset[1] + self.def_field['y'])+' '\n to_write += str(self.def_field['radius'])+' '\n to_write += str(self.def_field['part'])+' '\n to_write += str(self.def_field['dmg'])+' '\n to_write += str(self.def_field['pen'])+' '\n to_write += self.def_field['fill']+'\\n'\n return to_write", "def __str__(self):\r\n return (str(self.blockNum) + \" \" + str(self.coords))", "def pformat(self, tree):\n return str(self.to_tree_text_block(tree))", "def reformat(ctx):\n pass", "def nice_output(self):\n return self.des", "def nice_output(self):\n return self.des", "def __format__(self, format_spec):\n # This calls the compiled regex stored on ANSIString's class to analyze the format spec.\n # It returns a dictionary.\n format_data = self.re_format.match(format_spec).groupdict()\n clean = self.clean()\n base_output = ANSIString(self.raw())\n align = format_data.get(\"align\", \"<\")\n fill = format_data.get(\"fill\", \" \")\n\n # Need to coerce width into an integer. 
We can be certain that it's numeric thanks to regex.\n width = format_data.get(\"width\", None)\n if width is None:\n width = len(clean)\n else:\n width = int(width)\n\n if align == \"<\":\n base_output = self.ljust(width, fill)\n elif align == \">\":\n base_output = self.rjust(width, fill)\n elif align == \"^\":\n base_output = self.center(width, fill)\n elif align == \"=\":\n pass\n\n # Return the raw string with ANSI markup, ready to be displayed.\n return base_output.raw()", "def prettyPrint(description, ip_comp, host, width):\n value = (len(ip_comp) + len(host))\n #When printing values wider than the second column, split and print them\n if value > (int(width/3)):\n print(\"| \" + description.ljust(int(width/3)) + \" |\" ), \n i=0\n wrapped=textwrap.wrap(value, 60) \n for loop in wrapped:\n print(\"Fail point 3 inside loop\")\n if i == 0:\n print(loop + \"|\".rjust(int(width/3-(len(loop)))))\n else: \n print(\"| \".ljust(int(width/3+3)) + \" | \" + loop + \"|\".rjust(int(width/3-(len(loop)))))\n i=i+1\n else: \n print( \"| \" + description.ljust(int(width/3)) + \" | \" + ip_comp.rjust(int(width/3-6)) + \" | \" + host.rjust(int(width/3+2)) + \"|\")", "def formatted(self) -> str:\r\n ...", "def format_field(self, value, spec):\n cache = Cache()\n if spec == \"co\":\n # if cache(re.match(\"(.*)co$\", spec)):\n value = co_join(value)\n spec = \"s\"\n # cache.output.group(1) + \"s\"\n elif cache(re.match(r\"^sub(\\d?)_?(.*)$\", spec)):\n depth = (1 if cache.output.group(1) == \"\" else\n int(cache.output.group(1)))\n value = \"\\n\".join([\n \"{0}{1} = {2}\".format(depth * \" \", key, val)\n for key, val in value.items()])\n if cache.output.group(2) != \"\":\n value = (\n depth * \"[\" + cache.output.group(2) + depth * \"]\" + \"\\n\" +\n value)\n spec = \"s\"\n return super(Format, self).format_field(value, spec)", "def output(self, state):\n h, t = state\n\n return h", "def formatOutput(self, storedText, titleMode, internal=False):\n try:\n text = GenNumber(storedText).numStr(self.format)\n except GenNumberError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)", "def print_block():\n do_twice(do_block)\n do_twice(do_block)\n print_column()", "def print_block():\n do_twice(do_block)\n print_column()", "def block(cell):\n value=[0,0,cell[2]]\n for i in xrange(2):\n if cell[i] < 3:\n value[i] = 1\n if cell[i] >= 3 and cell[i] < 6:\n value[i] = 2\n if cell[i] >= 6:\n value[i] = 3\n return (\"block\",value[0],value[1],value[2])", "def __repr__(self):\n returnvalue = str()\n itemwidth = self._maxValueLength()\n for i in range(self._height):\n if i:\n returnvalue += '\\n'\n returnvalue += '['\n for j in range(self._width):\n if type(self._value[i][j]) is float:\n formatstring = \" %%%d.3f \" % itemwidth\n else:\n formatstring = \" %%%ds \" % itemwidth\n returnvalue += (formatstring % self._value[i][j])\n returnvalue += ']'\n return returnvalue", "def _format_output(**values):\r\n return WEATHER_TEXT.format(**values)", "def convert_textfile(self, input_textfile, output_textfile, block_number):\n input_textfile = './src/data/qvalue_files/' + input_textfile + '.txt'\n\n output_textfile = './src/data/qvalue_files/' + output_textfile + '.txt'\n \"\"\"\n block1\n \"\"\"\n box_conversion_map_1 = {\n 'D1': 'A1',\n 'D2': 'A2',\n 'D3': 'A3',\n 'D4': 'A4',\n 'D5': 'A5',\n 'D6': 'A6',\n 'D7': 'A7',\n 'D8': 'A8',\n 'D9': 'A9'\n }\n \"\"\"\n block2\n \"\"\"\n box_conversion_map_2 = {\n 'D1': 'A7',\n 'D2': 'A8',\n 'D3': 'A9',\n 'D4': 'A10',\n 'D5': 'A11',\n 'D6': 'A12',\n 
'D7': 'A13',\n 'D8': 'A14',\n 'D9': 'A15'\n }\n \"\"\"\n block3\n \"\"\"\n box_conversion_map_3 = {\n 'U1': 'B7',\n 'U2': 'B8',\n 'U3': 'B9',\n 'U4': 'B10',\n 'U5': 'B11',\n 'U6': 'B12',\n 'U7': 'B13',\n 'U8': 'B14',\n 'U9': 'B15'\n }\n \"\"\"\n block0\n \"\"\"\n box_conversion_map_0 = {\n 'U1': 'B1',\n 'U2': 'B2',\n 'U3': 'B3',\n 'U4': 'B4',\n 'U5': 'B5',\n 'U6': 'B6',\n 'U7': 'B7',\n 'U8': 'B8',\n 'U9': 'B9'\n }\n box_maps_dict = {0: box_conversion_map_0, 1: box_conversion_map_1, 2: box_conversion_map_2,\n 3: box_conversion_map_3}\n box_conversion_map = box_maps_dict[block_number]\n\n f_read = open(input_textfile, 'r')\n f_write = open(output_textfile, 'w+')\n\n for i in f_read.read().split('\\n'):\n print(i, \"before\")\n for key in box_conversion_map.keys():\n i = i.replace(key + 'x', box_conversion_map[key] + 'x')\n i = i.replace(key + 'z', box_conversion_map[key] + 'z')\n i = i.replace(key + 'N', box_conversion_map[key] + 'N')\n i = i.replace(key + 'E', box_conversion_map[key] + 'E')\n i = i.replace(key + 'W', box_conversion_map[key] + 'W')\n i = i.replace(key + 'S', box_conversion_map[key] + 'S')\n i = i.replace(key + '|', box_conversion_map[key] + '|')\n print(i, \"after\")\n f_write.write(i + '\\n')\n f_read.close()\n f_write.close()", "def format_sampler(val):\n return val", "def format_bash(self,query_results):\n data=query_results.data\n \n name=\"ddb\"\n\n print (\"{0}_row_length={1}\".format(name,len(data)))\n print (\"{0}_column_length={1}\".format(name,len(query_results.columns)))\n print (\"\")\n\n column_index=0\n for column in query_results.columns:\n print(\"{0}_columns['{1}']='{2}'\".format(name,column_index,column))\n column_index+=1\n\n\n row_index=0\n for row in data:\n for column_index in range(0,len(query_results.columns)):\n print('{0}_data[{1}][{2}]=\"{3}\"'.format(name,row_index,column_index,row['data'][column_index]))\n row_index+=1\n # TODO return output for this\n return \"\"", "def the_display(self):\r\n return f\"\"\"\r\n {self.display[0]}\\n\r\n {self.display[1]}\\n\r\n {self.display[2]}\\n\r\n {self.display[3]}\\n\r\n {self.display[4]}\\n\r\n \"\"\"", "def format(self, item):\n raise NotImplementedError()", "def format(self, data):\r\n for name, value in sorted(data.items()):\r\n line = '{name} = {value}\\n'.format(\r\n name=name,\r\n value=value,\r\n )\r\n yield line", "def output(self):\n to_write = 'P '\n to_write += str(self.def_field['count'])+' '\n to_write += str(self.def_field['part'])+' '\n to_write += str(self.def_field['dmg'])+' '\n to_write += str(self.def_field['pen'])+' '\n for xpos, ypos in self.def_field['XY_poly']:\n to_write += str(self.offset[0] + xpos) + ' ' \\\n + str(self.offset[1] + ypos) + ' '\n to_write += str(self.def_field['fill'])\n to_write += '\\n'\n return to_write", "def formatOutput(self, storedText, titleMode, internal=False):\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def __call__(self, value):\n\n print('\\r', end='')\n self.updateAmount(value)\n writec(str(self), self.color, self.style)\n sys.stdout.flush()", "def assign_format(self):\n if self.is_output or self.is_req_output:\n if self.pname in self.tool_data[self.tool_name]['output_fmt']:\n return self.tool_data[self.tool_name]['output_fmt'][self.pname]\n elif self.pname in self.gen_out_fmt:\n return self.gen_out_fmt[self.pname]\n elif self.is_input:\n if self.pname in self.tool_data[self.tool_name]['input_fmt']:\n print(self.tool_data[self.tool_name])\n return self.tool_data[self.tool_name]['input_fmt'][self.pname]\n elif self.pname 
in self.gen_in_fmt:\n return self.gen_in_fmt[self.pname]\n else:\n # Not sure yet what this will be used for, but I think we need it.\n return ''", "def format(self, data):\r\n for name, value in sorted(data.items()):\r\n full_text = ': {name} : {value}'.format(\r\n name=name,\r\n value=value,\r\n )\r\n wrapped_text = textwrap.fill(\r\n full_text,\r\n initial_indent='',\r\n subsequent_indent=' ',\r\n width=self.max_width,\r\n )\r\n yield wrapped_text + '\\n'", "def execute_print_block(arg):\n blockchain = Blockchain()\n blockchain.read_blockchain()\n\n height = arg['height']\n direction = arg['direction']\n\n if height is None:\n print('You must provide the height!!!')\n\n else:\n blockchain.print_blocks(height, direction)\n\n return", "def output(self):\n \n str_title_len = 50\n str_date_len = 40\n str_purpose_len = 30\n str_price_len = 10\n str_payer_len = 20\n #str_comment_len =\n \n if len(self.title) > (str_title_len - 2):\n out_title = self.title[:str_title_len - 2] + \" |\"\n else:\n out_title = self.title + (\" \" * (str_title_len - len(self.title) - 2)) + \" |\"\n \n # if date is presented with <datetime> object, then\n # then output it in format %d.%m.%y (31.12.99)\n if type(self.date) is datetime.datetime:\n out_date = \" \" + datetime.datetime.strftime(\"%d.%m.%y\") + \" |\"\n # or output as string otherwise\n else:\n if len(self.date) > (str_date_len - 4):\n out_date = \" \" + self.date[:str_date_len - 4] + \" |\"\n else:\n out_date = \" \" + self.date + (\" \" * (str_date_len - len(self.date) - 4)) + \" |\"\n \n if len(self.purpose) > (str_purpose_len - 4):\n out_purpose = \" \" + self.purpose[:str_purpose_len - 4] + \" |\"\n else:\n out_purpose = \" \" + self.purpose + (\" \" * (str_purpose_len - len(self.purpose) - 4)) + \" |\"\n \n # enormous sums aren't supported (over 9999999 at the moment)\n if len(str(self.price)) > (str_price_len - 4):\n raise Exception\n out_price = (' ' * (str_price_len - len(str(self.price)) - 4) ) + str(self.price) + ' |'\n \n if len(self.payer) > (str_payer_len - 2):\n out_payer = \" \" + self.payer[:str_payer_len - 2]\n else:\n out_payer = \" \" + self.payer + (\" \" * (str_payer_len - len(self.payer) - 2))\n \n out_line = out_title + out_date + out_purpose + out_price + out_payer\n return out_line", "def do_block():\n print_column()\n print_rows()", "def do_block():\n print_column()\n print_rows()", "def render_value(self, p, value):\n\n if isinstance(value, SList):\n p.b = value.n\n elif isinstance(value, str):\n p.b = value\n else:\n p.b = pprint.pformat(value)", "def print_block_msg(b):\n # Pull out fields\n version, prev_header, merkle_root = b[:4], b[4:36], b[36:68]\n timestamp, bits, nonce = b[68:72], b[72:76], b[76:80]\n txn_count = b[80:90].split(bytes.fromhex('01000000'))[0]\n txn_count = unmarshal_compactsize(txn_count)\n\n # Print report\n prefix = ' '\n print(prefix + 'BLOCK TRANSACTION')\n print(prefix + '-' * 56)\n prefix *= 2\n print('{}{:67} Version {}'.format(prefix, version.hex(), unmarshal_int(version)))\n print('{}{:67} Previous Block'.format(prefix, convertLittleBig(prev_header.hex())))\n print('{}{:67} Merkle Root'.format(prefix, convertLittleBig(merkle_root.hex())))\n time_str = strftime(\"%a, %d %b %Y %H:%M:%S GMT\", gmtime(unmarshal_int(timestamp)))\n print('{}{:67} Epoch time {}'.format(prefix, timestamp.hex(), time_str))\n print('{}{:67} Bits'.format(prefix, convertLittleBig(bits.hex())))\n print('{}{:67} Nonce'.format(prefix, convertLittleBig(nonce.hex())))\n print('{}{:67} Number of transactions: {}'.format(prefix, 
txn_count[0].hex(), txn_count[1]))", "def process_block(self, block):\r\n ret = []\r\n output = None\r\n input_lines = None\r\n lineno = self.IP.execution_count\r\n\r\n input_prompt = self.promptin%lineno\r\n output_prompt = self.promptout%lineno\r\n image_file = None\r\n image_directive = None\r\n\r\n for token, data in block:\r\n if token==COMMENT:\r\n out_data = self.process_comment(data)\r\n elif token==INPUT:\r\n (out_data, input_lines, output, is_doctest, image_file,\r\n image_directive) = \\\r\n self.process_input(data, input_prompt, lineno)\r\n elif token==OUTPUT:\r\n out_data = \\\r\n self.process_output(data, output_prompt,\r\n input_lines, output, is_doctest,\r\n image_file)\r\n if out_data:\r\n ret.extend(out_data)\r\n\r\n # save the image files\r\n if image_file is not None:\r\n self.save_image(image_file)\r\n\r\n return ret, image_directive", "def DebugFormat(self):\n print FormatAsBits((self.output, self.out_boff))\n for i in xrange(self.idx_byte*8 + self.idx_boff - 1):\n if not i % 8:\n sys.stdout.write(\"|\")\n sys.stdout.write(\"-\")\n print \"^\"", "def format(self) -> str:", "def blockprint(content, width=TERMINAL_CHARS):\n\n lines = content.split('\\n')\n print('_'*width)\n print('')\n for line in lines:\n p = line.strip()\n print(\"| \" + p)\n print('_'*width)", "def render_block(data):\n\tsnippet = data[2] \n\ttitle = data[0]['name']\n\tdescription = data[0]['description']\n\tblock_type = data[0]['type']\n\t\n\n\t# change the panel outline for\n\t# warnings and detections\n\tblock_border = 'yellow' if block_type == 'warning' else 'red1'\n\n\tcode_snippet = Syntax(\n\t\t\t\t\t\tsnippet, \n\t\t\t\t\t\tSYNTAX, \n\t\t\t\t\t\ttheme=THEME, \n\t\t\t\t\t\tline_numbers=True, \n\t\t\t\t\t\tstart_line=data[1]\n\t\t\t\t\t)\n\n\tdescription_txt = Markdown(\n\t\t\tf\"\"\" ## Explanation \\n {description} \"\"\",\n\t\t\tinline_code_lexer=SYNTAX,\n\t\t\tinline_code_theme=THEME,\n\t\t)\n\t\n\tcomponents = RenderGroup(\n\t\t\t\t\tcode_snippet,\n\t\t\t\t\tdescription_txt\n\t\t\t\t)\n\t\n\tblock = Panel(\n\t\t\tcomponents,\n\t\t\ttitle=f'[b white]{title}',\n\t\t\twidth=60,\n\t\t\tborder_style=block_border\n\t\t)\n\n\t# render\n\tprint('\\n')\n\tprint(block)", "def format_data(self)->float: \n try:\n formatted = chr(self.data[0])\n for i in range(1, len(self.data)): \n formatted = formatted + (chr(self.data[i])) \n return str(formatted)\n except ValueError as e:\n print(e)\n except TypeError as e:\n print(e)", "def Display(self, unused_args, result):\n util.PrettyPrint(result)", "def Display(self, unused_args, result):\n util.PrettyPrint(result)", "def format_model_output(self, output, batch_size=1):\r\n return output", "def show_fixedblock(self):\n fb = self.station.get_raw_fixed_block(unbuffered=True)\n for i, ptr in enumerate(range(len(fb))):\n print('%02x' % fb[ptr], end=' ')\n if (i+1) % 16 == 0:\n print()", "def write_special_block(self, block, cell_content):\n if not cell_content:\n self.save_text()\n con = 1\n if block['t'] == 'Header':\n con = 2\n self.list_parse(block['c'][con])\n if not cell_content:\n self.save_text()", "def get_all():\n blocks = []\n for key, block in BLOCKS.iteritems():\n if len(block) != 3:\n raise BlockFormatError\n x, y, z = block\n blocks.append('%s=%.1f,%.1f,%.1f' % (key, x, y, z))\n return '|'.join(blocks)", "def print_in_block(message):\n print(\"|\", message)", "def calculate_output(self):", "def print_in_block(message):\n print(\"=\"*4, message)", "def _formatter(self, result):\n seclabels = []\n if 'seclabels' in result and result['seclabels'] is 
not None:\n for sec in result['seclabels']:\n sec = re.search(r'([^=]+)=(.*$)', sec)\n seclabels.append({\n 'provider': sec.group(1),\n 'label': sec.group(2)\n })\n\n result['seclabels'] = seclabels\n return result", "def get(self, get_params, block):\n value = f\"{{{self.key}}}\"\n try:\n value = value_ = get_params(self.key)\n if self.format.startswith(\":\"):\n # if a parameter has been set to be formatted as a numeric\n # type then we see if we can coerce it to be. This allows\n # the user to format types that normally would not be\n # allowed eg '123' it also allows {:d} to be used as a\n # shorthand for {:.0f}. Use {:g} to remove insignificant\n # trailing zeroes and the decimal point too if there are\n # no remaining digits following it. If the parameter cannot\n # be successfully converted then the format will be removed.\n try:\n if \"escape\" in self.format:\n value = escape(value)\n if \"ceil\" in self.format:\n value = ceil(float(value))\n if \"f\" in self.format:\n value = float(value)\n if \"g\" in self.format:\n value = float(value)\n if \"d\" in self.format:\n value = int(float(value))\n output = f\"{{[{self.key}]{self.format}}}\"\n value = output.format({self.key: value})\n value_ = float(value)\n except ValueError:\n pass\n elif self.format.startswith(\"!\"):\n output = f\"{{{self.key}{self.format}}}\"\n value = value_ = output.format(**{self.key: value})\n\n if block.commands.not_zero:\n valid = value_ not in [\"\", None, False, \"0\", \"0.0\", 0, 0.0]\n else:\n # '', None, and False are ignored\n # numbers like 0 and 0.0 are not.\n valid = not (value_ in [\"\", None] or value_ is False)\n enough = False\n except: # noqa e722\n # Exception raised when we don't have the param\n enough = True\n valid = False\n\n return valid, value, enough", "def coregister_formatted():\r\n\r\n print(\"begin coregister_formatted\")\r\n\r\n # check all records for pairs using the recordBegin time\r\n pair_records()\r\n\r\n # establish the beginning and end of the coregistered records\r\n define_pairedRecords()\r\n\r\n # coregister paired data in a single csv\r\n format_coregister()\r\n\r\n\r\n print(\"completed segment_formatted\")", "def output_format(result):\n if 'value' in result and isinstance(result['value'], list):\n result = result['value']\n obj_list = result if isinstance(result, list) else [result]\n return [_format_group(item) for item in obj_list]", "def _print(self):\n self.i += 1\n np.set_printoptions(precision=3, suppress=True)\n if self.i%40 == 0:\n self.i = 0\n print self.ekf.current_state_estimate[4:7]", "def formatted(self):\n\n # Remove repos with errors from totals\n if self.totals['errors'] > 0:\n for repo_id, error in self.errors:\n if repo_id in self.repos:\n if not 'error' in self.repos[repo_id]:\n self.totals['dlpkgs'] -= self.repos[repo_id]['dlpkgs']\n self.totals['numpkgs'] -= self.repos[repo_id]['numpkgs']\n self.repos[repo_id]['error'] = True\n\n self.linecount = 0 # reset line counter\n header, h1, h2, h3, h4, h5 = self.format_header()\n self.emit('-' * len(header))\n self.emit(self.color('{0}'.format(header), 'green'))\n self.emit('-' * len(header))\n\n error_repos = []\n complete_repos = []\n metadata_repos = []\n other_repos = []\n\n for repo_id in sorted(self.repos):\n if 'error' in self.repos[repo_id]:\n error_repos.append(repo_id)\n elif self.repos[repo_id]['repomd'] == 'complete':\n complete_repos.append(repo_id)\n elif self.repos[repo_id]['repomd']:\n metadata_repos.append(repo_id)\n else:\n other_repos.append(repo_id)\n\n for repo_id in 
itertools.chain(error_repos, complete_repos, metadata_repos, other_repos):\n self.emit(self.represent_repo(repo_id, h1, h2, h3, h4, h5))\n\n self.emit('-' * len(header))\n self.emit(self.represent_total(h1, h2, h3, h4, h5))\n self.emit('-' * len(header))\n\n # Append errors to output if any found.\n if self.totals['errors'] > 0:\n self.emit(self.color('Errors ({0}):'.format(self.totals['errors']), 'red'))\n for repo_id, error in self.errors:\n self.emit(self.color('{0}: {1}'.format(repo_id, error), 'red'))\n\n with self.term.location(x=0, y=self.linecount):\n sys.stdout.write(self.term.clear_eos())\n\n sys.stdout.flush()", "def format_output(output, case_number, status):\n output.append(\"Case #%s: %s\" % (case_number, status))", "def output(self):\n pdb.set_trace()\n return \"\".join(self.pieces)", "def __repr__(self):\n\n output = list()\n output.append('{resonance_id:6s}'.format(**self.par))\n output.append('{h_larmor_frq:6.1f}'.format(**self.par))\n output.append('{temperature:4.1f}'.format(**self.par))\n output.append('{:10.5f}'.format(self.val))\n output.append('{:10.5f}'.format(self.err))\n\n if self.cal:\n output.append('{:10.5f}'.format(self.cal))\n\n return ' '.join(output)", "def format_sampler(self, val):\n if isinstance(val, int):\n return \"[%s]\" % (2 * val)\n return val", "def formatResult(self, result):\r\n return '\\t'.join(map(str, result))", "def str_blocks_float(buf):\n return ' '.join(['%s%s%s%12.4e'%(\n '' if i == 0 or i%8 != 0 else '\\n', # add a line-feed every 8 values\n '' if i == 0 or i%32 != 0 else '\\n', # add an extra line-feed every 4 lines\n '' if i%4 != 0 else ' ', # add an extra space every 4 values\n x) for i, x in enumerate(buf)])", "def formatOutput(self, storedText, titleMode, internal=False):\n try:\n text = GenTime(storedText).timeStr(self.format)\n except GenTimeError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)", "def _populate_output(self):\n pass", "def _draw_block(self, block: Tuple[int, int], kind: str) -> None:\n # ToDo: implement display picture: https://pythonprogramming.net/displaying-images-pygame/\n if self.board_formatting[kind]['picture'] is not None:\n raise Exception('Displaying pictures has not yet been implemented!')\n else:\n rectangle = [block[1] * self.block_length, block[0] * self.block_length,\n self.block_length, self.block_length]\n pygame.draw.rect(self.display, self.board_formatting[kind]['color'], rectangle)", "def __str__(self) -> str:\n blockchain_string = 'Node ' + self.node_id + ' Blockchain: \\n'\n stack = collections.deque()\n for block in self.blockchain:\n stack.append(block)\n for i in range(0, len(stack)):\n block = stack.pop()\n blockchain_string += '-'*75 + '\\n'\n for k, v in block.__dict__.items():\n if k == 'transactions':\n blockchain_string += k + ':\\n'\n for tx in v:\n tx_short = Transaction(str(tx))\n tx_short_dict = tx_short.__dict__\n for k2, v2 in tx_short_dict.items():\n #\"2020-05-12 18:20:25.659289\"\n if k2 == 'timestamp':\n tx_short_dict[k2] = v2[11:22]\n elif k2 == 'unique_id':\n tx_short_dict[k2] = v2[:5] + '...'\n blockchain_string += '\\t' + str(tx_short) + '\\n'\n elif k == 'signatures':\n v_short = copy.deepcopy(v)\n new_sig = {}\n for k2, v2 in v_short.items():\n new_sig[k2[:8]+'...'] = v2\n blockchain_string += k + ': ' + str(new_sig) + '\\n'\n else:\n blockchain_string += k + ': ' + str(v) + '\\n'\n blockchain_string += '-' * 75 + '\\n'\n return blockchain_string", "def _adjustBlock(self, b):\n raise NotImplementedError", "def 
blocks_to_message(blocks):\n blocks[-1] = list(filter(lambda s: s != aes.EMPTY_SYMBOL_CODE, blocks[-1]))\n message = ''\n for block in blocks:\n for symbol in block:\n message += chr(symbol)\n return message", "def output(self) -> str:\n self._contents = str(self._line) + \", \" + str(self._line)\n return(super().output(0, None))", "def vformat(self, format_string, args, kwargs):\n self._used_kwargs = {}\n self._unused_kwargs = {}\n return super(MemorizeFormatter, self).vformat(format_string, args, kwargs)", "def reverse_convert_textfile(self, input_textfile, output_textfile, block_number):\n input_textfile = './src/data/qvalue_files/' + input_textfile + '.txt'\n\n output_textfile = './src/data/qvalue_files/' + output_textfile + '.txt'\n \"\"\"\n block1\n \"\"\"\n box_conversion_map_1 = {'A1': 'D1', 'A3': 'D3', 'A2': 'D2', 'A5': 'D5', 'A4': 'D4', 'A7': 'D7', 'A6': 'D6',\n 'A9': 'D9', 'A8': 'D8'}\n\n \"\"\"\n block2\n \"\"\"\n box_conversion_map_2 = {'A15': 'D9', 'A14': 'D8', 'A11': 'D5', 'A10': 'D4', 'A13': 'D7', 'A12': 'D6',\n 'A7': 'D1', 'A9': 'D3', 'A8': 'D2'}\n\n \"\"\"\n block3\n \"\"\"\n box_conversion_map_3 = {'B7': 'U1', 'B14': 'U8', 'B15': 'U9', 'B12': 'U6', 'B13': 'U7', 'B10': 'U4',\n 'B11': 'U5', 'B8': 'U2', 'B9': 'U3'}\n\n \"\"\"\n block0\n \"\"\"\n box_conversion_map_0 = {'B4': 'U4', 'B5': 'U5', 'B6': 'U6', 'B7': 'U7', 'B1': 'U1', 'B2': 'U2', 'B3': 'U3',\n 'B8': 'U8', 'B9': 'U9'}\n\n box_maps_dict = {0: box_conversion_map_0, 1: box_conversion_map_1, 2: box_conversion_map_2,\n 3: box_conversion_map_3}\n box_conversion_map = box_maps_dict[block_number]\n\n f_read = open(input_textfile, 'r')\n f_write = open(output_textfile, 'w+')\n\n for i in f_read.read().split('\\n'):\n print(i, \"before\")\n for key in box_conversion_map.keys():\n i = i.replace(key + 'x', box_conversion_map[key] + 'x')\n i = i.replace(key + 'z', box_conversion_map[key] + 'z')\n i = i.replace(key + 'N', box_conversion_map[key] + 'N')\n i = i.replace(key + 'E', box_conversion_map[key] + 'E')\n i = i.replace(key + 'W', box_conversion_map[key] + 'W')\n i = i.replace(key + 'S', box_conversion_map[key] + 'S')\n i = i.replace(key + '|', box_conversion_map[key] + '|')\n print(i, \"after\")\n f_write.write(i + '\\n')\n f_read.close()\n f_write.close()", "def _store_result(self, block):\n\n if not block:\n block = \"\"\n\n columns = 0\n column_name = \"\"\n scale = display_size = internal_size = precision = 0\n null_ok = False\n type_ = []\n\n for line in block.split(\"\\n\"):\n if line.startswith(mapi_async.MSG_INFO):\n logger.info(line[1:])\n self.messages.append((Warning, line[1:]))\n\n elif line.startswith(mapi_async.MSG_QTABLE):\n self._query_id, rowcount, columns, tuples = line[2:].split()[:4]\n\n columns = int(columns) # number of columns in result\n self.rowcount = int(rowcount) # total number of rows\n # tuples = int(tuples) # number of rows in this set\n self._rows = []\n\n # set up fields for description\n # table_name = [None] * columns\n column_name = [None] * columns\n type_ = [None] * columns\n display_size = [None] * columns\n internal_size = [None] * columns\n precision = [None] * columns\n scale = [None] * columns\n null_ok = [None] * columns\n # typesizes = [(0, 0)] * columns\n\n self._offset = 0\n self.lastrowid = None\n\n elif line.startswith(mapi_async.MSG_HEADER):\n (data, identity) = line[1:].split(\"#\")\n values = [x.strip() for x in data.split(\",\")]\n identity = identity.strip()\n\n if identity == \"name\":\n column_name = values\n elif identity == \"table_name\":\n _ = values # not used\n 
elif identity == \"type\":\n type_ = values\n elif identity == \"length\":\n _ = values # not used\n elif identity == \"typesizes\":\n typesizes = [[int(j) for j in i.split()] for i in values]\n internal_size = [x[0] for x in typesizes]\n for num, typeelem in enumerate(type_):\n if typeelem in [\"decimal\"]:\n precision[num] = typesizes[num][0]\n scale[num] = typesizes[num][1]\n else:\n msg = \"unknown header field: {}\".format(identity)\n logger.warning(msg)\n self.messages.append((Warning, msg))\n\n description = []\n for i in range(columns):\n description.append(\n Description(\n column_name[i],\n type_[i],\n display_size[i],\n internal_size[i],\n precision[i],\n scale[i],\n null_ok[i],\n )\n )\n self.description = description\n self._offset = 0\n self.lastrowid = None\n\n elif line.startswith(mapi_async.MSG_TUPLE):\n values = self._parse_tuple(line)\n self._rows.append(values)\n\n elif line.startswith(mapi_async.MSG_TUPLE_NOSLICE):\n self._rows.append((line[1:],))\n\n elif line.startswith(mapi_async.MSG_QBLOCK):\n self._rows = []\n\n elif line.startswith(mapi_async.MSG_QSCHEMA):\n self._offset = 0\n self.lastrowid = None\n self._rows = []\n self.description = None\n self.rowcount = -1\n\n elif line.startswith(mapi_async.MSG_QUPDATE):\n (affected, identity) = line[2:].split()[:2]\n self._offset = 0\n self._rows = []\n self.description = None\n self.rowcount = int(affected)\n self.lastrowid = int(identity)\n self._query_id = -1\n\n elif line.startswith(mapi_async.MSG_QTRANS):\n self._offset = 0\n self.lastrowid = None\n self._rows = []\n self.description = None\n self.rowcount = -1\n\n elif line == mapi_async.MSG_PROMPT:\n return\n\n elif line.startswith(mapi_async.MSG_ERROR):\n self._exception_handler(ProgrammingError, line[1:])\n\n self._exception_handler(InterfaceError, \"Unknown state, %s\" % block)", "def to_basic_block(self):\n return _uhd_swig.usrp_sink_sptr_to_basic_block(self)", "def formatOutput(output):\n assert output is not None, \"Output is None\"\n return \"\\n\" + \" \".join(output)", "def render_report(blocks):\n\tfor block in blocks:\n\t render_block(\n\t\t(\n\t\t block[0],\t\t# signature \n\t\t block[1], # line number\n\t\t block[2],\t\t# line\n\t\t)\n\t )", "def format_raw(self,query_results,output_stream):\n #print(query_results.data)\n delimiter=query_results.delimiter\n res=[]\n for row in query_results.data:\n if 'data' in row:\n raw=delimiter.join(row['data'])\n if output_stream=='STDIO':\n print(raw)\n else:\n res.append(raw)\n\n if output_stream=='STRING':\n return res", "def encode_block_string(self, block):\n\n args = [\n 'r%d' % block.num_repeat,\n 'k%s' % self._encode_ksize(block.kernel_size),\n 'a%s' % self._encode_ksize(block.expand_kernel_size),\n 'p%s' % self._encode_ksize(block.project_kernel_size),\n 's%d%d' % (block.strides[0], block.strides[1]),\n 'e%s' % block.expand_ratio,\n 'i%d' % block.input_filters,\n 'o%d' % block.output_filters\n ]\n\n if block.se_ratio > 0 and block.se_ratio <= 1:\n args.append('se%s' % block.se_ratio)\n\n if block.id_skip is False:\n args.append('noskip')\n\n if block.swish:\n args.append('sw')\n\n if block.dilated:\n args.append('dilated')\n\n return '_'.join(args)", "def _generate_code_blocks(self):\n\n printer = self._generate_pydy_c_printer()()\n\n self.code_blocks = {}\n\n lines = []\n for i, input_arg in enumerate(self.arguments):\n lines.append('double input_{}[{}],'.format(i, len(input_arg)))\n self.code_blocks['input_args'] = wrap_and_indent(lines, 14)\n\n lines = []\n for i, output_arg in 
enumerate(self.matrices):\n nr, nc = output_arg.shape\n lines.append('double output_{}[{}],'.format(i, nr * nc))\n self.code_blocks['output_args'] = \\\n wrap_and_indent(lines, 14)[:-1] # remove last comma\n\n lines = []\n for i, (input_arg, explan) in enumerate(zip(self.arguments,\n self.comma_lists())):\n lines.append('input_{}[{}] : [{}]'.format(i, len(input_arg),\n explan))\n self.code_blocks['input_docstring'] = wrap_and_indent(lines, 0)\n\n lines = []\n for var, expr in self.subexprs:\n var_str = printer.doprint(var)\n expr_str = printer.doprint(expr)\n lines.append('double {} = {};'.format(var_str, expr_str))\n self.code_blocks['subexprs'] = wrap_and_indent(lines)\n\n outputs = ''\n for i, output in enumerate(self.simplified_matrices):\n nr, nc = output.shape\n lhs = sm.MatrixSymbol('output_{}'.format(i), nr, nc)\n try:\n code_str = printer.doprint(output, lhs)\n except AttributeError:\n # The above fails in SymPy 0.7.4.1 because Matrix printing\n # isn't supported.\n code_lines = []\n for j, element in enumerate(output):\n assignment = 'output_{}[{}]'.format(i, j)\n code_lines.append(printer.doprint(element, assignment))\n code_str = '\\n'.join(code_lines)\n outputs += wrap_and_indent(code_str.split('\\n'))\n if i != len(self.simplified_matrices) - 1:\n outputs += '\\n\\n' # space between each output\n\n self.code_blocks['outputs'] = outputs", "def format_start(self):\n logging.info(\" itr h => cost set troom droom tout dout = t rwd\")\n logging.info(\" %7.1f %4.1f %7.1f %7.1f %4.1f %4.1f\" % (\n self.state['heat_cost'],\n self.state['set_temp'],\n self.state['room_temp'],\n self.state['room_temp_change'],\n self.state['outside_temp'],\n self.state['outside_temp_change'],\n ))", "def format(self, message):", "def make_node_text(self):\n fmtstr = ub.codeblock(\n '''\n process {name}\n :: {type}\n ''')\n parts = [fmtstr.format(name=self.name, type=self.type)]\n if self.config:\n if isinstance(self.config, six.string_types):\n parts.extend(self.config.splitlines())\n else:\n for key, val in self.config.items():\n parts.append(' :{key} {val}'.format(key=key, val=val))\n text = '\\n'.join(parts)\n return text", "def _format_ase(self, molecules, outputs):\n atom_buffer = []\n property_buffer = []\n for idx, molecule in enumerate(molecules):\n atom_types, positions = molecule\n atoms = Atoms(atom_types.cpu(), positions.cpu())\n\n props = outputs[idx]\n\n atom_buffer.append(atoms)\n property_buffer.append(props)\n return atom_buffer, property_buffer", "def process_cell_input_output(self, cell_piece):\n if cell_piece[:5] != 'sage:' and cell_piece[:12] != '&gt;'*3:\n piece = '<div class=\"highlight\"><pre>'\n piece += cell_piece\n piece = piece.replace('{','{&nbsp;')\n piece = piece.replace('}','}&nbsp;')\n piece += '</pre></div>'\n else:\n # group and format inputs and outputs\n pieces = cell_piece.split('\\n')\n output_flag = False\n piece = '{{{id=%s|\\n'%self.get_cellcount()\n for p in pieces:\n p = p.lstrip()\n\n if p[:5] == 'sage:' and not output_flag:\n piece += p[5:].lstrip() + '\\n'\n elif p[:5] == 'sage:' and output_flag:\n piece += '}}}\\n{{{id=%s|\\n'%self.get_cellcount() + p[5:].lstrip() + '\\n'\n output_flag = False\n elif p[:12] == '&gt;'*3 and not output_flag:\n piece += p[12:].lstrip() + '\\n'\n elif p[:12] == '&gt;'*3 and output_flag:\n piece += '}}}\\n{{{id=%s|\\n'%self.get_cellcount() + p[12:].lstrip() + '\\n'\n output_flag = False\n elif p[:3] == '...':\n piece += p[3:] + '\\n'\n else:\n # first occurrence of an output string\n # write /// denoting output\n if 
output_flag == False:\n piece += '///\\n'\n piece += p.lstrip() + '\\n'\n output_flag = True\n # multiple output lines exist, don't need /// repeated\n else:\n piece += p.lstrip() + '\\n'\n piece += '}}}\\n'\n return piece", "def convert_format(self, new_format):\n if new_format not in [0, 1, 2, 3]:\n raise ValueError(\"Unknown format specified\")\n\n inp_format = new_format\n if inp_format == 3:\n new_format = 2\n\n for block in self.frd.blocks:\n if hasattr(block, 'format'):\n block.format = new_format\n\n self.frd.node_block.format = inp_format", "def block(self, b):\n return self.flatten(b)" ]
[ "0.64059097", "0.6007487", "0.5995631", "0.5994798", "0.57876045", "0.5775755", "0.5706006", "0.56863886", "0.5680554", "0.5670126", "0.5669903", "0.5630315", "0.5629678", "0.5619763", "0.5568151", "0.55656964", "0.5549295", "0.55125326", "0.5497574", "0.5473819", "0.54342616", "0.54342616", "0.5427073", "0.5417499", "0.54116344", "0.54113334", "0.5399961", "0.53947765", "0.53895164", "0.5387718", "0.5385789", "0.5383649", "0.53797776", "0.53728926", "0.5369305", "0.5350667", "0.5349764", "0.53477716", "0.5335352", "0.5329524", "0.5319216", "0.5309259", "0.5304912", "0.5304646", "0.52979267", "0.5293801", "0.528386", "0.528386", "0.527213", "0.5266423", "0.52567077", "0.5256628", "0.52556807", "0.52544224", "0.52485955", "0.5243748", "0.5242372", "0.5242372", "0.52412945", "0.5234058", "0.52332383", "0.52284515", "0.5225369", "0.52148014", "0.52028215", "0.520117", "0.51982", "0.5196505", "0.51963556", "0.51858824", "0.5182084", "0.5179958", "0.5171081", "0.5170816", "0.5170741", "0.5169006", "0.5157012", "0.5155643", "0.5154534", "0.5151767", "0.5145868", "0.51447695", "0.51326007", "0.5132343", "0.51321566", "0.51312435", "0.5127601", "0.5127519", "0.512668", "0.51251405", "0.5116702", "0.5116663", "0.5116053", "0.51076746", "0.5105283", "0.51040226", "0.51027954", "0.51020896", "0.50977635", "0.509129" ]
0.7147282
0
Formats the output of a log
def outputLogFormatter(log):
    if log.get("blockNumber"):
        log["blockNumber"] = to_decimal(log["blockNumber"])
    if log.get("transactionIndex"):
        log["transactionIndex"] = to_decimal(log["transactionIndex"])
    if log.get("logIndex"):
        log["logIndex"] = to_decimal(log["logIndex"])
    return log
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def format(self, record):\n msg = logging.Formatter.format(self, record)\n label, color = self.label(record)\n if self.strip:\n return \"{:10s}{}\".format(label, sub(\"\\033\\\\[[0-9]+m\", \"\", msg, 0))\n else:\n return \"\\033[1;{}m{:10s}\\033[0m{}\".format(color, label, msg)", "def format(self, record):\n row = [self.formatTime(record, self.datefmt), record.name, record.levelname]\n keys = filter(self.filterer, record.__dict__)\n extra = [record.__dict__[k] for k in keys]\n\n self.writer.writerow(row + extra + [record.getMessage()])\n data = self.output.getvalue()\n self.output.truncate(0)\n self.output.seek(0)\n return data.strip()", "def format(self, record: LogRecord) -> str:\n record.asctime = datetime.datetime.utcnow().strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\")\n\n message = record.getMessage()\n if record.exc_info:\n eno = record.exc_info\n stacktrace = \"\".join(traceback.format_exception(None, eno[1], eno[2]))\n message += f\" excp: {stacktrace}\"\n if record.stack_info:\n stack = self.formatStack(record.stack_info)\n message += f\" trace: {stack}\"\n\n log_output = {\n \"tool\": type(self.checker).__name__,\n \"type\": \"infrastructure\",\n \"severity\": record.levelname,\n \"severityLevel\": max(0, record.levelno // 10 - 1),\n \"timestamp\": record.asctime,\n \"module\": record.module,\n \"function\": record.funcName,\n \"flag\": self.checker.flag,\n \"flagIndex\": self.checker.flag_idx,\n \"runId\": self.checker.run_id,\n \"roundId\": self.checker.round,\n \"relatedRoundId\": self.checker.flag_round,\n \"message\": message,\n \"teamName\": self.checker.team,\n \"teamId\": self.checker.team_id,\n \"serviceName\": self.checker.service_name,\n \"method\": self.checker.method,\n }\n\n return LOGGING_PREFIX + json.dumps(log_output)", "def format_log(request, message):\n now = datetime.now().replace(microsecond=0)\n log = MESSAGE_LOG_FORMAT % dict(request.META, MESSAGE=message, TIME=now)\n return log + \"\\n\"", "def format(self, record):\n log_fmt = self.FORMATS.get(record.levelno)\n return BaseFormatter(log_fmt).format(record)", "def format_result(self):\n return ('{}\\n\\n{}'.format(\n LogParser.format_dict(LogParser.order_dict(self.urls)[:3]),\n LogParser.format_dict(LogParser.order_dict(self.status_codes))))", "def log_message(self, format, *args):", "def format(self, message):", "def format(self, record):\n message = record.getMessage()\n asctime = self.formatTime(record, self.datefmt)\n name = yellow(record.name)\n\n s = \"%(timestamp)s %(levelname)s %(name)s \" % {\n \"timestamp\": green(\"%s,%03d\" % (asctime, record.msecs), bold=True),\n \"levelname\": self.LEVELS[record.levelname],\n \"name\": name,\n }\n\n if \"\\n\" in message:\n indent_length = len(re_color_codes.sub(\"\", s))\n message = message.replace(\"\\n\", \"\\n\" + \" \" * indent_length)\n\n s += message\n return s", "def logger_format(self) -> str:\n\t\treturn ('%(asctime) -19s | %(levelname) -8s | %(threadName) -10s | '\n\t\t\t\t'%(funcName) -16s | %(message)s')", "def formatter(record):\n\n lines = record[\"message\"].splitlines()\n prefix = (\n \"{time:YY-MM-DD HH:mm:ss.S} | {level.name:<8} | \"\n + \"{file}.{function}:{line} - \".format(**record)\n )\n indented = (\n lines[0] + \"\\n\" + \"\\n\".join(\" \" * len(prefix) + line for line in lines[1:])\n )\n record[\"message\"] = indented.strip()\n return (\n \"<g>{time:YY-MM-DD HH:mm:ss.S}</> | <lvl>{level.name:<8}</> | \"\n + \"<e>{file}.{function}:{line}</> - <lvl>{message}\\n</>{exception}\"\n )", "def format(self, record: logging.LogRecord = None) -> str:\n 
# s = super().format(record)\n s = None\n e = {}\n e['id'] = uuid.uuid4().hex\n e['message'] = record.getMessage()\n # log.warning('record.message: %r', record.getMessage())\n # log.warning('record.args: %r', record.args)\n e['created'] = record.created\n e['priority'] = record.levelname\n e['args'] = record.args\n e['source_code'] = {}\n e['source_code']['pathname'] = record.pathname\n e['source_code']['funcName'] = record.funcName\n e['source_code']['lineno'] = record.lineno\n ctx = record.args.get(PIPELINE_CONTEXT_KEY, None)\n if ctx:\n e[PIPELINE_CONTEXT_KEY] = ctx.toDict()\n # use array enclosure a[] to mainain the log file\n # yaml compliant as new events are appended\n # - event1:\n # - event2:\n # - ...\n a = [e]\n s = yaml.dump(a)\n return s", "def format(self, record):\n record.message = indent_string(record.getMessage())\n if \"%(asctime)\" in self._fmt:\n record.asctime = self.formatTime(record, self.datefmt)\n s = self._fmt % record.__dict__\n if record.exc_info:\n # Cache the traceback text to avoid converting it multiple times\n # (it's constant anyway)\n if not record.exc_text:\n record.exc_text = self.formatException(record.exc_info)\n if record.exc_text:\n if s[-1:] != \"\\n\":\n s = s + \"\\n\"\n s = \"{0} Exception:\\n {1}\".format(s, indent_string(record.exc_text))\n return s", "def format(self, record):\n\n\n if not hasattr(record, 'filename_'):\n record.file_indicator = '-'\n else:\n record.file_indicator = os.path.relpath(record.filename_.strip(),\n self.study_dir)\n record.line_indicator = self.format_aggregated(\n record,\n 'line_number',\n ' line %d:',\n ' lines [%s]:',\n optional=True)\n record.column_indicator = self.format_aggregated(\n record,\n 'column_number',\n ' column %d:',\n ' columns [%s]:',\n optional=True)\n record.cause_indicator = self.format_aggregated(\n record,\n 'cause',\n \"; value encountered: '%s'\",\n \"; values encountered: ['%s']\",\n join_string=\"', '\",\n optional=True)\n\n # format the string based on these fields\n formatted_result = super(LogfileStyleFormatter, self).format(record)\n\n # prepend an empty line if the filename is different than before\n current_filename = getattr(record, 'filename_', '')\n if (self.previous_filename is not None and\n current_filename != self.previous_filename):\n formatted_result = '\\n' + formatted_result\n self.previous_filename = current_filename\n\n return formatted_result", "def formatLogs(logs,format):\n formattedLogs=[]\n \n if(format.__eq__(\"json\")):\n for log in logs:\n formattedLogs.append(json.dumps(dict(log)))\n return formattedLogs\n elif(format.__eq__(\"xml\")):\n for log in logs:\n formattedLogs.append(dict2xml.dict2xml(dict(log)))\n return formattedLogs\n else:\n return logs", "def get_formatted_task_log(self):\n try:\n log = requests.get(self.gs_base_url + \"/out.log\").content\n except:\n return [f\"####-##-## ##:##:## Task ID: {self.name}\\n\"]\n return (f\"####-##-## ##:##:## Task ID: {self.name}\\n\" + log.decode('utf-8')).splitlines()", "def _format_msg(self, format_str, *args):\r\n return u\"{0} - - [{1}] {2}\\n\".format(\r\n self.client_address[0],\r\n self.log_date_time_string(),\r\n format_str % args\r\n )", "def _format_msg(self, format_str, *args):\n if not args:\n format_str = six.moves.urllib.parse.unquote(format_str)\n return \"{} - - [{}] {}\\n\".format(\n self.client_address[0],\n self.log_date_time_string(),\n format_str % args\n )", "def _stab_log_data(self, timestamp, data, logconf):\n print('[%d][%s]: %s' % (timestamp, logconf.name, data))", "def 
log_format_info(event_str, data = {}):\n\tcheck_type(event_str, StringType)\n\tcheck_type(data, DictType)\n\n\tinfo = []\n\tfor k in data:\n\t\tinfo.append('{0}: {1}'.format(k, data[k]))\n\treturn '{0}. Info: {1}'.format(event_str, ', '.join(info))", "def log_message(self, format, *args):\n if self.headers:\n xff = self.headers.getheader('X-Forwarded-For', '-')\n xgo = self.headers.getheader('X-Grafana-Org-Id', '-')\n ua = self.headers.getheader('User-Agent', '-')\n\n logging.info(\"%s - - [%s] %s [X-Forwarded-For: %s, X-Grafana-Org-Id: %s, User-Agent: %s]\" %\n (self.client_address[0], self.log_date_time_string(), format % args, xff, xgo, ua))\n else:\n logging.info(\"%s - - [%s] %s\" %\n (self.client_address[0], self.log_date_time_string(), format % args))", "def pretty_end_log(title):\n output = '>' * 10 + ' ' + title + ' ' + '<' * 10 + '\\n\\n'\n return output", "def _process_str(self, fmt, *args, **kwargs):\n log_str = fmt\n if len(args) > 0 or len(kwargs) > 0:\n log_str = fmt.format(*args, **kwargs)\n\n return log_str", "def _log(self, log, message):\n log_entry = '[%s] %s\\n' % (time.strftime('%Y/%m/%d %H:%M:%S'), message)\n log.write(log_entry)\n if self.verbose:\n print log_entry.rstrip()", "def output_log():\r\n log_str = (\"Contents:\\n\"\r\n f\"Input file: {LOC}\\n\"\r\n f\"Length checked: {LENGTH}\\n\"\r\n f\"Rows where {COL_NAME} is non_standard: {FLAGGED_FILE}\\n\"\r\n f\"NB, if {FLAGGED_FILE} does not exist, no values with\"\r\n f\"length not of {LENGTH} were found.\\n\"\r\n f\"Rows where {COL_NAME} is of standard length: {CLEANED_FILE}\\n\"\r\n f\"NB, if {CLEANED_FILE} does not exist, no values with\"\r\n f\"length not of {LENGTH} were found.\"\r\n f\"Rows where {COL_NAME} is NULL: {NULL_FILE}\\n\"\r\n f\"NB, if {NULL_FILE} does not exist, no values with\"\r\n )\r\n with open(LOG_FILE, 'w+') as f:\r\n f.write(log_str)", "def log(string, *format):\n print (string+\"\\n\") % format", "def format_output(output, case_number, status):\n output.append(\"Case #%s: %s\" % (case_number, status))", "def set_log_format(format):\n root = logging.getLogger()\n handler = root.handlers[0]\n formatter = logging.Formatter(format)\n handler.setFormatter(formatter)", "def test_format_log_message(self, log_message):\n token = Token(\"NDY3MjIzMjMwNjUwNzc3NjQx\", \"XsySD_\", \"s45jqDV_Iisn-symw0yDRrk_jf4\")\n log_message.format.return_value = \"Howdy\"\n\n return_value = TokenRemover.format_log_message(self.msg, token)\n\n self.assertEqual(return_value, log_message.format.return_value)\n log_message.format.assert_called_once_with(\n author=self.msg.author,\n author_id=self.msg.author.id,\n channel=self.msg.channel.mention,\n user_id=token.user_id,\n timestamp=token.timestamp,\n hmac=\"x\" * len(token.hmac),\n )", "def _log2mylog(self, msg):\n time_str = mod_time.strftime(\n \"%Y-%m-%d %H:%M:%S\", mod_time.localtime(mod_time.time())\n )\n msg = str(msg)\n content = \"%s [%s]\\n\" % (time_str, msg)\n fa = open(self.mylogfile, \"a\")\n fa.write(content)\n fa.close()", "def format(self, record):\n mappings = {\n 'asctime': create_timestamp,\n 'message': lambda r: r.msg,\n }\n\n formatters = self.parse()\n\n log_record = {}\n for formatter in formatters:\n try:\n log_record[formatter] = mappings[formatter](record)\n except KeyError:\n log_record[formatter] = record.__dict__[formatter]\n\n return json.dumps(log_record)", "def setFormat( self, fmt, style = '{' ):\n formatter = logging.Formatter( fmt, style = style )\n for handler in self.logger.handlers:\n handler.setFormatter( formatter )", "def 
format(self, record):\n return '[{}] {}'.format(QBShFormatter.LEVEL_DICT[record.levelname], record.getMessage())", "def log(text):\n print \"%s: %s\" % (str(datetime.datetime.now()), text)", "def create_log(self, exc):\n return self.formatter.formatException(exc)", "def format(self):\n ...", "def format(self) -> str:", "def fLOG (*l, **p) :\n path_add = p.get (\"LogPathAdd\", [] )\n\n lock = p.get(\"Lock\", None)\n if lock is not None : sys.hal_log_values[\"Lock\"] = lock\n \n if \"LogFile\" in p and \"LogPath\" in p : init (p [\"LogPath\"], p [\"LogFile\"])\n elif \"LogFile\" in p : init (filename = p [\"LogFile\"], path_add = path_add)\n elif \"LogPath\" in p : init (path = p [\"LogPath\"], path_add = path_add)\n \n def myprint(s): print(s)\n \n if \"OutputPrint\" in p : \n Print (p [\"OutputPrint\"])\n \n if \"LogFile\" in p :\n logfile = GetLogFile(True)\n \n dt = datetime.datetime (2009,1,1).now ()\n if len (l) > 0 :\n def _str_process (s) :\n if isinstance (s, str) : return s\n elif isinstance(s, bytes) : return s.decode(\"utf8\")\n else : \n try:\n return str (s)\n except Exception as e :\n raise Exception(\"unable to convert s into string: type(s)=\" + str(type(s))) from e\n \n message = str (dt).split (\".\")[0] + \" \" + \" \".join ( [_str_process(s) for s in l ] ) + sys.hal_log_values [\"__log_file_sep\"]\n \n if sys.hal_log_values [\"__log_display\"] : \n try :\n myprint (message.strip (\"\\r\\n\"))\n except UnicodeEncodeError :\n try :\n myprint (\"\\n\".join (repr (message.strip (\"\\r\\n\")).split (\"\\\\n\")))\n except UnicodeEncodeError :\n try :\n rr = repr (message.strip (\"\\r\\n\")).split (\"\\\\n\")\n for r in rr :\n myprint (r.encode(\"utf8\"))\n except UnicodeEncodeError :\n myprint (\"look error in log file\")\n GetLogFile ().write (message)\n st = \" \"\n else :\n st = str (dt).split (\".\")[0] + \" \"\n \n for k,v in p.items () :\n if k == \"OutputPrint\" and v : continue\n message = st + \"%s = %s%s\" % (str (k), str (v), sys.hal_log_values [\"__log_file_sep\"])\n if \"INNER JOIN\" in message :\n break\n GetLogFile ().write (message)\n if sys.hal_log_values [\"__log_display\"] : \n try :\n myprint (message.strip (\"\\r\\n\"))\n except UnicodeEncodeError :\n myprint (\"\\n\".join (repr (message.strip (\"\\r\\n\")).split (\"\\\\n\")))\n GetLogFile ().flush ()", "def clean_log(self, l, api_url):\n full_str = \"\"\n if len(l.split(\"- - \", 1)) > 1:\n s = l.split(\"- - \", 1)[1]\n date = s[s.find(\"[\") + 1 : s.find(\"]\")]\n method = s.split('\"')[1::2][0].split()[0]\n cur_call = s.split('\"')[1::2][0].split()[1].strip()\n status = sub(r\"\\D+\", \"\", s.split('\"', 2)[2])\n\n if cur_call != api_url + \"/\":\n full_str = (\n \"<span class='group_log'><span class='status_log code_\"\n + status\n + \"'>\"\n + status\n + \"</span>\"\n + \"<span class='date_log'>\"\n + date\n + \"</span><span class='method_log'>\"\n + method\n + \"</span></span>\"\n + \"<span class='group_log'><span class='call_log'><a href='\"\n + cur_call\n + \"' target='_blank'>\"\n + cur_call\n + \"</a></span></span>\"\n )\n\n return full_str", "def print_log(*content):\n now = datetime.datetime.now().strftime(\"%y-%m-%d %H:%M:%S\")\n print(\"MODEL INFO: \" + str(now)+ \" \", end='')\n print(*content)", "def writeToLogFile(self, event):\n outPutStr = '{:013}'.format(0)\n logOutPutStr = outPutStr + '\\t' + '{:.2f}'.format (time ()) + '\\t' + event + '\\t' + datetime.fromtimestamp (int (time())).isoformat (' ')\n printOutPutStr = outPutStr + '\\t' + datetime.fromtimestamp (int (time())).isoformat (' 
') + '\\t' + event\n print (printOutPutStr)\n if self.logFP is not None:\n self.logFP.write(logOutPutStr + '\\n')\n self.logFP.flush()", "def format(self, record):\n\n level_colors = {\n 'DEBUG': strc('DEBUG', 'yellow', 'bold'),\n 'INFO': strc('INFO', 'blue', 'bold'),\n 'WARNING': strc('WARNING', 'yellow', 'bold'),\n 'ERROR': strc('ERROR', 'red', 'bold'),\n 'CRITICAL': strc('CRITICAL', 'red', 'bold')}\n\n if record.levelname in level_colors.keys():\n record.levelname = level_colors[record.levelname]\n record.name = strc(record.name, 'black', 'bold')\n\n return logging.Formatter.format(self, record)", "def make_log_context(log_events, width=None):\n error_lines = set(e.line_no for e in log_events)\n log_events = sorted(log_events, key=lambda e: e.line_no)\n\n num_width = len(str(max(error_lines or [0]))) + 4\n line_fmt = \"%%-%dd%%s\" % num_width\n indent = \" \" * (5 + num_width)\n\n if width is None:\n _, width = tty.terminal_size()\n if width <= 0:\n width = sys.maxsize\n wrap_width = width - num_width - 6\n\n out = StringIO()\n next_line = 1\n for event in log_events:\n start = event.start\n\n if isinstance(event, BuildError):\n color = \"R\"\n elif isinstance(event, BuildWarning):\n color = \"Y\"\n else:\n color = \"W\"\n\n if next_line != 1 and start > next_line:\n out.write(\"\\n ...\\n\\n\")\n\n if start < next_line:\n start = next_line\n\n for i in range(start, event.end):\n # wrap to width\n lines = _wrap(event[i], wrap_width)\n lines[1:] = [indent + ln for ln in lines[1:]]\n wrapped_line = line_fmt % (i, \"\\n\".join(lines))\n\n if i in error_lines:\n out.write(colorize(\" @%s{>> %s}\\n\" % (color, cescape(wrapped_line))))\n else:\n out.write(\" %s\\n\" % wrapped_line)\n\n next_line = event.end\n\n return out.getvalue()", "def compose_logfile_lines(start_time, db_format_time, blast_time, option_lines,\r\n formatdb_cmd, blast_results, options, all_ids,\r\n hit_ids, removed_hit_ids,\r\n included_ids, DEBUG):\r\n\r\n log_lines = []\r\n log_lines.append(\"Sequence exclusion analysis run on %s\" % strftime(\"%c\"))\r\n log_lines.append(\r\n \"Formatting subject database took %2.f seconds\" %\r\n (db_format_time))\r\n log_lines.append(\r\n \"BLAST search took %2.f minute(s)\" %\r\n ((blast_time) / 60.0))\r\n log_lines.append(\r\n \"Total analysis completed in %2.f minute(s)\" %\r\n ((time() - start_time) / 60.0))\r\n\r\n log_lines.append(FORMAT_BAR)\r\n log_lines.append(\r\n \"| Options |\")\r\n log_lines.append(FORMAT_BAR)\r\n\r\n log_lines.extend(option_lines)\r\n log_lines.append(\"Subject database formatted with command: %s\"\r\n % formatdb_cmd)\r\n\r\n log_lines.append(FORMAT_BAR)\r\n log_lines.append(\r\n \"| Results |\")\r\n log_lines.append(FORMAT_BAR)\r\n\r\n log_lines.append(\"BLAST results above e-value threshold:\")\r\n log_lines.append(\r\n \"\\t\".join([\"Query id\", \"Subject id\", \"percent identity\", \"alignment length\",\r\n \"mismatches\", \"gap openings\", \"q. start\", \"q. end\", \"s. start\", \"s. 
end\", \"e-value\", \"bit score\"]))\r\n\r\n for line in blast_results:\r\n if line.startswith(\"#\"):\r\n continue\r\n else:\r\n log_lines.append(line)\r\n\r\n log_lines.append(\r\n \"Hits matching e-value and percent alignment filter: %s\" %\r\n ','.join(sorted(hit_ids)))\r\n\r\n log_lines.append(FORMAT_BAR)\r\n log_lines.append(\r\n \"| Summary |\")\r\n log_lines.append(FORMAT_BAR)\r\n\r\n log_lines.append(\"Input query sequences: %i\" % len(all_ids))\r\n log_lines.append(\r\n \"Query hits from BLAST: %i\" %\r\n (len(hit_ids) + len(removed_hit_ids)))\r\n log_lines.append(\r\n \"Query hits from BLAST lacking minimal percent alignment: %i\" %\r\n len(removed_hit_ids))\r\n log_lines.append(\"Final hits: %i\" % len(hit_ids))\r\n log_lines.append(\"Output screened sequences: %i\" % len(included_ids))\r\n\r\n log_lines.append(FORMAT_BAR)\r\n log_lines.append(\r\n \"| Output |\")\r\n log_lines.append(FORMAT_BAR)\r\n\r\n log_lines.append(\r\n \"Writing excluded sequences (hits matching filters) to: %s\" %\r\n join(options.outputdir, \"matching.fna\"))\r\n log_lines.append(\r\n \"Writing screened sequences (excluding hits matching filters) to: %s\" %\r\n join(options.outputdir, \"non-matching.fna\"))\r\n log_lines.append(\r\n \"Writing raw BLAST results to: %s\" %\r\n join(options.outputdir, 'raw_blast_results.txt'))\r\n\r\n # format for printing\r\n revised_log_lines = []\r\n for line in log_lines:\r\n line = line + \"\\n\"\r\n revised_log_lines.append(line)\r\n\r\n if DEBUG:\r\n for line in log_lines:\r\n print line\r\n\r\n return revised_log_lines", "def gitLogValue(format,directory):\n return subprocess.check_output([\"git\",\"log\",\"-1\",\"--pretty=format:%\"+format],cwd=directory).strip()", "def log_message(self, fmt, *args):\n pass", "def log(content):\n\n now = datetime.datetime.now().strftime(\"%c\")\n now_time = time.time()\n # msg_last = '{} - {: >5.1f} seconds - {}'.format(now, now_time - TIME_LAST, content)\n\n if Logger._time_last is not None:\n msg_last = Logger.human_seconds(now_time - Logger._time_last)\n else:\n msg_last = ' ' * 13\n\n msgs = [now, msg_last, content]\n\n msg = \" │ \".join(msgs)\n\n msg_lines = [\"─\" * len(content) for content in msgs]\n\n msg_top = \"─┬─\".join(msg_lines)\n msg_lower = \"─┴─\".join(msg_lines)\n\n print(\" ┌─{}─┐\".format(msg_top))\n print(\" │ {} │\".format(msg))\n print(\" └─{}─┘\".format(msg_lower))\n\n Logger._time_last = time.time()", "def print_to_log(self, output):\n print(output)", "def writelog(self,*args):\n import sys\n print(' '.join([str(a) for a in args]),file=sys.stderr)", "def log(info):\n print(f\"[{info}]\")", "def asformat(self, format):", "def formatted(self) -> str:\r\n ...", "def format(self, record: LogRecord) -> str:\n json_record: Dict = self.json_record(record.getMessage(), record)\n mutated_record: Dict = self.mutate_json_record(json_record)\n mutated_record = mutated_record if mutated_record is not None else json_record\n\n return self.to_json(mutated_record)", "def _log(self, format, args, level=None):\n if level is None:\n level = self.log_level\n xbmc.log(\n \"metadata.movie.stupid: %s - - [%s] %s\\n\" % (\n self.client_address[0], self.log_date_time_string(),\n format % args),\n level)", "def template(self, record):\n\n def _log_format_onecolor(record):\n \"\"\"\n Normal console output format\n \"\"\"\n\n return LEVEL_COLORS.get(record.levelname)\n\n def _log_format_notset(record, stylized=True):\n \"\"\"\n Default log format.\n \"\"\"\n\n reset = Style.RESET_ALL\n\n levelname = {\n 'style_before': 
LEVEL_COLORS.get(record.levelname) + Style.BRIGHT,\n 'format': '(%(levelname)s)',\n 'style_after': reset,\n 'prefix': '',\n 'suffix': '',\n }\n\n name = {\n 'style_before': Fore.WHITE + Style.DIM + Style.BRIGHT,\n 'format': '%(name)s',\n 'style_after': Fore.RESET + Style.RESET_ALL,\n 'prefix': ' ',\n 'suffix': ' ',\n }\n\n # format prefix + style_before + message + style_after + suffix\n result = reset\n for i in [levelname, name]:\n result += f\"{i['prefix']}{i['style_before']}{i['format']}{i['style_after']}{i['suffix']}\"\n result += reset\n\n return result\n\n # Template Switcher\n templates = {\n 'NOTSET': _log_format_notset,\n 'INFO': _log_format_onecolor,\n 'DELIMITER': _log_format_onecolor,\n 'TOPIC': _log_format_onecolor,\n 'WARNING': _log_format_onecolor,\n }\n\n return templates.get(record.levelname, _log_format_notset)(record)", "def format(self, data):", "def generateLog(outq1, outq2, outq3):\n # generating formatted string for output of question 1\n output_q1 = \"\\n\\t\\tArticles Ranked by Popularity\\n\"\n output_q1 += '-'*60 + \"\\n\"\n output_q1 += '{0:40} | {1:20}'.format('Article', 'Number Of Views') + '\\n'\n output_q1 += '-'*60 + \"\\n\"\n for ele in outq1:\n output_q1 += '{0:40} | {1:15}'.format(ele[0], ele[1]) + \"\\n\"\n output_q1 += '-'*60 + \"\\n\"\n\n # generating formatted string for output of question 2\n output_q2 = \"\\n\\t\\tAuthors Ranked by Popularity\\n\"\n output_q2 += '-'*60 + \"\\n\"\n output_q2 += '{0:40} | {1:20}'.format('Authors', 'Number Of Views') + '\\n'\n output_q2 += '-'*60 + \"\\n\"\n for ele in outq2:\n output_q2 += '{0:40} | {1:15}'.format(ele[0], ele[1]) + \"\\n\"\n output_q2 += '-'*60 + \"\\n\"\n\n # generating formatted string for output of question 3\n output_q3 = \"\\n\\t\\tDays with more than 1% error returns\\n\"\n output_q3 += '-'*60 + \"\\n\"\n output_q3 += '{0:15} | {1:20} | {2:20}'.format(\n 'Date',\n 'Number Of Views',\n 'Number of Errors') + '\\n'\n output_q3 += '-'*60 + \"\\n\"\n for ele in outq3:\n output_q3 += '{0:15} | {1:20} | {2:15}'.format(\n str(ele[0]),\n ele[1], ele[2]) + \"\\n\"\n output_q3 += '-'*60 + \"\\n\"\n\n with open('report.txt', 'w') as f:\n f.write(output_q1)\n f.write(output_q2)\n f.write(output_q3)\n f.close()\n\n print(output_q1)\n print(output_q2)\n print(output_q3)", "def send_log():\n log.info(f\"UUID={UUID}\")\n log.info(f\"SPLIT={SPLIT}\")\n log.info(f\"BATCH_SIZE={BATCH_SIZE}\")\n log.info(f\"EPOCHS={EPOCHS}\")\n log.info(f\"PATIENCE={PATIENCE}\")\n log.info(f\"X_FREQ={X_FREQ}\")\n log.info(f\"LOOK_BACK={LOOK_BACK}\")\n log.info(f\"LOOK_AHEAD={LOOK_AHEAD}\")\n log.info(f\"KERNEL_SIZE={KERNEL_SIZE}\")\n log.info(f\"FILTERS={FILTERS}\")\n log.info(f\"L1L2={L1L2}\")\n log.info(f\"D1={D1}\")\n log.info(f\"D2={D2}\")\n log.info(f\"DOUT={DOUT}\")\n log.info(f\"PLOT={PLOT}\")\n log.info(f\"SHUFFLE={SHUFFLE}\")", "def logOutput(self, line):\r\n self.writeToLog('output', line)", "def format(self) -> pulumi.Output[Optional['outputs.FlowLogFormatParametersResponse']]:\n return pulumi.get(self, \"format\")", "def logline(msg):\n print msg", "def __build_message_to_print_in_log(log: LogModel) -> Optional[str]:\n\n if log is None:\n return None\n\n log_level_name: str = LogHelper.get_log_level_name(log.log_level)\n message: str = \\\n f'{log.creation_date} |->\\t[{log_level_name}]\\t{log.message}\\t\\t[Line: {log.line_number}]\\t[{log.filename}]'\n\n return message", "def test_custom_log_format(log_tracker, monkeypatch, server):\n monkeypatch.setattr(\n 'cherrypy._cplogging.LogManager.access_log_format',\n '{h} 
{l} {u} {t} \"{r}\" {s} {b} \"{f}\" \"{a}\" {o}',\n )\n log_tracker.markLog()\n host = webtest.interface(webtest.WebCase.HOST)\n port = webtest.WebCase.PORT\n requests.get(\n 'http://%s:%s/as_string' % (host, port),\n headers={\n 'Referer': 'REFERER',\n 'User-Agent': 'USERAGENT',\n 'Host': 'HOST',\n },\n )\n log_tracker.assertLog(-1, '%s - - [' % host)\n log_tracker.assertLog(\n -1,\n '] \"GET /as_string HTTP/1.1\" '\n '200 7 \"REFERER\" \"USERAGENT\" HOST',\n )", "def formatOutput(output):\n assert output is not None, \"Output is None\"\n return \"\\n\" + \" \".join(output)", "def format(self, record):\n data = record.__dict__.copy()\n\n # if record.args:\n # msg = record.msg % record.args\n # else:\n # msg = record.msg\n\n data.update(\n username=getpass.getuser(),\n time=datetime.now(),\n host=gethostname(),\n #args=tuple(unicode(arg) for arg in record.args)\n args=record.args\n )\n if 'exc_info' in data and data['exc_info']:\n data['exc_info'] = self.formatException(data['exc_info'])\n return data", "def getLog():\n with open(webapp.config['LOGFILE'], 'r') as logfile:\n output = logfile.read()\n if request.headers['Accept'] == 'application/json':\n return output, 200\n else:\n return render_template(\"output.html\", output=output)", "def format_log_message(message, transaction=None, *args):\n if transaction or args:\n format_args = [transaction]\n format_args.extend(args)\n return message % tuple(format_args)\n else:\n return message", "def __init__(self, fmt, datefmt=None):\n logging.Formatter.__init__(self, fmt, datefmt)", "def format(self) -> Optional[pulumi.Input['FlowLogFormatParametersArgs']]:\n return pulumi.get(self, \"format\")", "def define_log_post_format_hooks(self):\n # TODO remove this once structlog supports hooks or handlers\n # these hooks accept a 'msg' and do not return anything\n return []", "def appendLog(self):\n if self.logBuffer == None :\n self.logBuffer = \"Some header\\nhere\\n\\n\"\n self.logBuffer += \"\\tx\\ty\\ttheta : ul\\tur\\tt-neur\\n\";\n \n self.logBuffer += '%2.1f: %2.6f\\t %2.6f\\t %2.6f : ' % \\\n\t ( self.t, self.env.state[0], self.env.state[2], self.env.state[4] )\n self.logBuffer += '%1.3f\\t %1.3f \\t%1.2f \\t' % \\\n ( self.env.action[0], self.env.action[1], self.env.action[2] )\n self.logBuffer += 'Dst/Theta/Speed: \\t%f\\t%f\\t%f \\tF: %.2f \\n' % \\\n ( self.env.getDistance(), self.env.getOrientation(), self.env.getDistance(), self.getReward() )", "def format(self, record):\n extra = {\n \"message\": record.getMessage(),\n \"time\": self.formatTime(record, self.datefmt),\n \"msecs\": record.msecs,\n \"name\": record.name,\n \"level\": record.levelname,\n }\n\n keys = filter(self.filterer, record.__dict__)\n extra.update({k: record.__dict__[k] for k in keys})\n return str(CustomEncoder().encode(extra))", "def format(self, record):\n message = super(ConsoleFormatter, self).format(record)\n color_code = self.color(self.log_colors, record.levelname)\n if hasattr(record, 'ctx'):\n metadata = record.ctx.invocation_metadata()\n for item in metadata:\n if item.key == 'author_name':\n setattr(record, 'user', item.value)\n elif item.key == 'correlation_id':\n setattr(record, 'correlationId', item.value)\n\n for key, value in record.__dict__.items():\n #this allows to have numeric keys\n if (key not in RESERVED_ATTR_HASH\n and not (hasattr(key, \"startswith\")\n and key.startswith('_'))):\n message = append(color_code=color_code, message=message, key=key, value=value)\n return message", "def _write_log(self, log_data):\n # for data in log_data:\n # 
self.log_file.write(\"{}\\n\".format(data).encode('utf-8'))\n self.log_file.write(\"{}\\n\".format(log_data).encode('utf-8'))\n self.log_file.flush()", "def log_format_error(caught_exception, event_str):\n\tcheck_type(caught_exception, Exception)\n\tcheck_type(event_str, StringType)\n\t\n\treturn '{0}, Class: {1}:{2}'.format(event_str, str(type(caught_exception)), caught_exception)", "def log(msg):\n print(str(msg))", "def log(self, message):", "def wiki_log(self, reponame):\n if reponame and reponame != '(default)':\n return '[log:%s@%s %s/%s]' % (reponame, ','.join([str(r) for r in\n self.get_compact()]), _(\"Revision Log\"), reponame)\n else:\n return '[log:@%s %s]' % (','.join([str(r) for r in\n self.get_compact()]),\n _(\"Revision Log\"))", "def format(self, record: logging.LogRecord) -> str:\n return filter_datum(self.fields, self.REDACTION,\n super().format(record), self.SEPARATOR)", "def log_display_to_file(pressure_msl, presstrend_str, presstrendval,\n cumulus_forecast, pressure_forecast,\n temp_c, dew_point, humidity,\n rrate, beaufort, wdir, winddeg, gust,\n is_lightning_possible_str,\n fog_str,\n line_pressure,\n line_metrics1, line_metrics2, line_metrics3, line_metrics4, line_metrics5, line_metrics6,\n line_moon,\n alert_str):\n log_filename = definitions.DISPLAY_ROOT + '/' + 'ptendency.tsv'\n log_rec = time.ctime() + '\\t' + \\\n pressure_msl.__str__() + '\\t' + \\\n presstrend_str + presstrendval.__str__() + '\\t' + \\\n pressure_forecast + '\\t' + \\\n temp_c.__str__() + '\\t' + \\\n dew_point.__str__() + '\\t' + \\\n humidity.__str__() + '\\t' + \\\n rrate.__str__() + '\\t' + \\\n beaufort.__str__() + '\\t' + \\\n gust.__str__() + '\\t' + \\\n wdir + '\\t' + \\\n winddeg.__str__() + '\\t' + \\\n is_lightning_possible_str.__str__() + '\\t' + \\\n fog_str.__str__() + '\\t' + \\\n '\"' + line_pressure + '\"' + '\\t' + \\\n '\"' + line_metrics1 + '\"' + '\\t' + \\\n '\"' + line_metrics2 + '\"' + '\\t' + \\\n '\"' + line_metrics3 + '\"' + '\\t' + \\\n '\"' + line_metrics4 + '\"' + '\\t' + \\\n '\"' + line_metrics5 + '\"' + '\\t' + \\\n '\"' + line_metrics6 + '\"' + '\\t' + \\\n alert_str + '\\t' + \\\n cumulus_forecast + '\\t' + \\\n '\\n'\n\n fp_out = open(log_filename, 'a')\n fp_out.write(log_rec)\n fp_out.close()\n\n print(log_rec.rstrip())\n\n return", "def logstr(*args, **kwargs):\n msg = \"\"\n # Seperation\n sep = kwargs.pop(\"sep\", \" \")\n # Get all arguments\n for arg in args:\n msg += str(arg) + sep\n # Remove final seperation of necessary\n if sep:\n msg = msg[:-len(sep)]\n return msg + kwargs.pop(\"endl\", \"\\n\")", "def dump(log, file):\n file.write('FSH|%s|PyDL7|ZXU|%s|\\n' %\n (log.metadata.get('encoding_characters', '^~<>{}'),\n log.created.strftime('%Y%m%d%H%M%S')))\n file.write('ZRH|%s|%s|%s|%s|%s|%s|%s|%s|\\n' %\n (log.metadata.get('encoding_characters', '^~<>{}'),\n log.computer_model,\n log.computer_serial,\n log.depth_pressure_unit,\n log.altitude_unit,\n log.temperature_unit,\n log.tank_pressure_unit,\n log.tank_volume_unit))\n for dive in log.dives:\n file.write('ZDH|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|\\n' %\n (dive.metadata.get('export_sequence', dive.sequence_number),\n dive.sequence_number,\n dive.metadata.get('record_type', 'M'),\n dive.recording_interval,\n dive.leave_surface_time.strftime('%Y%m%d%H%M%S'),\n dive.air_temperature,\n dive.tank_volume,\n dive.O2_mode,\n dive.rebreather_diluent_gas,\n dive.altitude))\n if dive.record:\n file.write('ZDP{\\n')\n for detail in dive.record:\n file.write('|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|\\n' %\n 
(detail.elapsed_time,\n detail.depth,\n detail.gas_switch,\n detail.current_PO2,\n str(detail.ascent_rate_violation)[0],\n str(detail.decompression_violation)[0],\n detail.current_ceiling,\n detail.water_temperature,\n detail.warning_number,\n detail.main_cylinder_pressure,\n detail.diluent_cylinder_pressure,\n detail.oxygen_flow_rate,\n detail.CNS_toxicity,\n detail.OUT,\n detail.ascent_rate))\n file.write('ZDP}\\n')\n file.write('ZDT|%s|%s|%s|%s|%s|%s|\\n' %\n (dive.metadata.get('export_sequence', dive.sequence_number),\n dive.sequence_number,\n dive.max_depth,\n dive.reach_surface_time.strftime('%Y%m%d%H%M%S'),\n dive.min_water_temperature,\n dive.pressure_drop))", "def format(self, record):\n message = {\n \"time\": datetime.utcfromtimestamp(record.created).isoformat(),\n \"level\": record.levelname,\n \"name\": record.name,\n \"message\": record.getMessage(),\n \"process\": record.process,\n \"thread\": record.threadName,\n \"hostname\": self.hostname,\n \"filename\": record.filename,\n \"function\": record.funcName,\n \"lineNo\": record.lineno,\n }\n\n if record.exc_info:\n message[\n \"exception\"\n ] = f\"{record.exc_info[0].__name__}: {record.exc_info[1]}\"\n message[\"traceback\"] = traceback.format_exc()\n\n return json.dumps(message, ensure_ascii=False)", "def _Log(self, logf, s):\r\n if logf:\r\n logf(s + '\\n')", "def write_log(self, msg, level = \"DEBUG\"):\r\n if len(self.parent)> 13:\r\n spacer = \"\\t\"\r\n elif len(self.parent) < 8:\r\n spacer = \"\\t\\t\\t\"\r\n else:\r\n spacer = \"\\t\\t\"\r\n \r\n log = level + \"\\t\" + self.parent +spacer +str(msg)\r\n print(log)", "def log(self, msg=\"\"):\n if len(msg):\n msg = \"[%.03fs] %s\" % (time.time()-self.timeStart, msg)\n print(msg)\n self.logLines.append(msg)", "def format(self, record):\n # type: (LogRecord) -> str\n try:\n return str(getattr(self, record.levelname)(record))\n except AttributeError as err:\n raise RuntimeError('Unknown record level (name: %s)' % record.levelname) from err", "def print_log (self, n = None):\r\n\t\tif n is None:\r\n\t\t\tn = len(self.log)\r\n\t\t\r\n\t\tfor i in range(-n,0):\r\n\t\t\tprint('@ {0: 8.1f} ms, {1} : {2}'.format(1000*self.log[i]['proctime'], self.log[i]['type'], self.log[i]['desc']) )", "def command(ctx):\n ctx.setup_logger(format='')", "def getLogs():", "def getLogs():", "def log(self, request, response, time):\n try:\n fmt_info = self.colorize_atoms(\n self._format_line(request, response, time)\n )\n\n values = list()\n extra = dict()\n for key, value in fmt_info:\n values.append(value)\n\n if key.__class__ is str:\n extra[key] = value\n else:\n extra[key[0]] = {key[1]: value}\n\n self.logger.info(\n self.colorize_msg(\n str(response.status), self._log_format % tuple(values),\n ),\n extra=extra\n )\n\n except Exception:\n self.logger.exception(\"Error in logging\")", "def format(self, filename, timestamp, line):\n return self._formatter({\n '@source': \"file://{0}{1}\".format(self._current_host, filename),\n '@type': self._file_config.get('type', filename),\n '@tags': self._file_config.get('tags', filename),\n '@fields': self._file_config.get('fields', filename),\n '@timestamp': timestamp,\n '@source_host': self._current_host,\n '@source_path': filename,\n '@message': line.strip(os.linesep),\n })", "def _format_output(**values):\r\n return WEATHER_TEXT.format(**values)", "def outputlogMessage(message):\n global logfile\n timestr = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime() )\n outstr = timestr +': '+ message\n print(outstr)\n f=open(logfile,'a')\n 
f.writelines(outstr+'\\n')\n f.close()", "def read_linelog():", "def getLogFormat(self):\n\t\treturn AbsentSafeRawConfigParser.absentSafeGet(self, \n\t\t\tLogConfigParser.__LOG_CONFIG_SECTION, \n\t\t\tLogConfigParser.__LOG_FORMAT_KEY)", "def output(data, output, dateformat):\n\t\n\tif (output == \"CSV\") or (output ==\"csv\"):\n\t\t# Pretty print to the console\n\t\tprintformat = \"%s,%s,%s\"\n\telif (output == \"TSV\") or (output == \"tsv\"):\n\t\t# Tab seperated\n\t\tprintformat = \"%s\\t%s\\t%s\"\n\telse:\n\t\t#Default to console\n\t\tprintformat = \"Date: %s\\t\\t In: %s\\t\\tOut:%s\"\n\n\t# data is list of lists. row is [date, in, out] message counts\n\ttry:\n\t\tfor row in data:\n\t\t\tprint printformat % (time.strftime(dateformat, row[0]), row[1], row[2])\n\texcept TypeError, e:\n\t\t\tprint \"Error: Bad date returned, exiting.\"\n\t\t\tsys.exit(2)", "def log(text, array=None):\n if array is not None:\n text = text.ljust(25)\n text += (\"shape: {:20} \".format(str(array.shape)))\n if array.size:\n text += (\"min: {:10.5f} max: {:10.5f}\".format(array.min(), array.max()))\n else:\n text += (\"min: {:10} max: {:10}\".format(\"\", \"\"))\n text += \" {}\".format(array.dtype)\n print(text)", "def logs(self, container: Container) -> str:" ]
[ "0.6809159", "0.6568247", "0.6517458", "0.64604336", "0.63606316", "0.63424927", "0.6341781", "0.63280874", "0.6323291", "0.63123155", "0.6245183", "0.6183246", "0.6177255", "0.6145785", "0.61174196", "0.61150354", "0.6070282", "0.60515445", "0.6051452", "0.6049557", "0.60433686", "0.603381", "0.60109496", "0.5991823", "0.5966236", "0.5930233", "0.5929388", "0.5907589", "0.59047997", "0.5894704", "0.58778703", "0.5864373", "0.5863256", "0.58343977", "0.58302677", "0.58183074", "0.58144015", "0.5805358", "0.5794794", "0.5786402", "0.5785099", "0.57796466", "0.5768931", "0.5764559", "0.5763946", "0.5762259", "0.57455605", "0.5704302", "0.56999165", "0.569975", "0.56956494", "0.5692284", "0.56839675", "0.5680149", "0.5679212", "0.5678204", "0.56702936", "0.56582236", "0.5646127", "0.5632194", "0.5628115", "0.56242085", "0.56206197", "0.56203854", "0.5588413", "0.5584432", "0.5581225", "0.55675656", "0.55655336", "0.5561965", "0.55508024", "0.5550637", "0.5545547", "0.553351", "0.55335", "0.55332094", "0.55322444", "0.55282164", "0.55264705", "0.5521786", "0.5506383", "0.55008644", "0.549949", "0.5497214", "0.549656", "0.549373", "0.5487632", "0.5487057", "0.54838246", "0.5482906", "0.5482906", "0.5482375", "0.5480642", "0.5477343", "0.5468764", "0.5464076", "0.54589266", "0.5458666", "0.54574805", "0.54568005" ]
0.71652734
0
Formats the input of a whisper post and converts all values to HEX
def inputPostFormatter(post): post["ttl"] = from_decimal(post["ttl"]) post["workToProve"] = from_decimal(post.get("workToProve", 0)) post["priority"] = from_decimal(post["priority"]) if not is_array(post.get("topics")): post["topics"] = [post["topics"]] if post.get("topics") else [] post["topics"] = [topic if is_0x_prefixed(topic) else encode_hex(topic) for topic in post["topics"]] return post
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def outputPostFormatter(post):\n\n post[\"expiry\"] = to_decimal(post[\"expiry\"])\n post[\"sent\"] = to_decimal(post[\"sent\"])\n post[\"ttl\"] = to_decimal(post[\"ttl\"])\n post[\"workProved\"] = to_decimal(post[\"workProved\"])\n\n if not post.get(\"topics\"):\n post[\"topics\"] = []\n\n post[\"topics\"] = [decode_hex(topic) for topic in post[\"topics\"]]\n\n return post", "def _encode_post(self):\n\t\tpost_data = self.config.get('post_data')\n\t\tif post_data is not None:\n\t\t\tpost_data = self._encode_data(post_data, self.config.get('post_data_bits'))\n\t\t\tif self.config.get('post'):\n\t\t\t\tpost_pulse = self._encode_tuple(self.config['post'])\n\t\t\t\treturn post_pulse + post_data\n\t\t\telse:\n\t\t\t\treturn post_data", "def stringify(self):\n hexcode = \"#\"\n for x in self.value:\n part = hex(x)[2:]\n if len(part) < 2: part = \"0\" + part\n hexcode += part\n return hexcode", "def w__format_hex(self, string):\n d = map(None, string)\n d = map(ord, d)\n d = map(lambda x: \"%02x\" % x, d)\n return ' '.join(d)", "def format_data(self, data):", "def _encode_text(self):\n\n print(f\"Hex encode; received message is {self.message}\")\n return self.message.encode(\"utf-8\").hex()", "def format(self, data):", "def hex(space, w_val):\n return space.hex(w_val)", "def _stata_hex_format(self, value):\n return self._convert_hex(float(value).hex())", "def hexify(text):\r\n return ' '.join([hexify_word(word) for word in text.split()])", "def encode(self):\n color_str = []\n if self.brightness is not None:\n color_str.append(f\"brightness:{self.brightness}\")\n if self.hue is not None:\n color_str.append(f\"hue:{self.hue}\")\n if self.saturation is not None:\n color_str.append(f\"saturation:{self.saturation}\")\n if self.kelvin is not None:\n color_str.append(f\"kelvin:{self.kelvin}\")\n\n return ' '.join(color_str)", "def _format_data(self, data, charset):\n\n return self._encode_data(data) if data else u''", "def formatData(self, temp):\n \n bits = 32 # Required for this protocol\n temp = int(temp*100) # Multiply by 100 to preserve decimal places\n \n if temp == 0:\n r ='0x00000000'\n elif temp < 0: # 2's complement for negatives\n temp = 2**bits + temp\n r = hex(temp)[:-1] # Remove trailing L for Long\n else:\n temph = hex(temp)\n r = '0x'+'0'*(10-len(temph)) + temph[2:]\n \n return r[2:]", "def hex_str (self):\n return \"#%02X%02X%02X\"%(self.r, self.g, self.b)", "def __formatHex(self,hex_):\n output = []\n lineEnd = hex_.find('\\n')\n while lineEnd > 0:\n output.append(hex_[0:lineEnd])\n hex_ = hex_[lineEnd+1:len(hex_)]\n lineEnd = hex_.find('\\n')\n Flash, EEPROM,IDlocs,Config = self.__formatAsPICFlash(output)\n return Flash, EEPROM,IDlocs,Config", "def sanatize_hex(data: str) -> str:\n return data.replace(\"0x\", \"\").replace(\"0X\", \"\")", "def hex_form(hash):\n final_hash = ''\n for i in range(len(hash)):\n final_hash += format(hash[i], '02x')\n return final_hash", "def format_value(text):\n return text.encode('utf8').replace('\\n', ' ').replace('\\r', ' ')", "def encode_feed_hashes(self, params):\n body = \"\"\n\n for p in params:\n body += \"feed[]=%s&\" % p\n\n return body", "def toHex(self):\r\n rgb = self.toRGB()\r\n return ('#%02s%02s%02s' % (hex(rgb[0])[2:], hex(rgb[1])[2:],\r\n hex(rgb[2])[2:])).replace(' ', '0')", "def escape_values(bfo):\n return 0", "def escape_values(bfo):\n return 0", "def escape_values(bfo):\n return 0", "def phex(value, expected):\n return f\"{value:#0{expected}x}\"", "def pretty_hebrew(val):\n return 'font-size:20px; font-family: Times New Roman; 
text-align: right; max-width: 500px'", "def to_hex6_string(self):\n def c(x):\n return int(x * 255.0)\n return '#{:02x}{:02x}{:02x}'.format(c(self.r), c(self.g), c(self.b))", "def format_data(self)->float: \n try:\n formatted = chr(self.data[0])\n for i in range(1, len(self.data)): \n formatted = formatted + (chr(self.data[i])) \n return str(formatted)\n except ValueError as e:\n print(e)\n except TypeError as e:\n print(e)", "def toHex(self):\n \n t=self.boolVals[:]\n t.reverse()\n \n string=str(self)\n \n \n string=hex(int(string,2))\n string=string[2:]\n\n d=ceil(self.n/4)-len(string)\n string=d*\"0\"+string\n return string", "def wkb_hex(self): # -> str:\n ...", "def sanitizte_color(value):\n if len(value) == 7 and value[0] == '#':\n return \"#%06x\" % int(value[1:], 16)\n raise ValueError('invalid color')", "def test_phred_to_ascii(self):\r\n self.assertEqual(phred_to_ascii(0, 120), 'x')\r\n self.assertEqual(phred_to_ascii(1, 119), 'x')", "def test_repr_format(self):\n t = OneHotEncode(3)\n assert t.repr_format(\"asfa\") == \"OneHotEncode(asfa)\"", "def entity_encode_hex(input, errors='strict'):\n output = ''\n for character in input:\n if character in ('&', '<', '>'):\n output += \"&#x%s;\" % character.encode('hex')\n else:\n output += character\n\n return (output, len(input))", "def convertirHexadecimal(self):\n self.convertir(lambda c: hex(ord(c))[2:], sep=' ')", "def test_toHex(self):\r\n self.assertEqual(self.black.toHex(), '#000000')\r\n self.assertEqual(self.red.toHex(), '#ff0000')\r\n self.assertEqual(self.pink.toHex(), '#640000')", "def hex_str (self):\n return \"#%02X%02X%02X\"%(self._intern[0],self._intern[1],self._intern[2])", "def _massage_raw_pg_output_vals(self):\n pass", "def test_ascii_to_phred(self):\r\n self.assertEqual(ascii_to_phred('x', 120), 0)\r\n self.assertEqual(ascii_to_phred('x', 119), 1)", "def normalize(self, text):\n\n return binascii.hexlify(text)", "def pack(self, input_string):\r\n #This function lacks basic error checking....\r\n klaf = ''\r\n for s in input_string:\r\n klaf += bin((ord(s) % 128) % 64)[2:].zfill(6)\r\n result = ''\r\n for i in range(0, 6):\r\n result = result + hex(int('' + klaf[i * 8:i * 8 + 8],\r\n 2))[2:].zfill(2)\r\n return result", "def rendermsg(self,msg):\n return ' '.join(['%02x'%ord(x) for x in msg])", "def encodeformats(self):\n return self.rpc.call(MsfRpcMethod.ModuleEncodeFormats)", "def upp_stringer(input_list): #input a characteristics list\r\n\toutput_list=[]\r\n\tfor item in input_list:\r\n\t\toutput_list.append(str(stellagama.pseudo_hex(item)))\r\n\treturn ''.join (output_list) #output a string\r", "def hexify_word(word):\r\n\r\n return ''.join([str(hex(ord(c))[2::]) for c in word])", "def encode(self, text):", "def hex(cls, x):\n return c_hex(x)", "def to_h(self):\n return str(self).encode('hex')", "def int2hex(n: int) -> str:", "def test_bytes_to_pretty_hex():\n data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]\n expected = (\n \"0000 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F 10 \"\n \"|................|\\n\"\n )\n\n result = cmds._bytes_to_pretty_hex(data=data)\n\n assert expected == result", "def convert(value):\n if value < 256 and value > 32 and chr(value) in string.printable:\n ascii_val = chr(value)\n else:\n ascii_val = \"\"\n\n return (hex(value), bin(value), ascii_val, value)", "def rgb_hex_str(self, x):\n return \"#%02x%02x%02x\" % self.rgb_bytes_tuple(x)", "def ether_btoa(bytes):\n\n pretty = \"\"\n for i in (range(5)):\n pretty += hex(bytes[i])[2:4] # Strip the 0x from the string\n 
pretty += ':'\n \n pretty += hex(bytes[5])[2:4] # Strip the 0x from the string\n\n return pretty", "def hexcode(self):\n hexc = \"#%.02X%.02X%.02X\" % (int(self.rgb_255[0]), int(self.rgb_255[1]), int(self.rgb_255[2]))\n return hexc", "def base64_to_hex(apps, schema_editor):\n APNSDevice = apps.get_model('push_notifications', 'APNSDevice')\n for device in APNSDevice.objects.all():\n device.registration_id = b2a_hex(a2b_base64(device.registration_id))\n device.save()", "def _convert_hex(self, hex_value):\n if not isinstance(hex_value, str):\n raise TypeError(\"given hex value must be str\")\n m = HEX_RE.match(hex_value)\n if m is None:\n raise ValueError(\"given string does not seem to be Python hex\")\n sign_char, base, exp_sign, exp = [m.group(i) for i in range(1,5)]\n new_sign = \"+\" if sign_char is None else sign_char\n # Line below converts exp to hex value. The \"0x\" prefix is removed \n # with [2:]. The exponent is padded with (too many) zeros (Stata \n # requires 3 digits), and reduced to last 3 digits with [-3:].\n new_exp = (\"000\" + hex(int(exp))[2:])[-3:]\n return \"\".join((new_sign, base, 'X', exp_sign, new_exp))", "def hexify(c):\n try:\n s = c.encode(\"utf-8\").encode(\"hex\")\n except UnicodeDecodeError:\n s = 0\n n = len(s)\n if n <= 2: return s\n a = ' - '.join([s[i:i+2] for i in range(0,n,2)])\n return a[:-1]", "def handle_hex_stream(data):\n stream = extract_stream(data)\n if stream is not False: # stream can actually be null (HexStream = \\\\x00\\\\x00). so we explicitly check for False\n is_binary = not all(c in string.printable for c in stream)\n return stream.encode(\"base64\") if is_binary else stream\n return data", "def render_list_as_hex(self, data):\n s = '[ '\n for c in data:\n s += '%02x ' % c\n s += ']'\n return s", "def hex(self):\n return binascii.hexlify(self.data)", "def print_as_hex(s):\n print(\":\".join(\"{0:x}\".format(ord(c)) for c in s))", "def ToHex(val):\n return 'None' if val is None else '%#x' % val", "def color_hex(x):\n\n quest_hex = {\"choice\": \"FF4530\",\n \"short\": \"FCAA03\",\n \"code\": \"5CB130\"\n }\n\n if x == \"programming\":\n hex_code = quest_hex[\"code\"]\n elif x == \"short_answer\":\n hex_code = quest_hex[\"short\"]\n else:\n hex_code = quest_hex[\"choice\"]\n\n return hex_code", "def hex(string):\n return string.encode('hex')", "def escape_binary(message):\n out = \"\"\n for c in message:\n d = ord(c)\n if d in (0x23, 0x24, 0x7d):\n out += chr(0x7d)\n out += chr(d ^ 0x20)\n else:\n out += c\n return out", "def format_hex(value, mask):\n if mask is None:\n return \"0x{:x}\".format(value)\n return \"0x{:x}/0x{:x}\".format(value, mask)", "def urlencodeall(str):\n if not str:\n return \"\"\n\n return string.join(['%' + s.encode('hex') for s in str], '')", "def repr2col(tups):\n quant = tuple(round(comp * 255) for comp in tups)\n if quant[3] != 255: return \"\".join([\"{0:02x}\".format(m) for m in quant])\n quant = quant[:3]\n for w in shortaliases:\n if quant == shortaliases[w]: return w\n return \"#\" + \"\".join([\"{:02x}\".format(n) if max(n % 17 for n in quant) else \"{:x}\".format(n >> 4) for n in quant])", "def encode(self, strs):\n s = \"\"\n for i in strs:\n s += str(len(i)) + \"#\" + i\n return s", "def test_phred_to_ascii64(self):\r\n self.assertEqual(phred_to_ascii64(0), '@')\r\n self.assertEqual(phred_to_ascii64(30), '^')", "def bytesToStr(self, argvbytes, hexformat):\r\n msg = bytes(argvbytes)\r\n if hexformat:\r\n s = \"\"\r\n for i in range(len(msg)):\r\n hhex = \"%02x\" % msg[i]\r\n s += hhex + ' '\r\n return 
s\r\n else:\r\n return msg.decode(\"utf-8\")", "def encode_hex(b):\n if isinstance(b, str):\n b = bytes(b, \"utf-8\")\n if isinstance(b, bytes):\n return str(hexlify(b), \"utf-8\")\n raise TypeError(\"Value must be an instance of str or bytes\")", "def preprocess_msg(self):\n self.tmp_msg = self.tmp_msg.lower()\n cleared = ''\n for ch in self.tmp_msg:\n if ch in string.ascii_lowercase:\n cleared += ch\n\n c = ''\n for ch in cleared:\n c += '{:02d}'.format(ord(ch) - 97)\n if len(c) % 4 != 0:\n c += '99'\n self.tmp_msg = c\n\n super().preprocess_msg()", "def tohex(data: str) -> str:\n match = re.fullmatch(r\"^0[x|X][0-9a-fA-F]+\", data)\n if match:\n return data.lower()\n match = re.fullmatch(r\"^[0-9a-fA-F]+[h|H]$\", data)\n if not match:\n raise ValueError(f\"Required hex of the form `0x` or `H` found {data}\")\n match = re.match(r\"^[0-9a-fA-F]+\", data)\n return f\"0x{match.group().lower()}\"", "def data() -> str:\n return \"1721\\n979\\n366\\n299\\n675\\n1456\"", "def pack(self, data):\n for a, b in [(x, chr(ord(x) ^ 0x20)) for x in ['}','*','#','$']]:\n data = data.replace(a,'}%s' % b)\n crc = (sum(ord(c) for c in data) % 256) \n return \"$%s#%02X\" %(data, crc)", "def ascii_to_phred33(c):\r\n return ascii_to_phred(c, 33)", "def e(d):\n return d.encode('UTF-8')", "def printunichars(row):\n print(\"Title:\")\n print(row[0].encode('utf-8'))\n print(\"Body:\")\n print(row[1].encode('utf-8'))\n print(\"Ref:\")\n print(row[2].encode('utf-8'))\n print(\"Url:\")\n print(row[3].encode('utf-8'))", "def _encode_post_dict(self, post_dict):\r\n return json.dumps({\r\n k: v.encode('utf-8') if v is not None else v\r\n for k, v in post_dict.items()\r\n })", "def hx(i):\n a = hex(i)[2:]\n if len(a)<2: a = ''.join(['0',a])\n return a", "def ByteToHex( bins ):\r\n\r\n return ''.join( [ \"%02X\" % x for x in bins ] ).strip()", "def to_hex(text):\n return ' '.join([hex(ord(char)) for char in unicode(text, 'UTF-8')])", "def pack_ascii(self):\n\n out = ''\n for w in sorted(self.all_words()):\n assert isinstance(self.value[w], LOTHypothesis), \"*** Can only pack Lexicons with FunctionNode values\"\n out += \"%s:%s;\" % (w, self.value[w].grammar.pack_ascii(self.value[w].value) )\n return out", "def _preprocess(self, sent: str) -> str:\n sent = sent.replace(\" \", \"▁\")\n return \" \".join([c for c in sent])", "def Encode(cls, db, bom, device_info, version, is_rma_device):\n getter = _ConfiglessFieldGetter(\n db, bom, device_info, version, is_rma_device)\n return '-'.join(\n hex(getter(field)).upper().replace('0X', '') for field in cls.FIELDS)", "def hex_value(value, width=8):\n return 'X\"{0:0{1}x}\"'.format(value, width)", "def hexbyte(string):\n#\treturn repr(string)\n\ts = \"\"\n\tfor i in string:\n\t\tif (ord(i) >= ord('A') and ord(i) <= ord('z')) \\\n\t\t\tor (ord(i) >= ord('0') and ord(i) <= ord('9')) \\\n\t\t\tor (ord(i) == ord(\" \")):\n\t\t\ts += \"%s\" % i\n\t\telse:\n\t\t\ts += \"\\\\x%02x\" % ord(i)\n\n#\t\ts += \" \"\n\treturn s", "def encode(self, strs):", "def encode(self, strs):", "def __str__(self):\n\n\t\tif self.rawValue == None: return str()\n\n\t\tx = self.rawValue\n\n\t\tif not x.isdigit() or len(x) != 44 or len(set(x)) == 1:\n\t\t\treturn self.rawValue\n\n\t\treturn '{} {} {} {} {} {} {} {} {} {} {}'.format(x[:4], x[4:8], x[8:12], x[12:16], x[16:20], x[20:24], x[24:28], x[28:32], x[32:36], x[36:40], x[40:44])", "async def encode(self, ctx, value = None , from_type = None, *, to_type = None):\n\n if value == None or from_type == None or to_type == None:\n msg = 'Usage: `{}encode 
\"[value]\" [from_type] [to_type]`\\nTypes include ascii, hex, and base64.'.format(ctx.prefix)\n await ctx.send(msg)\n return\n\n types = [ \"base64\", \"hex\", \"ascii\" ]\n\n if not from_type.lower() in types:\n await ctx.send(\"Invalid *from* type!\")\n return\n\n if not to_type.lower() in types:\n await ctx.send(\"Invalid *to* type!\")\n return\n\n if from_type.lower() == to_type.lower():\n await ctx.send(\"*Poof!* Your encoding was done before it started!\")\n return\n\n try:\n if from_type.lower() == \"base64\":\n if to_type.lower() == \"hex\":\n await ctx.send(self.suppressed(ctx.guild, self._base64_to_hex(value)))\n return\n elif to_type.lower() == \"ascii\":\n await ctx.send(self.suppressed(ctx.guild, self._base64_to_ascii(value)))\n return\n elif from_type.lower() == \"hex\":\n if to_type.lower() == \"ascii\":\n await ctx.send(self.suppressed(ctx.guild, self._hex_to_ascii(value)))\n return\n elif to_type.lower() == \"base64\":\n await ctx.send(self.suppressed(ctx.guild, self._hex_to_base64(value)))\n return\n elif from_type.lower() == \"ascii\":\n if to_type.lower() == \"hex\":\n await ctx.send(self.suppressed(ctx.guild, self._ascii_to_hex(value)))\n return\n elif to_type.lower() == \"base64\":\n await ctx.send(self.suppressed(ctx.guild, self._ascii_to_base64(value)))\n return\n except Exception:\n await ctx.send(\"I couldn't make that conversion!\")\n return", "def format_hex(hex):\n octets = [hex[i:i+2] for i in range(0, len(hex), 2)]\n pairs = [\" \".join(octets[i:i+2]) for i in range(0, len(octets), 2)]\n return \"\\n\".join(pairs)", "def test_phred_to_ascii33(self):\r\n self.assertEqual(phred_to_ascii33(0), '!')\r\n self.assertEqual(phred_to_ascii33(30), '?')", "def escapeEncode(s: unicode) -> unicode:\n ...", "def get_hex_color(self) -> str:\n return f'#{self.color.hex()}'", "def ascii_to_hex_string(eng, control_codes={}):\n eng_bytestring = \"\"\n if not eng:\n return \"\"\n else:\n try:\n eng = str(eng)\n except UnicodeEncodeError:\n # Tried to encode a fullwidth number. Encode it as sjis instead.\n eng = eng.encode('shift-jis')\n\n eng_bytestring = eng\n\n for cc in control_codes:\n cc_hex = ascii_to_hex_string(cc)\n if cc_hex in eng_bytestring:\n eng_bytestring = eng_bytestring.replace(cc_hex, control_codes[cc])\n\n return eng_bytestring", "def print_ofpt_echo_request(msg):\n if len(msg.data.value) > 0:\n hexdump(msg.data.value)", "def format(self) -> str:", "def format_hex(self, list_converted):\n dict_hex = {10: 'A', 11: 'B', 12: 'C', 13: 'D', 14: 'E', 15: 'F'}\n list_converted = [dict_hex[n] if n in dict_hex.keys() else str(n) for n in list_converted]\n return list_converted", "def formatText(input_text):\n\tdata = {\"text\": input_text}\n\tprint 'Waiting for return ...'\n\treq = requests.post('http://34.212.39.136:5678/format', json = data)\n\n\toutput_text = req.json()['result']\n\treturn output_text" ]
[ "0.6161056", "0.6024747", "0.5889979", "0.5726662", "0.5721067", "0.5715071", "0.5712564", "0.56954545", "0.56508905", "0.5574359", "0.5507912", "0.5491416", "0.5455812", "0.543887", "0.54006004", "0.537959", "0.536963", "0.5341674", "0.5320729", "0.5312651", "0.5311158", "0.5311158", "0.5311158", "0.53070915", "0.5299621", "0.52676135", "0.5263652", "0.5242282", "0.521867", "0.5197538", "0.51835537", "0.5180486", "0.5179168", "0.5154518", "0.513498", "0.51348335", "0.51335686", "0.5130629", "0.50906134", "0.508619", "0.50778365", "0.5077807", "0.50738865", "0.5072734", "0.5062664", "0.50567967", "0.50463957", "0.50266945", "0.50247896", "0.5024031", "0.50228584", "0.50154835", "0.50130427", "0.49944735", "0.49906373", "0.49875605", "0.4987045", "0.49827552", "0.49770916", "0.49765328", "0.49588975", "0.4955632", "0.49406326", "0.4935696", "0.49146673", "0.48969498", "0.48961145", "0.48919344", "0.48878103", "0.48875198", "0.488697", "0.48855174", "0.4885055", "0.4870307", "0.4852188", "0.48420936", "0.483684", "0.48333704", "0.48329508", "0.48307502", "0.48264876", "0.48253676", "0.48198363", "0.48106432", "0.47976455", "0.4793193", "0.4785525", "0.4778648", "0.4778648", "0.47745183", "0.476053", "0.47554675", "0.47536385", "0.47477174", "0.47396284", "0.47331285", "0.47307256", "0.47290075", "0.47242644", "0.47225696" ]
0.6506106
0
Formats the output of a received post message
def outputPostFormatter(post): post["expiry"] = to_decimal(post["expiry"]) post["sent"] = to_decimal(post["sent"]) post["ttl"] = to_decimal(post["ttl"]) post["workProved"] = to_decimal(post["workProved"]) if not post.get("topics"): post["topics"] = [] post["topics"] = [decode_hex(topic) for topic in post["topics"]] return post
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def format(self, message):", "def inputPostFormatter(post):\n\n post[\"ttl\"] = from_decimal(post[\"ttl\"])\n post[\"workToProve\"] = from_decimal(post.get(\"workToProve\", 0))\n post[\"priority\"] = from_decimal(post[\"priority\"])\n\n if not is_array(post.get(\"topics\")):\n post[\"topics\"] = [post[\"topics\"]] if post.get(\"topics\") else []\n\n post[\"topics\"] = [topic if is_0x_prefixed(topic) else encode_hex(topic)\n for topic in post[\"topics\"]]\n\n return post", "def post(self):\n return write_msg(request.json)", "def render_POST(self, request):\n\t\tprint request.args[\"message\"][0]\n\t\tself.jabberClient.sendMessage(request.args[\"message\"][0])\n\t\treturn ''", "def render_post(response, post):\n response.out.write('<b>' + post.subject + '</b><br>')\n response.out.write(post.content)", "def render_post(response, post):\n\n response.out.write('<b>' + post.subject + '</b><br>')\n response.out.write(post.content)", "def formatText(input_text):\n\tdata = {\"text\": input_text}\n\tprint 'Waiting for return ...'\n\treq = requests.post('http://34.212.39.136:5678/format', json = data)\n\n\toutput_text = req.json()['result']\n\treturn output_text", "def input_post(): #TODO, error handling for privacy checks\n\n message = request.form['message']\n page_token = session['page']['access_token']\n resp = utils.post_message(message, page_token, session['visibility'])\n return render_template('success.html', post_id = resp['id'])", "def process_post_result(resp):\n resp_json = resp.json()\n if 'message' in resp_json:\n message = resp_json['message']\n print_info(f'{message}.')\n return\n\n raise Exception(f'{response_message(resp_json)}')", "def send_echo(self, post_data):\n # Get sent message\n message = self.extract_message(\"/echo\", post_data.text)\n return message", "def format(self, data):", "def _encode_post(self):\n\t\tpost_data = self.config.get('post_data')\n\t\tif post_data is not None:\n\t\t\tpost_data = self._encode_data(post_data, self.config.get('post_data_bits'))\n\t\t\tif self.config.get('post'):\n\t\t\t\tpost_pulse = self._encode_tuple(self.config['post'])\n\t\t\t\treturn post_pulse + post_data\n\t\t\telse:\n\t\t\t\treturn post_data", "def printPost(self, id):\n enc = getpreferredencoding()\n output = self._extractPost(id)['formatted_text']\n print output.encode(enc)", "def create_output(self, messages):", "def postMessage(self, message):\n if self.BotOutputRequested:\n pass\n else:\n SiteDetailOutput.PrintStandardOutput(message, verbose=self._verbose)", "def print_post():\n print('| | |'),", "def post(self):\n user = request.form['user_name']\n text = request.form['text']\n return {\"text\": 'msg sent successfully.\\ntext:'\n + text + '\\nuser:' + user}", "def post(self):\n text = 'HELLO from socnet API Server!'\n return push_to_mattermost(text)", "def format(self, message):\n\t\tif type(self.protocol[0]).__name__ == \"Raw\":\n\t\t\treturn self.name + \":\" + message\n\t\treturn message", "def output(self, msg):", "def post(self):\n r = request.get_json()['text']\n # Recupero dalla richiesta il JSON che mi è stato inviato\n # e salvo il valore contenuto \n # sotto la chiave text su una variabile r.\n # In reguito ritorno r in formato stringa.\n # In quanto il valore di ritorno deve essere una stringa.\n return str(r)", "def pretty_print_POST(req):\n print('{}\\n{}\\n{}\\n\\n{}'.format(\n '-----------START-----------',\n req.method + ' ' + req.url,\n '\\n'.join('{}: {}'.format(k, v) for k, v in req.headers.items()),\n req.body,\n ))", "async def render_post(self, 
request):\n self._hx711.tare()\n return Message(code=CHANGED)", "def format_response(response):\n start_line = _format_status_line(response.status, response.reason)\n msg = _format_message(start_line, response.header, response.body)\n return msg", "def post_data():\n return json.loads('{\"success\":true, \"message\":\"Data created (but not really)\" }')", "def post_command(self) -> str:\n rtn = ''\n if self.terminator:\n rtn += self.terminator\n\n if self.suffix:\n rtn += ' ' + self.suffix\n\n if self.pipe_to:\n rtn += ' | ' + self.pipe_to\n\n if self.output:\n rtn += ' ' + self.output\n if self.output_to:\n rtn += ' ' + self.output_to\n\n return rtn", "def pretty_print_POST(req):\n print('{}\\n{}\\n{}\\n\\n{}'.format(\n '-----------START-----------',\n req.method + ' ' + req.url,\n '\\n'.join('{}: {}'.format(k, v) for k, v in req.headers.items()),\n req.body,\n ))\n print('------------END------------\\n')", "def create_series_msg(self, post_url: str) -> str:\n return self._render(\"series-pm\", post_url=post_url)", "def horde_message(self, message):", "def format_response_for_display(self, response, case):\n out_bits = []\n parsed = self.parse_response(response, case)\n\n request = parsed['request']\n out_bits.append(request['request_line'])\n for header, value in request['headers'].items():\n out_bits.append('%s: %s' % (header, value))\n if request['body']:\n out_bits.extend(('', request['body']))\n\n out_bits.extend([''] * 2)\n\n response = parsed['response']\n out_bits.append(response['response_line'])\n for header, value in response['headers'].items():\n out_bits.append('%s: %s' % (header, value))\n if response['body']:\n out_bits.extend(('', response['body']))\n\n return '\\n'.join(out_bits)", "def output_raw_message(text):\n database.messages_output_queue.put(text)", "def _respond_message(self, msg):\n self.set_status(200)\n self.set_header(\"Content-Type\", \"application/x-mplane+json\")\n self.write(mplane.model.unparse_json(msg))\n self.finish()", "def pretty_print_POST(req):\r\n print('{}\\n{}\\r\\n{}\\r\\n\\r\\n{}'.format(\r\n '-----------START-----------',\r\n req.method + ' ' + req.url,\r\n '\\r\\n'.join('{}: {}'.format(k, v) for k, v in req.headers.items()),\r\n req.body,\r\n ))", "def process_multi_body_format(commands):", "def pretty_print_POST(req):\n print('{}\\n{}\\n{}\\n\\n{}'.format(\n '-----------START-----------',\n req.method + ' ' + req.url,\n '\\n'.join('{}: {}'.format(k, v) for k, v in req.headers.items()),\n req.body,\n ))", "def pretty_print_POST(req):\n print('{}\\n{}\\n{}\\n\\n{}'.format(\n '-----------START-----------',\n req.method + ' ' + req.url,\n '\\n'.join('{}: {}'.format(k, v) for k, v in req.headers.items()),\n req.body,\n ))", "def pretty_print_POST(req):\n print('{}\\n{}\\n{}\\n\\n{}'.format(\n '-----------START-----------',\n req.method + ' ' + req.url,\n '\\n'.join('{}: {}'.format(k, v) for k, v in req.headers.items()),\n req.body,\n ))", "def pretty_print_POST(req):\n print('{}\\n{}\\n{}\\n\\n{}'.format(\n '-----------START-----------',\n req.method + ' ' + req.url,\n '\\n'.join('{}: {}'.format(k, v) for k, v in req.headers.items()),\n req.body,\n ))", "def pretty_print_POST(req):\n print('{}\\n{}\\r\\n{}\\r\\n\\r\\n{}'.format(\n '-----------START-----------',\n req.method + ' ' + req.url,\n '\\r\\n'.join('{}: {}'.format(k, v) for k, v in req.headers.items()),\n req.body,\n ))", "def post(self):\n try:\n msg_json = json.loads(request.form['text'])\n except json.decoder.JSONDecodeError:\n return {\"text\": \"Failed to send msg. 
Incorrect json format.\"}\n msgId = write_msg(msg_json)\n responseText = 'successfully created msg ' \\\n 'with id: ' + msgId\n return {\"text\": responseText}", "def post(self):\n code, status = run_handlers.handle_data_post(self.request.headers, self.request.body)\n self.set_status(code)\n self.write(status)\n self.finish()", "async def render_post(self, request):\n\n # Parses the POSTed data into a dictionary sends an empty payload if failed.\n try:\n data = parse_util.parse_post_data(request.payload)\n except Exception as e:\n return aiocoap.Message(payload = ''.encode('ascii'))\n\n # Gets the values from the POST request\n device_key = data.get('u', None)\n device_auth = data.get('p', None)\n device_food_eaten = data.get('f', None)\n device_drop_success = data.get('d', None)\n\n # Pet feeders need to send their product and authentication keys.\n if not device_key or not device_auth:\n return aiocoap.Message(payload = ''.encode('ascii'))\n\n # Grabs the information about the feeder from the database\n feeder_info= db.getFeederByProductKey(device_key)\n\n feeder_id = feeder_info[\"_id\"]\n if not feeder_info:\n return aiocoap.Message(payload = ''.encode('ascii'))\n\n # Verifies if the pet feeder client sent the correct authentication key\n if not db.verifyFeeder(feeder_id, device_auth):\n return aiocoap.Message(payload = ''.encode('ascii'))\n\n # If the POST parameter \"d\" is present then it will update the status\n # of the pet feeder.\n # Otherwise it will log the amount of food consumed\n if device_drop_success:\n if device_drop_success == '1':\n db.updateFeeder(feeder_info[\"_id\"], status=\"FAIL\")\n else:\n db.updateFeeder(feeder_info[\"_id\"], status=\"OK\")\n return aiocoap.Message(payload = 'status updated'.encode('ascii'))\n elif device_food_eaten:\n try:\n food_eaten = int(device_food_eaten)\n except:\n return aiocoap.Message(payload = ''.encode('ascii'))\n\n db.logOngoingConsumption(feeder_id, food_eaten)\n\n # Checks with the database if the pet feeder has any drop food events it\n # needs to execute.\n jobs = db.getReadyEventsForFeeder(feeder_id)\n payload = 'd' if len(jobs) > 0 else 'n'\n if payload == 'd':\n # Clears the pending events from the database.\n for job in jobs:\n db.logFeedingResult(feeder_id, job[\"type\"], dt.datetime.now(), job[\"count\"], \"OK\")\n db.deleteScheduledEventById(job[\"_id\"])\n\n return aiocoap.Message(payload = payload.encode('ascii'))", "def format_data(self, data):", "def post_message(self, message: dict) -> any:\n logging.info(message)\n # If the level is greater, do not post messages\n if message[\"level\"] > BaseConfig.LOGGING_LEVEL_FILTER:\n return\n try:\n blocks = self._generate_slack_block(message=message)\n response = self.client.chat_postMessage(\n channel=self.channel_id,\n blocks=blocks,\n mrkdwn=True,\n text=generate_mkdn_message(message=message, format=\"JIRA\")\n )\n return response\n except SlackApiError as e:\n logging.error(f\"Got an error: {e.response['error']}\")", "def render_POST(self, request, query=None):\n # make a parser and parse the request\n parser = qp.QueryParser(request)\n if not query: query = request.content.read() \n try: \n # run the query locally\n d = parser.runquery(self.db, query)\n except Exception, e:\n log.err(\"Failing query: \" + str(query))\n log.err()\n setResponseCode(request, e, 400)\n return str(e)\n else:\n # and send the reply\n request.setHeader('Content-type', 'application/json')\n\n if not query.strip().startswith('apply'):\n # apply streams the output out itself\n 
d.addCallback(lambda reply: (request, reply))\n d.addCallback(self.send_reply)\n d.addErrback(lambda x: self.send_error(request, x))\n return server.NOT_DONE_YET", "def deliver_post(data, access=None):\n\n schema = get_post_schema(data)\n return deliver_fields(schema, data, access)", "def _post(level, tag, message=None):\n if message == None:\n message = tag\n tag = \"hotword\"\n\n message = \"%s%s\\033[0;37;40m\" % (Log.COLOURS[level], message)\n\n logger = Log._get_logger(level, tag)\n method = getattr(logger, level)\n method(Log._message(message))", "def _post(self, *args, **kwargs):\n return _uhd_swig.usrp_sink_sptr__post(self, *args, **kwargs)", "def cli_formatter(self, data):\r\n if data:\r\n self._generic_cli_formatter(self.Response, data)", "def _FormatMessage(self, message):\n script_name = os.path.basename(sys.argv[0])\n timestamp = datetime.now().isoformat()\n formatted_message = '[{0:s}] {1:s}: {2:s} - {3:s}\\n'.format(\n timestamp, script_name, self._sender, message)\n return formatted_message", "def format_package_output(self):\n if self.package_type == \"TCP\":\n\n content_format = \"[{package_type}]\\t[{timestamp_num}\\t{timestamp}]\\t{src_ip}:{src_port}({src_mac}) ----->\" \\\n \"{dst_ip}:{dst_port}({dst_mac})\\t\" \\\n \"SEQ={tcp_seq}\\tACK={tcp_ack}\\tFLAGS={tcp_flags_list}\\tWIN={tcp_win}\\t\" \\\n \"DATA={tcpudp_data}\\t\" \\\n \"ttl={ttl}\\tDATA_BINARY={tcpudp_data_binary}\\tLEN={len_tcpudp_data}\"\n content = content_format.format(\n package_type=self.package_type,\n timestamp_num=self.timestamp_num,\n timestamp=self.timestamp,\n src_ip=self.src_ip,\n src_port=self.src_port,\n src_mac=self.src_mac,\n dst_ip=self.dst_ip,\n dst_port=self.dst_port,\n dst_mac=self.dst_mac,\n tcp_seq=self.tcp_seq,\n tcp_ack=self.tcp_ack,\n tcp_flags_list=self.tcp_flags_list,\n tcp_win=self.tcp_win,\n tcpudp_data=self.tcpudp_data,\n tcpudp_data_binary=self.tcpudp_data_binary,\n len_tcpudp_data=self.len_tcpudp_data,\n ttl=self.ttl\n )\n elif self.package_type == \"UDP\":\n content_format = \"[{package_type}]\\t[{timestamp_num}\\t{timestamp}]\\t{src_ip}:{src_port}({src_mac}) ----->\" \\\n \"{dst_ip}:{dst_port}({dst_mac})\\t\" \\\n \"ttl={ttl}\\tDATA_BINARY={tcpudp_data_binary}\\tLEN={len_tcpudp_data}\"\n content = content_format.format(\n package_type=self.package_type,\n timestamp_num=self.timestamp_num,\n timestamp=self.timestamp,\n src_ip=self.src_ip,\n src_port=self.src_port,\n src_mac=self.src_mac,\n dst_ip=self.dst_ip,\n dst_port=self.dst_port,\n dst_mac=self.dst_mac,\n tcpudp_data_binary=self.tcpudp_data_binary,\n len_tcpudp_data=self.len_tcpudp_data,\n ttl=self.ttl\n )\n elif self.package_type.startswith(\"ICMP\"):\n content_format = \"[{package_type}]\\t[{timestamp_num}\\t{timestamp}]\\t{src_ip}:{src_port}({src_mac}) ----->\" \\\n \"{dst_ip}:{dst_port}({dst_mac})\\t{icmp_type}:{icmp_code}[{icmp_message}]\\t\" \\\n \"ttl={ttl}\\tDATA_BINARY={tcpudp_data_binary}\\tLEN={len_tcpudp_data}\"\n content = content_format.format(\n package_type=self.package_type,\n timestamp_num=self.timestamp_num,\n timestamp=self.timestamp,\n src_ip=self.src_ip,\n src_port=self.src_port,\n src_mac=self.src_mac,\n dst_ip=self.dst_ip,\n dst_port=self.dst_port,\n dst_mac=self.dst_mac,\n tcpudp_data_binary=self.tcpudp_data_binary,\n len_tcpudp_data=self.len_tcpudp_data,\n icmp_type=self.icmp_type,\n icmp_code=self.icmp_code,\n icmp_message=self.icmp_message,\n ttl=self.ttl\n )\n return content", "def _format_and_write(self, message, values, process_key_num, level):\n if not process_key_num:\n raise 
ValueError('process_key_num not defined')\n\n message = self._formatted_string(message, values)\n\n if message_id.get():\n message += f' RequestId={message_id.get()}'\n if correlation_id.get():\n message += f' CorrelationId={correlation_id.get()}'\n if inbound_message_id.get():\n message += f' InboundMessageId={inbound_message_id.get()}'\n if interaction_id.get():\n message += f' InteractionId={interaction_id.get()}'\n\n message += f' ProcessKey={self.process_key_tag + process_key_num}'\n\n self.logger.log(level, message)", "def post_flow_form(self, req, **_kwargs):\n if req.POST:\n res = Response()\n s = self.api.process_flow_message(req.json)\n res.text = s if PYTHON3 else unicode(s, \"utf-8\")\n return res\n return Response(status=400) # bad request", "def echo(params):\n return params['message']", "def format(self, *args):\n\t\tweb.header('Content-Type', 'application/json; charset=utf-8')\n\t\treturn json.dumps(self.content)", "def _create_message(self, msg):\n head = msg[\"head\"]\n body = msg[\"body\"]\n body = body.format(**self.data)\n length = len(body)\n head = head.format(length=length, **self.data)\n return head + body", "def protocol_output(self, message, req=None):\n try:\n # so, utf16 doubles the size of the FLAP packets, which\n # really limits our max message size. if none of the ordinals\n # are outside the 7bit ascii range, convert to ascii bytes\n if not [ch for ch in message if ord(ch) > 127]:\n message = message.encode('us-ascii')\n\n # i don't know what's going on here anymore.. let's try something\n # completely different!\n message = message.replace('&', '&amp;')\n message = message.replace('<', '&lt;')\n message = message.replace('>', '&gt;')\n message = newline_re.sub('<br>', message)\n\n # AIM reacts indignantly to overlong messages, so we need to\n # wrap. try not to break up html tags injected by colorlib.\n if not hasattr(req, 'chat'):\n req.chat = None\n if not hasattr(req, 'aim'):\n req.aim = self.oscar_connection\n\n if req.chat:\n width = 2048\n func = req.chat.sendMessage\n else:\n width = 2545 # longer than chatrooms, who knows...\n func = req.aim.sendMessage\n\n # unicode stuff takes two bytes due to shitty utf-16\n if isinstance(message, unicode):\n width = int(width / 2) - 1\n\n for line in self.xmlwrap(message, width):\n args = [line]\n if not req.chat:\n if not req.nick:\n req.nick = req.sendto\n args.insert(0, req.nick)\n reactor.callFromThread(func, *args)\n\n # don't spam ourselves off the server\n sleep(1)\n\n except Exception, error:\n self.log.exception(error)", "def toData(self):\n\n lines = []\n # 1. Request and protocol version\n lines.append(self.request + \" \" + BANNER)\n # 2. Request arguments\n lines.extend(['%s: %s' % (arg, self.args[arg]) for arg in self.args])\n # 3. 
End of message (double CR-LF)\n data = \"\\r\\n\".join(lines) + \"\\r\\n\\r\\n\"\n # In debug mode, parse our own message to check it is well-formed\n assert checkMessage(data), \"Bad generated message: \" + data\n return data", "def output_message(message):\n output = json.dumps({\"error\": message}, indent=4)\n return output", "def __create_msg(self, ping):\n now = rospy.get_rostime()\n output = {\n \"info\": {},\n \"timestamp\": int(now.secs * 1e3 + now.nsecs * 1e-6),\n \"data\": ping.T.tolist()\n }\n return json.dumps(output)", "def post():\n pass", "def post(self):\n send_slack_log('Entered /slack/post_msg')\n send_slack_log('Request info:')\n send_slack_log(str(request.form))\n # unknown request.form\n trigger_id = request.form['trigger_id']\n channel_id = request.form['channel_id']\n response = open_form(channel_id,\n trigger_id,\n config['slack_post_form_path'])\n send_slack_log('Response info:')\n send_slack_log(str(response))\n return 'Please enter the new msg information in the form'", "def _process_message(self, response):\n message = str()\n try:\n message = response.json()\n except (simplejson.JSONDecodeError, ValueError) as e:\n message = response.text\n return message", "def make_stdout(data):\n return ' 1553110400.424 5583 5658 D Tag: %s' % data", "def submitPost(self):\n headers, params, proxy = self.getHeaderParamProxyInfo()\n try:\n resp = requests.post(self.FullURL, data=self.PostData, headers=headers, params=params, proxies=proxy, verify=False)\n return str(resp.content)\n except ConnectionError as ce:\n try:\n self.postErrorMessage('[-] Cannot connect to {url}. Server response is {resp} Server error code is {code}'.\n format(url=self.FullURL, resp=ce.message[0], code=ce.message[1][0]))\n except:\n self.postErrorMessage('[-] Cannot connect to ' + self.FullURL)\n except:\n self.postErrorMessage('[-] Cannot connect to ' + self.FullURL)", "def on_post(self, req, resp):\n self._set_default_response(resp)\n\n string = req.media.get('string')\n if string is None:\n resp.body = \"\"\"{ \"result\": \"error: request payload must contain a JSON object with a 'string' attribute\" }\"\"\"\n else:\n resp.status = falcon.HTTP_200\n result = \"pass\" if self._validate(string) else \"fail\"\n resp.body = json.dumps({'result': result})", "async def process_post(self, form: dict) -> str:\n\n self.__class__._vet_keys({'type', 'data'}, set(form.keys())) # all tokens need type and data\n\n if form['type'] == 'schema-claim-def-send':\n # write schema and claim def to ledger; respond with dict on both\n self.__class__._vet_keys(\n {'schema', 'attr-names'},\n set(form['data'].keys()),\n hint='data')\n self.__class__._vet_keys(\n {'issuer-did', 'name', 'version'},\n set(form['data']['schema'].keys()),\n hint='schema')\n schema_json = self.send_schema(json.dumps({k: form['data'][k] for k in form['data'] if k in keys}))\n schema = json.loads(schema_json)\n claim_def_json = self.send_claim_def(schema)\n\n # cache schema metadata en passant for future use\n self.schema_metadata = {\n 'issuer': form['data']['issuer-did'],\n 'name': form['data']['schema-name'],\n 'version': form['data']['schema-version'],\n 'seq_no': schema['seqNo'],\n 'json': schema_json,\n 'claim_def_json': claim_def_json}\n\n return json.dumps({'schema': schema, 'claim-def': json.loads(claim_def_json)}) \n\n elif form['type'] == 'schema-claim-def-lookup':\n # init schema and claim def; respond with dict on both\n self.__class__._vet_keys(\n {'schema', 'attr-names'},\n set(form['data'].keys()),\n hint='data')\n 
self.__class__._vet_keys(\n {'issuer-did', 'name', 'version'},\n set(form['data']['schema'].keys()),\n hint='schema')\n schema_json = self.send_schema(json.dumps({k: form['data'][k] for k in form['data'] if k in keys}))\n schema = json.loads(schema_json)\n claim_def_json = self.send_claim_def(schema)\n\n # cache schema metadata en passant for future use\n self.schema_metadata = {\n 'issuer': form['data']['issuer-did'],\n 'name': form['data']['schema-name'],\n 'version': form['data']['schema-version'],\n 'seq_no': schema['seqNo'],\n 'json': schema_json,\n 'claim_def_json': claim_def_json}\n\n return json.dumps({'schema': schema, 'claim-def': json.loads(claim_def_json)}) \n\n elif form['type'] == 'set-master-secret':\n self.__class__._vet_keys({'label'}, set(form['data'].keys()), hint='data')\n if self.schema_metadata == None:\n raise ValueError('Schema metadata not set')\n await self.create_master_secret(form['data']['label'])\n await self.store_claim_req(\n self.schema_metadata['issuer'],\n self.schema_metadata['seq_no'],\n self.schema_metadata['claim_def_json'])\n\n return json.dumps({})\n\n elif form['type'] in ('claim-request', 'proof-request'):\n self.__class__._vet_keys(\n {'claim-filter'},\n set(form['data'].keys()),\n hint='data')\n self.__class__._vet_keys(\n {'attr-match', 'predicate-match'},\n set(form['data']['claim-filter'].keys()),\n hint='claim-filter')\n # TODO: predicates\n\n resp_proxy_json = self._response_from_proxy(form, 'prover-did')\n if resp_proxy_json != None:\n return resp_proxy_json # it's proxied\n\n # it's local, carry on\n schema_json, schema_seq_no, claim_def_json = self._schema_and_claim_def_info(form['data'])\n find_req = {\n 'nonce': str(int(time() * 1000)),\n 'name': 'find_req_0', # configure this?\n 'version': '0', # configure this?\n 'requested_attrs': {\n '{}_uuid'.format(attr): {\n 'schema_seq_no': schema_seq_no,\n 'name': attr\n } for attr in form['data']['claim-filter']['attr-match']\n },\n 'requested_predicates': {\n # TODO: predicates\n }\n }\n filter_enc = {k: asc2decstr(form['data']['claim-filter']['attr-match'][k])\n for k in form['data']['claim-filter']['attr-match']}\n (claim_uuids, claims_found_json) = await self.get_claims_for_proof_req(json.dumps(find_req), filter_enc)\n assert(len(claim_uuids) == 1)\n claims_found = json.loads(claims_found_json)\n\n if form['type'] == 'claim-request':\n return json.dumps({\n 'proof-req': find_req,\n 'claims': claims_found\n })\n\n # FIXME: what if there are multiple matching claims to prove? 
How to encode requested attrs/preds?\n print(\"\\n\\n^^^ SK CLAIMS FOR PROOF {}\\n\".format(json.dumps(claims_found, indent=4)))\n claim_uuid = claim_uuids.pop()\n requested_claims = {\n 'self_attested_attributes': {},\n 'requested_attrs': {\n attr: [claim_uuid, True]\n for attr in find_req['requested_attrs'] if attr in claims_found['attrs']\n },\n 'requested_predicates': {\n pred: claim_uuid\n for pred in find_req['requested_predicates']\n }\n }\n\n print(\"\\n\\n^^^ SK REQ_CLAIMS {}\\n\".format(json.dumps(requested_claims, indent=4)))\n proof_json = await self.create_proof(\n find_req, \n json.loads(schema_json),\n self._master_secret,\n json.loads(claim_def_json),\n requested_claims)\n return json.dumps({\n 'proof-req': find_req,\n 'proof': json.loads(proof_json)\n })\n\n elif form['type'] == 'verification-request':\n self.__class__._vet_keys({'proof-req', 'proof'}, set(form['data'].keys()), hint='data')\n\n resp_proxy_json = self._response_from_proxy(form, 'verifier-did')\n if resp_proxy_json != None:\n return resp_proxy_json # it's proxied\n\n # it's local, carry on\n schema_json, schema_seq_no, claim_def_json = self._schema_and_claim_def_info(form['data'])\n return await self.verify_proof(\n form['data']['proof-req'],\n form['data']['proof'],\n json.loads(schema_json),\n json.loads(claim_def_json))\n\n elif form['type'] == 'claim-hello':\n resp_proxy_json = self._response_from_proxy(form, 'prover-did')\n if resp_proxy_json != None:\n return resp_proxy_json # it's proxied\n\n # it's local, carry on\n schema_json, schema_seq_no, claim_def_json = self._schema_and_claim_def_info(form['data'])\n if self.claim_req_json is None: # FIXME: support multiple schema, a claim req per schema\n await self.store_claim_req(\n json.loads(schema_json)['dest'],\n schema_seq_no,\n claim_def_json)\n\n return self.claim_req_json\n\n elif form['type'] == 'claim-create':\n self.__class__._vet_keys({'claim-req', 'claim-attrs'}, set(form['data'].keys()), hint='data')\n\n # it's local, carry on (no use case for proxy claim creation, so far)\n _, rv = await self.create_claim(\n json.dumps(form['data']['claim-req']),\n {k: [form['data']['claim-attrs'][k], asc2decstr(form['data']['claim-attrs'][k])]\n for k in form['data']['claim-attrs']})\n return rv\n\n elif form['type'] == 'claim-store':\n self.__class__._vet_keys({'claim-req'}, set(form['data'].keys()), hint='data')\n\n resp_proxy_json = self._response_from_proxy(form, 'prover-did')\n if resp_proxy_json != None:\n return resp_proxy_json # it's proxied\n\n # it's local, carry on\n self.store_claim(json.dumps(form['data']['claim']))\n return json.dumps({})\n\n else: # token-type\n raise ValueError('Unsupported token - unsupported type field')", "def message(self, text):\n\n if type(text) in (bytes, str):\n T = text\n else:\n # list probably:\n T = '\\n'.join(text)\n print(('-'*60))\n print(T)\n print(('='*60))", "def post(self):\n try:\n msgId = int(request.form['text'])\n except ValueError:\n return {\"text\": \"please enter a valid integer as msg Id.\"}\n msg = str(read_msg(msgId))\n return {\"text\": msg}", "def message_post(self, data, system):\r\n\r\n event_info = dict()\r\n event_info['problem_id'] = self.location_string\r\n event_info['student_id'] = system.anonymous_student_id\r\n event_info['survey_responses'] = data\r\n _ = self.system.service(self, \"i18n\").ugettext\r\n\r\n survey_responses = event_info['survey_responses']\r\n for tag in ['feedback', 'submission_id', 'grader_id', 'score']:\r\n if tag not in survey_responses:\r\n # This is a 
student_facing_error\r\n return {\r\n 'success': False,\r\n # Translators: 'tag' is one of 'feedback', 'submission_id',\r\n # 'grader_id', or 'score'. They are categories that a student\r\n # responds to when filling out a post-assessment survey\r\n # of his or her grade from an openended problem.\r\n 'msg': _(\"Could not find needed tag {tag_name} in the \"\r\n \"survey responses. Please try submitting \"\r\n \"again.\").format(tag_name=tag)\r\n }\r\n try:\r\n submission_id = int(survey_responses['submission_id'])\r\n grader_id = int(survey_responses['grader_id'])\r\n feedback = str(survey_responses['feedback'].encode('ascii', 'ignore'))\r\n score = int(survey_responses['score'])\r\n except:\r\n # This is a dev_facing_error\r\n error_message = (\r\n \"Could not parse submission id, grader id, \"\r\n \"or feedback from message_post ajax call. \"\r\n \"Here is the message data: {0}\".format(survey_responses)\r\n )\r\n log.exception(error_message)\r\n # This is a student_facing_error\r\n return {\r\n 'success': False,\r\n 'msg': _(\r\n \"There was an error saving your feedback. Please \"\r\n \"contact course staff.\"\r\n )\r\n }\r\n\r\n xqueue = system.get('xqueue')\r\n if xqueue is None:\r\n return {'success': False, 'msg': _(\"Couldn't submit feedback.\")}\r\n qinterface = xqueue['interface']\r\n qtime = datetime.strftime(datetime.now(UTC), xqueue_interface.dateformat)\r\n anonymous_student_id = system.anonymous_student_id\r\n queuekey = xqueue_interface.make_hashkey(str(system.seed) + qtime +\r\n anonymous_student_id +\r\n str(len(self.child_history)))\r\n\r\n xheader = xqueue_interface.make_xheader(\r\n lms_callback_url=xqueue['construct_callback'](),\r\n lms_key=queuekey,\r\n queue_name=self.message_queue_name\r\n )\r\n\r\n student_info = {\r\n 'anonymous_student_id': anonymous_student_id,\r\n 'submission_time': qtime,\r\n }\r\n contents = {\r\n 'feedback': feedback,\r\n 'submission_id': submission_id,\r\n 'grader_id': grader_id,\r\n 'score': score,\r\n 'student_info': json.dumps(student_info),\r\n }\r\n\r\n error, error_message = qinterface.send_to_queue(\r\n header=xheader,\r\n body=json.dumps(contents)\r\n )\r\n\r\n # Convert error to a success value\r\n success = True\r\n message = _(\"Successfully saved your feedback.\")\r\n if error:\r\n success = False\r\n message = _(\"Unable to save your feedback. Please try again later.\")\r\n log.error(\"Unable to send feedback to grader. 
location: {0}, error_message: {1}\".format(\r\n self.location_string, error_message\r\n ))\r\n else:\r\n self.child_state = self.DONE\r\n\r\n # This is a student_facing_message\r\n return {'success': success, 'msg': message}", "def format(self, kwmsg):\n return kwmsg[\"msg\"]", "def post_one(message, rnd, cid, cookie, fontsize = 25, mode = 1, color = 16777215, playTime = 0, pool = 0, fake_ip = False):\n headers = {'Origin': 'http://static.hdslb.com', 'X-Requested-With': 'ShockwaveFlash/15.0.0.223', 'Referer': 'http://static.hdslb.com/play.swf', 'User-Agent': BILIGRAB_UA, 'Host': 'interface.bilibili.com', 'Content-Type': 'application/x-www-form-urlencoded', 'Cookie': cookie}\n if fake_ip:\n FAKE_IP = \".\".join(str(randint(1, 255)) for i in range(4))\n headers.update({'X-Forwarded-For' : FAKE_IP, 'Client-IP' : FAKE_IP})\n #print(headers)\n url = 'http://interface.bilibili.com/dmpost'\n try:\n date = getdate()\n payload = {'fontsize': int(fontsize), 'message': str(message), 'mode': int(mode), 'pool': pool, 'color': int(color), 'date': str(date), 'rnd': int(rnd), 'playTime': playTime, 'cid': int(cid)}\n encoded_args = urllib.parse.urlencode(payload)\n r = requests.post(url, data = encoded_args, headers = headers)\n #print(r.text)\n if int(r.text) <= 0:\n logging.warning('Line failed:')\n logging.warning('Message:' + str(message))\n logging.warning('ERROR Code: ' + str(r.text))\n else:\n print(message)\n #logging.info(message)\n except Exception as e:\n print('ERROR: Line failed: %s' % e)\n print('Payload:' + str(payload))\n pass", "def _send_command_postprocess(output):\n return \"\\n\".join(filter(len, output.split('\\n')))", "def define_log_post_format_hooks(self):\n # TODO remove this once structlog supports hooks or handlers\n # these hooks accept a 'msg' and do not return anything\n return []", "def post(self):\n # use parser and find the user's query\n args = parser.parse_args()\n title = args['title']\n author = model.encode_author(args['author'])\n text = args['text']\n\n X = model.vector_and_stack(title=title, text=text, author=author)\n\n prediction = model.predict(X)\n\n # Output either 'Negative' or 'Positive' along with the score\n if round(prediction[0]) == 0:\n pred_text = 'Reliable News'\n else:\n pred_text = 'Unreliable News'\n\n # round the predict proba value and set to new variable\n confidence = round(prediction[0], 3)\n\n # create JSON object\n output = {'prediction': pred_text, 'fake_rate': confidence}\n\n return output, 200", "def post(self, *args, **kwargs):\n as_text = kwargs.pop('as_text', True)\n kwargs['follow_redirects'] = kwargs.get('follow_redirects', True)\n response = self.app.post(*args, **kwargs)\n if as_text:\n return response.get_data(as_text=True)\n return response", "def format(cmd, src, dst, msg):\n mgs_dict = {\n \"cmd\": cmd,\n \"src\": src,\n \"dst\": dst,\n \"msg\": msg\n }\n \n return json.dumps(mgs_dict)", "def request_message_txt(self):\r\n headers, body = self.create_request()\r\n\r\n header_txt = \"\\n\".join(\r\n \"{}: {}\".format(h, v) for h, v in sorted(headers.items())\r\n )\r\n body_txt = json.dumps(body, indent=2, sort_keys=True, ensure_ascii=False).encode('utf-8')\r\n\r\n return header_txt + \"\\n\\n\" + body_txt", "def output(self):\n \n str_title_len = 50\n str_date_len = 40\n str_purpose_len = 30\n str_price_len = 10\n str_payer_len = 20\n #str_comment_len =\n \n if len(self.title) > (str_title_len - 2):\n out_title = self.title[:str_title_len - 2] + \" |\"\n else:\n out_title = self.title + (\" \" * (str_title_len - len(self.title) 
- 2)) + \" |\"\n \n # if date is presented with <datetime> object, then\n # then output it in format %d.%m.%y (31.12.99)\n if type(self.date) is datetime.datetime:\n out_date = \" \" + datetime.datetime.strftime(\"%d.%m.%y\") + \" |\"\n # or output as string otherwise\n else:\n if len(self.date) > (str_date_len - 4):\n out_date = \" \" + self.date[:str_date_len - 4] + \" |\"\n else:\n out_date = \" \" + self.date + (\" \" * (str_date_len - len(self.date) - 4)) + \" |\"\n \n if len(self.purpose) > (str_purpose_len - 4):\n out_purpose = \" \" + self.purpose[:str_purpose_len - 4] + \" |\"\n else:\n out_purpose = \" \" + self.purpose + (\" \" * (str_purpose_len - len(self.purpose) - 4)) + \" |\"\n \n # enormous sums aren't supported (over 9999999 at the moment)\n if len(str(self.price)) > (str_price_len - 4):\n raise Exception\n out_price = (' ' * (str_price_len - len(str(self.price)) - 4) ) + str(self.price) + ' |'\n \n if len(self.payer) > (str_payer_len - 2):\n out_payer = \" \" + self.payer[:str_payer_len - 2]\n else:\n out_payer = \" \" + self.payer + (\" \" * (str_payer_len - len(self.payer) - 2))\n \n out_line = out_title + out_date + out_purpose + out_price + out_payer\n return out_line", "def parse(message):\n html = render(message['text'])\n\n return html", "def output_message(message):\n print message\n return json.dumps({\"vms\": [{\"error\": message}]}, indent=4)", "def do_POST(self):\n self.send_response(200)\n self.send_header('Content-type','text/html')\n self.send_header('Access-Control-Allow-Origin','*')\n self.end_headers()\n # Send the html message\n self.wfile.write(\"Aivis Panovs, 161REB125\")\n return", "def post(self):\n msg = latest_deployment()\n msgToSend = msg[0] + \" was deployed at \" + msg[1]\n send_slack_log(msgToSend)\n return msgToSend", "def construct_message(self):\n msg_type = self.msg_type\n if msg_type == \"PUBMSG\":\n msg_type = \"PRIVMSG\"\n ret = \"{} {}\".format(msg_type, self.target)\n if self.content:\n ret += \" :{}\".format(self.content)\n return ret + \"\\r\\n\"", "def render(self):\n self._render_text = self.content.replace('\\n', '<br>') # deal with new line\n return render_str(\"post.html\", p = self)", "def format(self, record):\n data = record.__dict__.copy()\n\n # if record.args:\n # msg = record.msg % record.args\n # else:\n # msg = record.msg\n\n data.update(\n username=getpass.getuser(),\n time=datetime.now(),\n host=gethostname(),\n #args=tuple(unicode(arg) for arg in record.args)\n args=record.args\n )\n if 'exc_info' in data and data['exc_info']:\n data['exc_info'] = self.formatException(data['exc_info'])\n return data", "def reply_handler(msg):\n print \"Server Response: %s, %s\" % (msg.typeName, msg)", "def reply_handler(msg):\n print \"Server Response: %s, %s\" % (msg.typeName, msg)", "def get_as_text(self):\n d = {\n 'user': self.user or self.name,\n 'date': self.submit_date,\n 'text': self.text,\n 'domain': self.site.domain,\n 'url': self.get_absolute_url()\n }\n return _('Posted by %(user)s at %(date)s\\n\\n%(review)s\\n\\nhttp://%(domain)s%(url)s') % d", "def _massage_raw_pg_output_vals(self):\n pass", "def create_preview(message):", "def postprocess(self, text):\r\n return text", "def submit_blogpost(request):\n from_address = request.POST.get('from_address')\n message = request.POST.get('message')\n rpc_raw = rpcRawProxy(helpers.get_rpc_url())\n\n if request.POST.get('wallet_passphrase', False):\n rpc_raw.walletpassphrase(request.POST.get('wallet_passphrase'), 60)\n try:\n message += \"|\" + helpers.sign_string(rpc_raw, 
message, from_address)\n except JSONRPCException, e:\n if \"passphrase\" in e.error['message']:\n return HttpResponse(json.dumps({\n \"status\": \"error\",\n \"message\":\"Wallet locked.\",\n \"type\":\"wallet_locked\"\n }, default=helpers.json_custom_parser), content_type='application/json')\n else:\n return HttpResponse(json.dumps({\n \"status\": \"error\",\n \"message\":\"Error while trying to sign public key.\"\n }, default=helpers.json_custom_parser), content_type='application/json')\n\n message = helpers.format_outgoing(message)\n opreturn_key = external_db.post_data(message)\n\n op_return_data = \"pm\" #program code (peermessage), 2 chars\n op_return_data += \"blg\" #opcode (blogpost), 3 chars\n op_return_data += opreturn_key #key pointing to external datastore\n\n rpc_processed = rpcProcessedProxy()\n blockchain_func.submit_opreturn(rpc_processed, from_address, op_return_data)\n return HttpResponse(json.dumps({\n \"status\": \"success\"\n }, default=helpers.json_custom_parser), content_type='application/json')", "def format_result(self):\n return ('{}\\n\\n{}'.format(\n LogParser.format_dict(LogParser.order_dict(self.urls)[:3]),\n LogParser.format_dict(LogParser.order_dict(self.status_codes))))", "def create_response(content, debug, debug_cmd, cmd_buttons=cmd_buttons):\n return \"\"\"\\\n<html>\n<form action=\"/\" method=\"post\">\n<textarea name=\"input\" style=\"width:100%%;height:25%%;\" placeholder=\"%(workingfile)s\">%(content)s</textarea>\n<input type=\"submit\" value=\"Submit\">\n</form>\n<hr />\n%(cmd_buttons)s\n<hr />\n<h3>Debug (%(debug_cmd)s):</h3>\n<pre>%(debug)s</pre>\n</html>\"\"\" % {\"content\": content,\n \"debug\": debug,\n \"debug_cmd\": debug_cmd,\n \"cmd_buttons\": cmd_buttons,\n \"workingfile\": workingfile}", "def handle_message(self, data, task_type, msgtype):\n data['message'] = data['message'].upper()\n return data", "def render_POST(self, request):\n jsonPayload = request.content.read()\n jsonParsed = json.loads(jsonPayload)\n if \"SeenByFakeDocker\" in jsonParsed:\n raise Exception(\"already seen by a fake docker?!\")\n jsonParsed[\"SeenByFakeDocker\"] = 42\n if not self.rawStream:\n request.setHeader(\"Content-Type\", \"application/json\")\n else:\n request.setHeader(\"Content-Type\", \"application/vnd.docker.raw-stream\")\n if self.chunkedResponse:\n request.setHeader(\"Transfer-Encoding\", \"chunked\")\n return json.dumps(jsonParsed)", "def post_message(self, component_instance):\n self.write_data(component_instance)", "def post(self):\n\n\t\treturn MessageStore.create(api.payload), 201", "def do_POST(self):\n if not self.path.endswith(\"/\"): self.path += \"/\"\n if self.path == \"/annotate/\":\n # Read message\n length = int(self.headers.get('content-length'))\n msg = self.rfile.read(length)\n\n # Do the annotation\n doc = Document()\n parseFromDelimitedString(doc, msg)\n self.annotator.annotate(doc)\n\n with io.BytesIO() as stream:\n writeToDelimitedString(doc, stream)\n msg = stream.getvalue()\n\n # write message\n self.send_response(HTTPStatus.OK)\n self.send_header(\"Content-Type\", \"application/x-protobuf\")\n self.send_header(\"Content-Length\", len(msg))\n self.end_headers()\n self.wfile.write(msg)\n\n else:\n self.send_response(HTTPStatus.BAD_REQUEST)\n self.end_headers()" ]
[ "0.6517435", "0.6332223", "0.6274383", "0.6151796", "0.6082813", "0.60550827", "0.5994535", "0.580875", "0.58030015", "0.57783115", "0.5777758", "0.57161885", "0.56622404", "0.5656379", "0.56329155", "0.5609236", "0.5584325", "0.5571311", "0.55095625", "0.5502143", "0.5492511", "0.54570204", "0.54463303", "0.5421501", "0.5417131", "0.541599", "0.5402953", "0.5394032", "0.53855395", "0.53842014", "0.53774637", "0.5375555", "0.53752005", "0.5369244", "0.5352171", "0.5352171", "0.5352171", "0.5352171", "0.5351647", "0.5343023", "0.5341834", "0.5341427", "0.5333841", "0.5331285", "0.53226", "0.5301273", "0.52716947", "0.5270853", "0.526512", "0.52605027", "0.52596146", "0.5237432", "0.523042", "0.52293193", "0.52278346", "0.52245355", "0.52243936", "0.5218817", "0.5216881", "0.5183451", "0.51747924", "0.51732", "0.51696223", "0.5161068", "0.5157474", "0.51436317", "0.5140059", "0.5136467", "0.5136257", "0.5121058", "0.51203156", "0.5114963", "0.5097948", "0.50909084", "0.5080549", "0.5080542", "0.50720066", "0.506965", "0.50657123", "0.50652224", "0.5063739", "0.50621486", "0.5061248", "0.5055845", "0.50553775", "0.50549185", "0.5054511", "0.5054511", "0.5050734", "0.5049403", "0.5041933", "0.5038268", "0.5036686", "0.503435", "0.5032393", "0.502574", "0.5025284", "0.50250375", "0.5024519", "0.50225395" ]
0.71348196
0
DO NOT TOUCH THIS FUNCTION. IT IS USED FOR COMPUTER EVALUATION OF YOUR CODE
def main(): test_cases = ast.literal_eval(sys.argv[1]) results = str(my_info()) + '\t\t' for test_case in test_cases: mode = test_case[0] id_1 = int(test_case[1]) id_2 = int(test_case[2]) if mode == 'jc': results += str(Jaccard_Coefficient(id_1, id_2)) + '\t\t' elif mode == 'cc': results += str(Correlation_Coefficient(id_1, id_2)) + '\t\t' else: exit('bad command') print results + '\n'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def exercise_b2_106():\r\n pass", "def exercise_b2_113():\r\n pass", "def exercise_b2_107():\r\n pass", "def exercise_b2_53():\r\n pass", "def exo2():", "def exercise_b2_69():\r\n pass", "def substantiate():", "def cx():", "def exercise_b2_70():\r\n pass", "def exercise_b2_98():\r\n pass", "def exercise_b2_52():\r\n pass", "def exercise_b2_39():\r\n pass", "def exercise_b2_27():\r\n pass", "def exercise_b2_82():\r\n pass", "def exercise_b2_56():\r\n pass", "def code():", "def exercise_b2_26():\r\n pass", "def exercise_b2_43():\r\n pass", "def exercise_b2_93():\r\n pass", "def support(self):", "def regular(self):", "def func():", "def exercise_b2_95():\r\n pass", "def fn():", "def degibber(self):", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def preprocess(self):", "def sth():", "def CL(self):", "def g():", "def _regr_basic():", "def main(self):", "def falcon():", "def firstFunction(self):", "def realsense():\n pass", "def mezclar_bolsa(self):", "def problem_298():\n pass", "def simple():", "def simple():", "def exercise_b2_86():\r\n pass", "def implement(self):\n\t#@DEBUG remove comments", "def exercise_2b():\n\n return", "def preprocess_main():", "def apply(self) -> None:", "def apply(self) -> None:", "def cpp_function(self):", "def use(self):", "def check():", "def example_function():", "def script(self):", "def apply(self):", "def main():\n\tpass", "def task4_1(self):\n\n pass", "def __call__():", "def __call__():", "def __call__():", "def __call__():", "def __call__():", "def main():\n pass", "def smarter():\r\n pass", "def _optimise(self):\n pass", "def input(self):", "def util():\n pass", "def util():\n pass", "def __call__(self) -> None:", "def function(self):\n raise NotImplementedError", "def think(s):", "def common(self):", "def test_4_4_1_1(self):\n pass", "def _prepare(self):", "def _prepare(self):", "def preprocess(self):\n raise RuntimeError(\"please implement this function!\")", "def result(self):", "def result(self):", "def base():", "def part_2():\n pass", "def section_4_8():\n pass", "def task4(self):\n\n pass", "def decide():", "def __call__(self):\n\t\treturn" ]
[ "0.6566932", "0.6499155", "0.64852047", "0.64321226", "0.6431028", "0.6361083", "0.6330522", "0.625579", "0.62346584", "0.6231363", "0.6221362", "0.61771435", "0.61722547", "0.6136739", "0.61362576", "0.6126891", "0.611284", "0.6068753", "0.6059188", "0.60250646", "0.59817815", "0.5976061", "0.5966837", "0.5963602", "0.5951094", "0.5939497", "0.5939497", "0.5939497", "0.5939497", "0.5939497", "0.5939497", "0.5939497", "0.5939497", "0.5939497", "0.5939497", "0.5939497", "0.5939497", "0.5939497", "0.5939497", "0.5939497", "0.5939497", "0.5939497", "0.5939497", "0.5939497", "0.5939497", "0.5939497", "0.5939497", "0.5927232", "0.59269655", "0.5904306", "0.5879552", "0.5806857", "0.57934594", "0.5792036", "0.5757048", "0.5741753", "0.570423", "0.56814384", "0.5645648", "0.5645648", "0.5635917", "0.5605269", "0.56014276", "0.5565781", "0.55558443", "0.55558443", "0.55326736", "0.55156124", "0.5482444", "0.5466039", "0.5453238", "0.54371274", "0.5406889", "0.53902453", "0.5389823", "0.5389823", "0.5389823", "0.5389823", "0.5389823", "0.5359883", "0.5336091", "0.53340304", "0.53223574", "0.53160954", "0.53160954", "0.5307753", "0.5289119", "0.5283473", "0.5281889", "0.52795047", "0.5279449", "0.5279449", "0.5276443", "0.52759063", "0.52759063", "0.527068", "0.5266801", "0.52518696", "0.5227285", "0.52254134", "0.5209496" ]
0.0
-1
Shorthand for assert. Saves 3 whole characters!
def ok_(expr, msg=None): if not expr: raise AssertionError(msg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_third_equal(self):\n self.assertEqual(heaviest_word(\"take me to semynak\"), \"semynak\")", "def test_sanity(self):\n self.assertEquals(2 + 2, 4)", "def test_spaces(self):\n self.assertValue({\n 'foo bar': 'something here',\n },\n \"foo_bar: something_here\\n\")", "def test_others(self):\n outputAssert = self.buildingTests([\"Hola me gust@ programar en ICC 1.03\"])\n self.assertTrue((outputAssert[0][4] == outputAssert[1][4] and outputAssert[0][5] == outputAssert[1][5]) ^ (outputAssert[0][4] == outputAssert[1][5]) , f\"El resultado debería ser: \\\"{outputAssert[1][5]}\\\"\")", "def test_valid_name_valid():\n assert valid_name(\"1\")\n assert valid_name(\"a\"*20)", "def test_three_word_sentence_is_three_words_long():\n \n given = \"three words here\"\n expected = 3\n actual = len(words(given))\n assert expected == actual", "def test_check_name_is_3_parts():\n check_name_length()", "def test():\r\n\tassert 1609693773.1609693773() == \"1609693773\", \"test failed\"\r\n\t#assert 1609693773.<function>(<values>) == <the result(s) you would like to have>, \"<the fail message>\"\r", "def test_assert_truth(self):\n\n # Confused? This video should help:\n #\n # http://bit.ly/about_asserts\n\n self.assertTrue(True) # This should be true", "def test_tc():\n assert 1 == 1", "def test_valid_name_invalid():\n assert not valid_name(\"\")\n assert not valid_name(\"a\"*21)", "def test_vec3_square(self):\n\n vec = Vec3(2, 3, 4)\n\n self.assertEqual(\"Vec3(2.0,3.0,4.0)\", str(vec))", "def assert_verbose(actual, expected):\n assert expected == actual, f\"Expected value: {expected}. But actual value is {actual}\"", "def main():\n assert_verbose(triple(\"hello\"), \"hhheeellllllooo\")\n assert_verbose(triple(\"kevin szuchet\"), \"kkkeeevvviiinnn ssszzzuuuccchhheeettt\")\n assert_verbose(triple(\" \"), \" \")\n assert_verbose(triple(\"\"), \"\")\n print(\"--- All tests passed ---\")", "def test_func(input, expected):\n from parenthetics import paren\n assert paren(input) == expected", "def test_vec3_square(self):\n\n vec = Vec3(2, 3, 4)\n\n self.assertEqual(\"Vec3(2.0,3.0,4.0)\", repr(vec))", "def test_index_lt_3(self):\n self.insert()\n data = self.tbl[:6]\n assert self.check(self.idata[:2], data)", "def test_three(self):\n name = get_formatted_name('david', 'malan', 'j')\n self.assertEqual(name, 'David J Malan')", "def test_valid_password_valid():\n assert valid_password(\"123456\")\n assert valid_password(\"abcdef\")", "def testExample(self, ref):\n assert 3 < 4", "def test_string():", "def test_validate_wc3(self):\r\n assert self.wc2_tree != 0", "def inner_test(param: str):\n self.assertEqual(param, '256')", "def test_basic():\n line = \"test\"\n assert wrap_line(line) == \"test\"", "def _assert(self, data):\n lengths = [len(r) for r in data]\n least = min (lengths)\n most = max (lengths)\n assert least == most", "def test3(self):\n cases = (\n (2**10*'a',),\n (2**10*'abcd',),\n #(2**20*'a',), # 1 MB, takes about 160 sec. 
on a 233 Mhz Pentium.\n )\n\n for i in range(len(cases)):\n res = self.compare(cases[i][0])\n if res is not None:\n d1, d2 = res\n message = cases[i][0]\n self.print_diff(message, d1, d2)\n assert res is None", "def test_T3():", "def test_T3():", "def test_single_char(self):\n self.assertTrue(all_unique_chars_no_set(\"a\"))\n self.assertTrue(all_unique_chars_no_set(\"b\"))", "def test_absolute_truth():\n assert True", "def test_random_string():\n for i in range(100000):\n if randomString()[-1] == 'f':\n return\n assert False", "def test_assert_bytes():\n if backwards.PY2: # pragma: Python 2\n # backwards.assert_bytes(bytearray('hello', 'utf-8'))\n backwards.assert_bytes('hello')\n nt.assert_raises(AssertionError, backwards.assert_bytes,\n unicode('hello'))\n else: # pragma: Python 3\n # backwards.assert_bytes(bytearray('hello', 'utf-8'))\n backwards.assert_bytes(b'hello')\n nt.assert_raises(AssertionError, backwards.assert_bytes,\n 'hello')", "def test_trailing_data(self):", "def test_example():\n x = 0\n y = 1\n assert x != y", "def test_hex():\n assert hex(Quantity(1, unit('m'))) == hex(1)", "def test_exactly(self):\n\n x = t.Exactly(\"x\")\n self.assertEqual(writePython(x),\n dd(\"\"\"\n _G_exactly_1, lastError = self.exactly('x')\n self.considerError(lastError, None)\n _G_exactly_1\n \"\"\"))", "def test_first_equal(self):\n self.assertEqual(heaviest_word(\"man i need a taxi up to ubud\"), \"taxi\")", "def assert_equal(self, first, second, msg=\"\"):\r\n assert first == second", "def test_single_char(self):\n self.assertTrue(all_unique_chars(\"a\"))\n self.assertTrue(all_unique_chars(\"b\"))", "def test_assert_unicode():\n if backwards.PY2: # pragma: Python 2\n # backwards.assert_unicode(unicode('hello'))\n # nt.assert_raises(AssertionError, backwards.assert_unicode, 'hello')\n backwards.assert_unicode('hello')\n nt.assert_raises(AssertionError, backwards.assert_unicode,\n unicode('hello'))\n nt.assert_raises(AssertionError, backwards.assert_unicode,\n bytearray('hello', 'utf-8'))\n else: # pragma: Python 3\n backwards.assert_unicode('hello')\n nt.assert_raises(AssertionError, backwards.assert_unicode, b'hello')\n nt.assert_raises(AssertionError, backwards.assert_unicode,\n bytearray('hello', 'utf-8'))", "def test_print3(self):\n writer = StringIO()\n collatz_print(writer, 900, 1000, 174)\n self.assertEqual(writer.getvalue(), \"900 1000 174\\n\")", "def test_with_single_space(self):\n self.assertEqual(escapespaces('Hi there'),\n 'Hi there')", "def test_error_type():\n assert error_type('{ \"from\" : \"\", \"to\" : \"\", \"success\" : false, \"error\" : \"\\\nSource currency code is invalid.\" }') == 1\n assert error_type('{ \"from\" : \"\", \"to\" : \"\", \"success\" : false, \"error\" : \"\\\nExchange currency code is invalid.\" }') == 2\n assert error_type('{ \"from\" : \"\", \"to\" : \"\", \"success\" : false, \"error\" : \"\\\nCurrency amount is invalid.\" }') == 3", "def test_message_letter(Message, letter):\n assert get_message_letter(Message) == letter", "def test_art_from_taste_space(self):", "def test_domino_with_3_numbers():\n assert compute(3) == 2, \"Not ok\"", "def test_has_letter(row):\n assert not sudoku.no_letters(row)", "def test_unicode_string(self):\n result = attributeAsLDIF(\"another key\", \"another value\")\n self.assertEqual(result, b\"another key: another value\\n\")", "async def test_assertion_rewriting(_):\n with pytest.raises(AssertionError) as e:\n assert 1 == 42\n assert \"42\" in str(e), f\"Assertion rewriting seems not to work, message was {e}\"", "def 
test_invalid_scalene():\n assert 'invalid' == classify_triangle(1,2,3)", "def test_str3(self):\n capturedOutput = io.StringIO()\n sys.stdout = capturedOutput\n r3 = Rectangle(1, 2, 3, 4, 5)\n print(r3)\n sys.stdout = sys.__stdout__\n str_r3 = \"[Rectangle] (5) 3/4 - 1/2\\n\"\n self.assertEqual(capturedOutput.getvalue(), str_r3)", "def test_before_space():\n \n \n assert(1 == before_space(\"1 2 3\"))\n assert(\"NO SPACE\" == before_space(\"1\"))\n assert(\"Error\" == before_space(None))", "def test_with_multiple_spaces(self):\n self.assertEqual(escapespaces('Hi there'),\n 'Hi&nbsp; there')", "def test_obtener_tipo_notas():\n nota1 = 10\n nota2 = 10\n nota3 = 10\n assert \"Regular\" == obtener_tipo_notas(nota1, nota2, nota3)", "def test_example():\n answer = True\n expected = True\n assert answer == expected", "def test_buoy_format2():\n with pytest.raises(AssertionError) as err_info:\n Check_BuoyDC.check_buoy_format(buoy_format_fail_2)\n assert str(err_info.value) == 'Input length incorrect, see instructions'", "def test_the_tests():\n\n assert True is True", "def test_conversion_modifiers() -> None:\n animal, name = (\"eel\", \"Bob\")\n assert f\"The {animal!s}'s name is {name!r}\" == \"The eel's name is 'Bob'\"", "def test_empty_string(self):\n self.assertTrue(all_unique_chars(\"\"))", "def test_str(self, r, rep):\n assert str(r) == rep", "def test_numbers(number):\n assert number ** 2 == number ** 2", "def test_second_equal(self):\n self.assertEqual(heaviest_word(\"what time are we climbing up to the volcano\"), \"volcano\")", "def assertion_passed(self, func):", "def test_str_magic_method():\n LINES = (\n \"One morn before me were three figures seen,\",\n \"And once more came they by:-alas! wherefore?\",\n )\n for line in LINES:\n assert(str(LineBuilder(line)) == line)", "def test_invalid_isosceles():\n assert 'invalid' == classify_triangle(1,1,3)", "def test03(self):\n\n t = tree(\"a\");\n s = str(t)\n self.assertEqual(re.sub(\"\\s+\", \"\", s), \"a;\") # Remove spaces and test equality", "def test_valid_password_invalid():\n assert not valid_password(\"\")\n assert not valid_password(\"1234567\")\n assert not valid_password(\"abcdefg\")", "def test_func():\n assert func(3) == 5 # noqa: S101", "def test_has_no_letters(row):\n assert sudoku.no_duplicates(row)", "def test_text_roundtrip():\n for text in (\"\", \"a\", \"Hello, world!\", \"9\" * 1000):\n assert text == String.read(String.to_bytes(text))", "def test_str(self):\r\n x = self.FWP({'x': 3})\r\n lines = ['FWP parameters:', 't:True', 'Application:None',\r\n 'Algorithm:None', 'Citation:None', 'x:3']\r\n self.assertEqual(str(x), '\\n'.join(lines))", "def _assert(ok, detail):\n\n if not ok:\n raise UserException(\"the project file is invalid\", detail)", "def test_fizzbuzz(self):\n\t\tself.assertEqual(fizz_buzz(15), \"FizzBuzz\", msg=\"Not Divisible by both 3 and 5\")", "def test_words():\n manage = \"stop hello\"\n words = manage\n assert words is not None\n assert len(words) == 10", "def test_buzz(self):\n\t\tself.assertEqual(fizz_buzz(6), \"Fizz\", msg=\"Failed 'Fizz' test i.e Not Divisible by 3\")", "def inner_test(param: bytes):\n self.assertEqual(param, b'Test bytes.')", "def test_strings_with_foo(self):\n write this test!", "def testA():\n \n cunittest.assert_equals(\"0.814663951\",before_space(\"0.814663951 Euros\"))\n cunittest.assert_equals(\"Euros\",after_space(\"0.814663951 Euros\"))", "def test_empty_string(self):\n self.assertTrue(all_unique_chars_no_set(\"\"))", "def assertVariableValue(self, file, a, b):\n 
file.write(\"ASSERT({} = {});\\n\".format(a, b))\n return", "def test_len(self):\n self.assertEqual(len(self.tester), 27)", "def test_len(self):\n self.assertEqual(len(self.tester), 27)", "def test_something(self):\n self.assertEqual(\n b\"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\\n<Foo>bar</Foo>\"\"\",\n self.successResultOf(to_xml(tags.Foo(\"bar\"))),\n )", "def test_case_05_not_legal_triangle(self):\n self.__assert_equals_test_case([(4, 6, 11)], 'NotATriangle')", "def _assert(space, w_check, desc=None):\n if w_check.tp == space.tp_str:\n source_orig = space.str_w(w_check)\n source = \"<? return %s ?>\" % source_orig\n bc = compile_php(None, source, space)\n interp = space.ec.interpreter\n w_res = interp.run_local_include(bc, interp.global_frame)\n if not w_res.is_true(space):\n space.ec.warn('assert(): Assertion \"%s\" failed' % source_orig)\n return space.w_Null\n if not w_check.is_true(space):\n space.ec.warn('assert(): Assertion failed')\n return space.w_Null\n return space.wrap(w_check.is_true(space))", "def test_containsOnly(self) -> None:\n assert containsOnly('.83', '0123456789.')\n assert not containsOnly('43221', '123')", "def test_for_correct_updating_two(self):\r\n assert increase_sentence_count_if_we_should('one. two. three. four.five.six.seven.eight. nine. ten.', 10, 'a') \\\r\n == (10, 'one. two. three. four.five.six.seven.eight. nine. ten.')", "def test_02_this_step_will_fail(self):\n\n self.assertIn(5, arr)", "def test_password_is_okay():\n\twith pytest.raises(Exception):\n\t\tassert password_is_ok('qqqqqqqq') == Exception('Password either has to contain a digit \\n or password has to contain uppercase \\n or password has to contain lowercase') \n\n\twith pytest.raises(Exception):\n\t\tassert password_is_ok('') == Exception('Password either has to contain a digit \\n or password has to contain uppercase \\n or password has to contain lowercase') \n\n\t\"\"\"test that valid passwords work\"\"\"\n\tassert password_is_ok('Q8qqqqqqqq') == True\n\tassert password_is_ok('q8qqqqqqqq') == True\n\tassert password_is_ok('Qqqqqqqqqq') == True\n\tassert password_is_ok('qqqqqqqqqq') == True", "def test_part1_example1(example1):\n assert aoc.part1(example1) == 2 + 2 + 654 + 33583", "def test_multiple_char_not_unique(self):\n self.assertFalse(all_unique_chars_no_set(\"aa\"))\n self.assertFalse(all_unique_chars_no_set(\"alabama\"))\n self.assertFalse(all_unique_chars_no_set(\"Ricardio\"))\n self.assertFalse(all_unique_chars_no_set(\"aardvark\"))\n self.assertFalse(all_unique_chars_no_set(\"Zimbabwe\"))\n self.assertFalse(all_unique_chars_no_set(\"....What?....\"))", "def test_multiple_char_not_unique(self):\n self.assertFalse(all_unique_chars(\"aa\"))\n self.assertFalse(all_unique_chars(\"alabama\"))\n self.assertFalse(all_unique_chars(\"Ricardio\"))\n self.assertFalse(all_unique_chars(\"aardvark\"))\n self.assertFalse(all_unique_chars(\"Zimbabwe\"))\n self.assertFalse(all_unique_chars(\"....What?....\"))", "def test_str(self):\n self.assertEqual(str(self.subject), \"Test Subject\")", "def test_len(self):\n self.assertEqual(len(self.tester), 21)", "def assert_(expr, msg=None):\r\n statistics.assertions += 1\r\n if not expr:\r\n if msg is None:\r\n raise AssertionError\r\n raise AssertionError(msg)\r\n return expr", "def test_three_divided_by_nothing():\n assert divide(3) == 1", "def test_str(self):\n self.assertEqual(str(self.bioe), str(self.wbioe))\n self.assertEqual(str(self.uioe), str(self.wuioe))\n self.assertEqual(str(self.bose), str(self.wbose))\n 
self.assertEqual(str(self.uose), str(self.wuose))", "def test_split_string_wrong_input_data(self):\n self.assertEqual(\"Wrong input data\", split_string(13))", "def test_positive_integer_2():\n assert 1 == positive_integer('1')", "def test_multiple_char_unique(self):\n self.assertTrue(all_unique_chars(\"ab\"))\n self.assertTrue(all_unique_chars(\"ba\"))\n self.assertTrue(all_unique_chars(\"make\"))\n self.assertTrue(all_unique_chars(\"thorn\"))\n self.assertTrue(all_unique_chars(\"malibu\"))\n self.assertTrue(all_unique_chars(string.ascii_letters))", "def check(self, s):\n bufferValue = self.f.getvalue()\n if isinstance(s, str):\n bufferValue = bufferValue.decode(\"utf-8\")\n self.assertEqual(bufferValue, s)" ]
[ "0.6651435", "0.6275559", "0.624227", "0.62251574", "0.61541015", "0.6134255", "0.61262155", "0.60943294", "0.60038877", "0.60038704", "0.59775376", "0.59726155", "0.59587276", "0.5954388", "0.593926", "0.5934084", "0.5925226", "0.59227633", "0.5909867", "0.5902318", "0.5897738", "0.5879457", "0.58648074", "0.58536386", "0.58410066", "0.58399475", "0.58384514", "0.58384514", "0.5832497", "0.5824264", "0.581988", "0.581973", "0.5796358", "0.5787364", "0.576959", "0.576887", "0.5768198", "0.575727", "0.5740472", "0.57396317", "0.573708", "0.5735134", "0.5721421", "0.572031", "0.5710911", "0.5703201", "0.5701332", "0.56891155", "0.5686611", "0.5676756", "0.5663013", "0.56611466", "0.56587386", "0.5650478", "0.5649054", "0.5646905", "0.56395584", "0.56362176", "0.56346244", "0.56337523", "0.56301904", "0.5622827", "0.5615034", "0.5612209", "0.56082606", "0.5603566", "0.56008446", "0.55990946", "0.5591725", "0.5589342", "0.55775255", "0.5568516", "0.5558629", "0.5550923", "0.5546422", "0.5545058", "0.5543729", "0.55431163", "0.5539737", "0.55373293", "0.5535316", "0.5535316", "0.5534796", "0.55326927", "0.55315924", "0.5530294", "0.5528468", "0.55232954", "0.55204046", "0.55190897", "0.55181295", "0.5517745", "0.5512103", "0.5512063", "0.5510304", "0.5506009", "0.55030346", "0.54993504", "0.54982877", "0.5495397", "0.54858685" ]
0.0
-1
Shorthand for 'assert a == b, "%r != %r" % (a, b)'
def eq_(a, b, msg=None):
    if not a == b:
        raise AssertionError(msg or "%r != %r" % (a, b))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def eq_(a, b, msg=None):\n assert a == b, msg or \"%r != %r\" % (a, b)", "def eq_(a, b, msg=None):\n assert a == b, msg or \"%r != %r\" % (a, b)", "def assert_eq(a, b, msg=None):\n assert a == b, msg or __safe_error(\"!=\", a, b)", "def assert_not_equal(self, first, second, msg=\"\"):\r\n assert first != second", "def assert_equal(self, first, second, msg=\"\"):\r\n assert first == second", "def eq_(a, b, msg=None):\n if not a == b:\n raise AssertionError(msg or \"%r != %r\" % (a, b))", "def ne_(a, b, msg=None):\n assert a != b, msg or \"%r == %r\" % (a, b)", "def eq_msg(a, b, msg=None):\n assert a == b, (str(msg) or '') + ' (%r != %r)' % (a, b)", "def assert_equal(left, right):\n msg = \"{} != {}\".format(left, right)\n assert left == right, msg", "def assert_equal(self, first, second):\n if first != second:\n raise AssertionError('%s and %s not equal' % (str(first), str(second)))", "def assert_not_equal(self, first, second):\n if not first != second:\n raise AssertionError('%s and %s is equal' % (str(first), str(second)))", "def test_example():\n x = 0\n y = 1\n assert x != y", "def equality():\n\n Assert(1) == 1\n Assert(1) != 0\n\n with Assert.raises(AssertionError):\n Assert(1) == 0\n\n with Assert.raises(AssertionError):\n Assert(1) != 1", "def equality():\r\n\r\n Assert(1) == 1\r\n Assert(1) != 0\r\n\r\n with Assert.raises(AssertionError):\r\n Assert(1) == 0\r\n\r\n with Assert.raises(AssertionError):\r\n Assert(1) != 1", "def expected_value(expected, actual):\n assert expected == actual", "def assertNotEqual(self, first, second, msg=None):\r\n if not first != second:\r\n msg = self._formatMessage(msg, '%s == %s' % (safe_repr(first), \r\n safe_repr(second)))\r\n raise self.failureException(msg)", "def assert_verbose(actual, expected):\n assert expected == actual, f\"Expected value: {expected}. 
But actual value is {actual}\"", "def assert_is(a, b):\n assert a is b, __safe_error(\"is not\", a, b)", "def verify_not_equal(self, first, second, msg=\"\"):\r\n try:\r\n self.assert_not_equal(first, second, msg)\r\n except AssertionError, e:\r\n if msg:\r\n m = \"%s:\\n%s\" % (msg, str(e))\r\n else:\r\n m = str(e)\r\n self.verification_erorrs.append(m)", "def _baseAssertEqual(self, first, second, msg=None):\r\n if not first == second:\r\n standardMsg = '%s != %s' % (safe_repr(first), safe_repr(second))\r\n msg = self._formatMessage(msg, standardMsg)\r\n raise self.failureException(msg)", "def assert_is_not(self, first, second, msg=None):\r\n assert first is not second", "def eq_(thing1, thing2, msg = None):\n return nose.tools.eq_(thing1, thing2,\n msg = (msg if msg != None else \"'%s' != '%s'\" % (thing1, thing2))\n )", "def verify_equal(self, first, second, msg=\"\"):\r\n try:\r\n self.assert_equal(first, second, msg)\r\n except AssertionError, e:\r\n if msg:\r\n m = \"%s:\\n%s\" % (msg, str(e))\r\n else:\r\n m = str(e)\r\n self.verification_erorrs.append(m)", "def testEqual(a, b):\n if a == b:\n print('Pass')\n else:\n print('Fail')", "def assertEqual(self, first, second, msg=None):\r\n assertion_func = self._getAssertEqualityFunc(first, second)\r\n assertion_func(first, second, msg=msg)", "def assert_strings_equal(a, b):\n assert len(a) == len(b)\n for x, y in zip(a, b):\n assert x == y", "def assert_is(self, first, second, msg=None):\r\n assert first is second", "def assert_not_equals(expected,received,message=None):\n if (expected == received):\n if message is None:\n message = 'assert_not_equals: expected something different from %s' % repr(expected)\n quit_with_error(message)", "def _almost_equal(x, y):\n pass", "def assert_equals(expected,received,message=None):\n if (expected != received):\n if message is None:\n message = 'assert_equals: expected %s but instead got %s' % (repr(expected),repr(received))\n quit_with_error(message)", "def _check(self, expected, actual):\n\n assert expected == actual, 'Assert fail. 
expected={} but actual={}'.format(expected, actual)", "async def test_assertion_rewriting(_):\n with pytest.raises(AssertionError) as e:\n assert 1 == 42\n assert \"42\" in str(e), f\"Assertion rewriting seems not to work, message was {e}\"", "def _assert_not_series_equal_both(a, b, **kwargs):\n _assert_not_series_equal(a, b, **kwargs)\n _assert_not_series_equal(b, a, **kwargs)", "def test_equal(self):\n self.assertTrue(self.a == self.a)\n self.assertFalse(self.a != self.a)", "def assertOutput(cls, expected, actual):\n if expected != actual:\n raise Exception(\"'\" + expected + \"' != '\" + actual + \"'\")", "def assert_identical(a: T, b: T) -> None:\n try:\n _assert_identical_impl(a, b)\n except AssertionError as exc:\n if hasattr(exc, '__notes__'):\n # See comment above.\n notes = []\n rest = -1\n for i, note in enumerate(exc.__notes__):\n if 'PREPOSITION' in note:\n notes.append(note.replace('PREPOSITION', 'in'))\n rest = i\n break\n notes.extend(\n note.replace('PREPOSITION', 'of') for note in exc.__notes__[rest + 1 :]\n )\n exc.__notes__ = notes\n raise", "def test_absolute_truth():\n assert True", "def _eq(a, b):\n return (a - b) % 2 == 0", "def assert_greater_equal(a, b, msg=None):\n if not a >= b:\n standardMsg = '%s not greater than or equal to %s' % (repr(a), repr(b))\n fail(_formatMessage(msg, standardMsg))", "def assert_less_equal(self, a, b):\n if not a <= b:\n raise AssertionError('%s not less than or equal to %s' % (str(a), str(b)))", "def assertIsNot(self, expr1, expr2, msg=None):\r\n if expr1 is expr2:\r\n standardMsg = 'unexpectedly identical: %s' % (safe_repr(expr1),)\r\n self.fail(self._formatMessage(msg, standardMsg))", "def assertMultiLineEqual(self, first, second, msg=None):\r\n self.assert_(isinstance(first, basestring), (\r\n 'First argument is not a string'))\r\n self.assert_(isinstance(second, basestring), (\r\n 'Second argument is not a string'))\r\n\r\n if first != second:\r\n standardMsg = '%s != %s' % (safe_repr(first, True), safe_repr(second, True))\r\n diff = '\\n' + ''.join(difflib.ndiff(first.splitlines(True),\r\n second.splitlines(True)))\r\n standardMsg = self._truncateMessage(standardMsg, diff)\r\n self.fail(self._formatMessage(msg, standardMsg))", "def assertVariableValue(self, file, a, b):\n file.write(\"ASSERT({} = {});\\n\".format(a, b))\n return", "def assert_allclose_na(a, b):\n if _is_na(a) and _is_na(b):\n pass\n else:\n npt.assert_allclose(a, b)", "def verify_is_not(self, first, second, msg=None):\r\n try:\r\n self.assert_is_not(first, second, msg)\r\n except AssertionError, e:\r\n if msg:\r\n m = \"%s:\\n%s\" % (msg, str(e))\r\n else:\r\n m = str(e)\r\n self.verification_erorrs.append(m)", "def test_example():\n answer = True\n expected = True\n assert answer == expected", "def assertTuplesAlmostEqual(self, actual, expected):\n try:\n for a, e in exactZip(actual, expected):\n assertTupleAlmostEqual(self, a, e)\n except AssertionError as e:\n raise AssertionError(\"Lines {0} were expected to be {1}; {2}\".format(actual, expected, e))", "def assertLessEqual(self, a, b, msg=None):\r\n if not a <= b:\r\n standardMsg = '%s not less than or equal to %s' % (safe_repr(a), safe_repr(b))\r\n self.fail(self._formatMessage(msg, standardMsg))", "def assertStringsEqual(self, a, b, msg=None, strip=False):\n if strip:\n a = a.strip()\n b = b.strip()\n\n if a != b:\n sys.stderr.write(\"The strings differ %s(lengths %d vs %d); \"\n \"a diff follows\\n\"\n % ('when stripped ' if strip else '',\n len(a), len(b),\n ))\n\n from difflib import unified_diff\n diff = 
unified_diff(a.splitlines(True),\n b.splitlines(True),\n 'a', 'b')\n for line in diff:\n sys.stderr.write(line)\n\n self.fail(msg)", "def _assert_not_series_equal(a, b, **kwargs):\n try:\n tm.assert_series_equal(a, b, **kwargs)\n msg = \"The two Series were equal when they shouldn't have been\"\n\n pytest.fail(msg=msg)\n except AssertionError:\n pass", "def assert_same(result, expect):\n assert sorted(result) == sorted(expect)", "def assertTupleAlmostEqual(self, actual, expected):\n try:\n for a, e in exactZip(actual, expected):\n self.assertAlmostEqual(a, e)\n except AssertionError as e:\n raise AssertionError(\"Tuple {0} was expected to be {1}; {2}\".format(actual, expected, e))", "def assertIs(self, expr1, expr2, msg=None):\r\n if expr1 is not expr2:\r\n standardMsg = '%s is not %s' % (safe_repr(expr1), safe_repr(expr2))\r\n self.fail(self._formatMessage(msg, standardMsg))", "def assert_path_eq(p1: Path, p2: Path):\n __tracebackhide__ = True\n # Pytest's error messages are far better for strings than Paths.\n # It shows you the difference between them.\n s1, s2 = str(p1), str(p2)\n # And we use extra s1/s2 variables so that pytest doesn't print the\n # expression \"str()\" as part of its output.\n assert s1 == s2", "def assertIs(self, expr1, expr2, msg=None):\n if expr1 is not expr2:\n standardMsg = '%s is not %s' % (safe_repr(expr1), safe_repr(expr2))\n self.fail(self._formatMessage(msg, standardMsg))", "def test_neq():\n # Test for not equal special method with scalar Rnode object and float value\n x = Rnode(2.0)\n try:\n assert (x != 2) == False\n assert (x != 1) == True\n except AssertionError as e:\n print(e)\n raise AssertionError\n\n # Test for equality special method with two scalar Dual object\n x = Rnode(2.0)\n y = Rnode(2.0)\n z = Rnode(1.0)\n try:\n assert (x != y) == False\n assert (x != z) == True\n except AssertionError as e:\n print(e)\n raise AssertionError", "def assert_equal_with_printing(\n test_case, expected, actual, uniform_formatter: Optional[Callable[[str], str]] = None\n):\n str_actual = str(actual)\n print(\"Expected:\")\n print(expected)\n print(\"Actual:\")\n print(str_actual)\n\n if uniform_formatter is not None:\n expected = uniform_formatter(expected)\n str_actual = uniform_formatter(str_actual)\n\n test_case.assertEqual(expected, str_actual)", "def assertSourceEqual(self, first, second, msg=None):\r\n self.assertEqual(dedent(first), dedent(second), msg=msg)", "def assert_in(self, first, second, msg=\"\"):\r\n assert first in second", "def assert_not_in(self, first, second, msg=\"\"):\r\n assert first not in second", "def assert_result_equal(cls, x, y):\n if isinstance(x, dict):\n if not isinstance(y, dict): # pragma: debug\n raise AssertionError(\"Second variable is not a dictionary.\")\n for k in x.keys():\n if k not in y: # pragma: debug\n print('x')\n pprint.pprint(x)\n print('y')\n pprint.pprint(y)\n raise AssertionError(\"Key '%s' not in second dictionary.\" % k)\n cls.assert_result_equal(x[k], y[k])\n for k in y.keys():\n if k not in x: # pragma: debug\n print('x')\n pprint.pprint(x)\n print('y')\n pprint.pprint(y)\n raise AssertionError(\"Key '%s' not in first dictionary.\" % k)\n elif isinstance(x, (list, tuple)):\n if not isinstance(y, (list, tuple)): # pragma: debug\n raise AssertionError(\"Second variable is not a list or tuple.\")\n if len(x) != len(y): # pragma: debug\n print('x')\n pprint.pprint(x)\n print('y')\n pprint.pprint(y)\n raise AssertionError(\"Sizes do not match. %d vs. 
%d\"\n % (len(x), len(y)))\n for ix, iy in zip(x, y):\n cls.assert_result_equal(ix, iy)\n elif isinstance(x, np.ndarray):\n np.testing.assert_array_equal(x, y)\n else:\n if isinstance(y, (dict, list, tuple, np.ndarray)): # pragma: debug\n print('x')\n pprint.pprint(x)\n print('y')\n pprint.pprint(y)\n raise AssertionError(\"Compared objects are different types. \"\n \"%s vs. %s\" % (type(x), type(y)))\n assert_equal(x, y)", "def assertMultiLineEqual(self, first, second, msg=None):\n self.assertTrue(isinstance(first, str),\n 'First argument is not a string')\n self.assertTrue(isinstance(second, str),\n 'Second argument is not a string')\n\n if first != second:\n message = ''.join(difflib.ndiff(first.splitlines(True),\n second.splitlines(True)))\n if msg:\n message += \" : \" + msg\n self.fail(\"Multi-line strings are unequal:\\n\" + message)", "def test_not_equal_on_equal_and_empty(self):\n a = Digest()\n b = Digest()\n\n self.assertFalse(a != b)\n self.assertFalse(b != a)", "def testNotEquals():\n assert not isEqual([1, 2, 3], [1, 2, 3, 4])\n assert not isEqual([1, 2, 3, 4], [1, 2, 3])", "def test_b(self):\n v1 = versions.Version(version='1.2.3', name='foo')\n v2 = versions.Version(version='1.2', name='bar')\n\n self.assertTrue(v1 != v2)\n self.assertTrue(v2 != v1)", "def test_exact_to_inexact(doctest):", "def testEquality(self):\n pass", "def assert_greater(a, b, msg=None):\n if not a > b:\n standardMsg = '%s not greater than %s' % (repr(a), repr(b))\n fail(_formatMessage(msg, standardMsg))", "def test_b(self):\n v1 = versions.Version(version='1.2.3', name='foo')\n v2 = versions.Version(version='1.2', name='bar')\n\n self.assertFalse(v1 == v2)\n self.assertFalse(v2 == v1)", "def _assert(condition, message):\n if not condition:\n raise AssertionError(message)", "def test_not_equal_on_equal(self):\n a = Digest(\n hashing_algorithm=self.hashing_algorithm_b,\n digest_value=self.digest_value_b,\n key_format_type=self.key_format_type_b)\n b = Digest(\n hashing_algorithm=self.hashing_algorithm_b,\n digest_value=self.digest_value_b,\n key_format_type=self.key_format_type_b)\n\n self.assertFalse(a != b)\n self.assertFalse(b != a)", "def assert_greater_equal(self, a, b):\n if not a >= b:\n raise AssertionError('%s not greater than or equal to %s' % (str(a), str(b)))", "def expected(x, y):", "def expected(x, y):", "def expected(x, y):", "def test_a(self):\n v1 = versions.Version(version='1.2.3', name='foo')\n v2 = versions.Version(version='2.2.3', name='bar')\n\n self.assertTrue(v1 != v2)\n self.assertTrue(v2 != v1)", "def verify_is(self, first, second, msg=None):\r\n try:\r\n self.assert_is(first, second, msg)\r\n except AssertionError, e:\r\n if msg:\r\n m = \"%s:\\n%s\" % (msg, str(e))\r\n else:\r\n m = str(e)\r\n self.verification_erorrs.append(m)", "def test_equal_on_not_equal(self):\n a = Digest(\n hashing_algorithm=self.hashing_algorithm_b,\n digest_value=self.digest_value_b,\n key_format_type=self.key_format_type_b)\n b = Digest()\n\n self.assertFalse(a == b)\n self.assertFalse(b == a)", "def test_eq_not_two_states(self):\n assert not State(substance=\"water\") == 3\n assert not 3 == State(substance=\"water\")", "def assertGreaterEqual(self, a, b, msg=None):\r\n if not a >= b:\r\n standardMsg = '%s not greater than or equal to %s' % (safe_repr(a), safe_repr(b))\r\n self.fail(self._formatMessage(msg, standardMsg))", "def assert_(expr, msg=None):\r\n statistics.assertions += 1\r\n if not expr:\r\n if msg is None:\r\n raise AssertionError\r\n raise AssertionError(msg)\r\n return expr", "def 
_assert_same(values):\n assert len(values) > 0\n first, rest = values[0], values[1:]\n for v in rest:\n assert v == first\n return first", "def assertStructurallyEqual(self, a, b):\n if not is_structurally_equal(a, b):\n message = \"%s !~= %s\" % (repr(a), repr(b))\n self.fail(message)", "def test_cclerror_not_equal():\n e = pyccl.CCLError(\"blah\")\n e2 = pyccl.CCLError(\"blahh\")\n assert e is not e2\n assert e != e2\n assert hash(e) != hash(e2)", "def assert_floats_are_equal(a, b, tol=1e-5):\r\n assert floats_are_equal(a, b, tol), (a,b)", "def assert_less(self, a, b):\n if not a < b:\n raise AssertionError('%s not less than %s' % (str(a), str(b)))", "def test_not_equal_on_not_equal_value(self):\n a = objects.OpaqueObject(self.bytes_a, enums.OpaqueDataType.NONE)\n b = objects.OpaqueObject(self.bytes_b, enums.OpaqueDataType.NONE)\n self.assertTrue(a != b)\n self.assertTrue(b != a)", "def test_tc():\n assert 1 == 1", "def test_inexact_to_exact(doctest):", "def assert_contents_equivalent(contents_a, contents_b):\n assert normalize_contents(contents_a) == normalize_contents(contents_b)", "def test_unequality(self):\n self.assertFalse(Record(1, 2) != Record(1, 2))\n self.assertTrue(Record(1, 2) != Record(1, 3))\n self.assertTrue(Record(1, 2) != Record(2, 2))\n self.assertTrue(Record(1, 2) != Record(3, 4))", "def test_a(self):\n v1 = versions.Version(version='1.2.1', name='foo')\n v2 = versions.Version(version='1.2.2', name='bar')\n\n self.assertFalse(v1 >= v2)\n self.assertTrue(v2 >= v1)", "def test_a(self):\n v1 = versions.Version(version='1.2.1', name='foo')\n v2 = versions.Version(version='1.2.2', name='bar')\n\n self.assertTrue(v1 <= v2)\n self.assertFalse(v2 <= v1)", "def check_equivalent(self, a, b):\n assert len(a) == len(b)\n for x, y in zip(a, b):\n assert self.is_equal(x, y)", "def test_equality_inequality(\n self, inst: t.Any, other: t.Any, eq: bool\n ) -> None:\n assert (inst == other) is eq\n assert (inst != other) is not eq", "def _almost_coincident(a,b, rtol=RTOL, atol=ATOL):\n return (np.allclose(a, b, rtol=RTOL, atol=ATOL)\n or np.allclose(np.flipud(a),b, rtol=RTOL, atol=ATOL))", "def test_nonEquality(self):\n # Make explicitly sure we're using !=:\n self.assertFalse(Comparable(1) != Comparable(1))\n self.assertTrue(Comparable(2) != Comparable(1))", "def test_not_equal_on_equal(self):\n a = objects.OpaqueObject(self.bytes_a, enums.OpaqueDataType.NONE)\n b = objects.OpaqueObject(self.bytes_a, enums.OpaqueDataType.NONE)\n self.assertFalse(a != b)\n self.assertFalse(b != a)", "def test_eq_invalid(self):\n self.assertFalse(self.instance == '123')", "def testEquals():\n assert isEqual([1, 2, 3], [1, 2, 3])" ]
[ "0.7919735", "0.7919735", "0.79195523", "0.7669848", "0.766435", "0.7557559", "0.7554662", "0.74091434", "0.73751223", "0.7365663", "0.7247073", "0.7191134", "0.71108264", "0.71013314", "0.70558685", "0.70555484", "0.704943", "0.6989262", "0.685035", "0.68350255", "0.6828854", "0.67936283", "0.6779917", "0.67408144", "0.6734605", "0.664615", "0.6609359", "0.6467162", "0.64417505", "0.6425769", "0.6424658", "0.63545585", "0.63050425", "0.6292138", "0.6279847", "0.62726295", "0.6259791", "0.6258812", "0.6249484", "0.6237144", "0.62174124", "0.6192228", "0.6187672", "0.6187609", "0.6180732", "0.6178361", "0.61711377", "0.61285186", "0.6127993", "0.6124616", "0.6107536", "0.6101455", "0.607828", "0.6056167", "0.60492545", "0.6036041", "0.60327494", "0.6013381", "0.5998279", "0.59840643", "0.59756935", "0.59743613", "0.59570855", "0.59562975", "0.59371614", "0.5926393", "0.59219766", "0.5919197", "0.59082866", "0.59030986", "0.5891164", "0.588917", "0.58891463", "0.58891463", "0.58891463", "0.5868467", "0.5860362", "0.58568597", "0.5840253", "0.58365065", "0.58279276", "0.58226526", "0.58214676", "0.5807112", "0.58048064", "0.5798204", "0.57942253", "0.57916063", "0.5778472", "0.5772295", "0.577082", "0.57693434", "0.5767265", "0.576498", "0.5764349", "0.5749167", "0.5740389", "0.57260144", "0.5714923", "0.5711655" ]
0.7456572
7
Checks only authenticated users can see the page
def test_csv_import_auth(self):
    path = reverse("import-csv")
    request = RequestFactory().get(path)
    request.user = mixer.blend(User)
    response = csv_import(request)
    assert response.status_code == 200
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_authenticated(self, request, **kwargs):\r\n return True", "def has_permission(self, request, view):\n if request.user.is_authenticated():\n return True\n return False", "def is_allowed_to_submit(request):\n return not settings.REQUIRE_LOGIN or request.user.is_authenticated()", "def is_authenticated(self):\n return True", "def logged_in(request):\n return request.current_user is not None", "def is_authenticated(self):\n return False", "def is_authenticated(self):\n return True", "def is_authenticated(self):\n return True", "def is_authenticated(self):\n return True", "def is_authenticated(self):\n return True", "def is_authenticated(self):\n return True", "def is_authenticated(self):\n return True", "def is_authenticated(self):\n return True", "def can_view(self, user):\r\n return True", "def test_func(self):\n if not self.request.user.is_authenticated:\n return False\n if self.request.user.is_staff:\n return True\n return self.get_user() == self.request.user", "def is_authorized(self, request, obj=None):\r\n if request.method == 'GET':\r\n return True\r\n else:\r\n return False", "def can_access(user, page):\n page_groups = PageViewGroup.objects.filter(page=page)\n if user.is_anonymous():\n return page_groups.count() == 0\n else:\n groups = page_groups.filter(group__in=user.groups.all())\n return page_groups.count() == 0 or groups.count() > 0", "def user_auth(request):\n if request.user.is_authenticated:\n user = User.objects.get(email=request.user.email)\n if UserInformation.objects.filter(user=user).exists():\n return True\n return False", "def verify_privileged(self):\n community_text = self.fetch(self.base_url + \"/community\")\n return \"You must be logged in to see this page.\" not in community_text", "def before_request() -> None:\n if current_user.is_anonymous() or not current_user.is_allowed():\n abort(401)", "def is_regular_user(user):\n return user.is_authenticated()", "def http_auth_allowed(request):\n\n if request.method not in ('GET', 'HEAD'):\n return False\n if not request.is_secure() and not settings.DEBUG:\n return False\n\n ua = request.META.get('HTTP_USER_AGENT', '')\n if HTTP_AUTH_USER_AGENT.match(ua):\n return True\n else:\n return False", "def is_authenticated(self):\n return True #self.authenticated", "def test_if_forbiden_for_authenticated_permissions(self):\r\n res = self.client_authenticated.get(reverse(LIST_USER_URL),data={})\r\n self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)", "def check_authentication(self, request):\n if not self.request.user.is_authenticated:\n raise NotAuthenticated()", "def has_permission(self, request, view):\n if request.method == \"POST\":\n return not (request.user and is_authenticated(request.user))\n\n return request.user and is_authenticated(request.user)", "def current_user_has_access(self):\n return self.user_has_access(users.get_current_user())", "def check_admin():\r\n if not current_user.is_admin:\r\n abort(403)", "def logged_in(self):\n return self.auth.get_user_by_session() is not None", "def index(request):\n try:\n if request.user.is_authenticated:\n return render(request, \"pages/index.html\")\n else:\n return redirect('login')\n\n except:\n return redirect('login')", "def check_admin():\n if not current_user.is_admin:\n abort(403)", "def check_admin():\n if not current_user.is_admin:\n abort(403)", "def check_auth():\n if not current_user.is_authenticated:\n return render_template('401.html', base_template=appbuilder.base_template, appbuilder=appbuilder), 401\n for role in current_user.roles:\n if 
appbuilder.get_app.config['AUTH_ROLE_ADMIN'] == role.name:\n return None\n return render_template('403.html', base_template=appbuilder.base_template, appbuilder=appbuilder), 403", "def require_login(self):\n\tif users.get_current_user():\n\t return True\n\telse:\n\t self.redirect(users.create_login_url(self.request.uri))\n\t return False", "def __before__(self):\n \n if not u'REMOTE_USER' in session: \n if not request.environ[u'PATH_INFO'] in self.public_urls:\n log.debug('PATH_INFO: %s' % request.environ[u'PATH_INFO'])\n #session[u'path_before_login'] = request.environ[u'PATH_INFO']\n #session.save()\n redirect(url('/users/index'))", "def test_logged_user_can_access(self):\n\n utils.create_user_and_authenticate(self)\n utils.test_can_access(self, self.url)", "def index(request):\n user = request.user\n if user.is_authenticated:\n validar_usuario(request.user)\n return redirect('gestion:menu')\n else:\n return render(request,'index.html')", "def has_read_permission(request):\n return request.user.is_authenticated", "def has_permission(self, request):\n\t\treturn request.user.is_active", "def require_auth(view):\n def wrapper(request, *args):\n \n if not request.session.get('user_id', False):\n return HttpResponseRedirect(\"/clanovi/login/\")\n \n return view(request, *args) \n return wrapper", "def logged_in():\n\n if current_user.is_authenticated:\n return True\n\n return False", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def test_private_pages_auth(self):\r\n auth_pages = (\r\n '/course/',\r\n )\r\n\r\n # These are pages that should just load when the user is logged in\r\n # (no data needed)\r\n simple_auth_pages = (\r\n '/course/',\r\n )\r\n\r\n # need an activated user\r\n self.test_create_account()\r\n\r\n # Create a new session\r\n self.client = AjaxEnabledTestClient()\r\n\r\n # Not logged in. 
Should redirect to login.\r\n print('Not logged in')\r\n for page in auth_pages:\r\n print(\"Checking '{0}'\".format(page))\r\n self.check_page_get(page, expected=302)\r\n\r\n # Logged in should work.\r\n self.login(self.email, self.pw)\r\n\r\n print('Logged in')\r\n for page in simple_auth_pages:\r\n print(\"Checking '{0}'\".format(page))\r\n self.check_page_get(page, expected=200)", "def is_valid(self):\n return self.user.is_authenticated", "def check_admin():\n\tif not current_user.is_admin:\n\t\tabort(403)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def is_logged_in():\n return 'user' in session", "def is_authorized(self, request, obj=None):\r\n return True", "def requires_auth(self):\n return True", "def is_accessible(self):\n return current_user.is_authenticated and current_user.role == 'admin'", "def is_accessible(self):\n return current_user.is_authenticated and current_user.role == 'admin'", "def has_permission(self, request):\n return request.user.is_active", "def has_permission(self, request):\n return request.user.is_active", "def has_permission(self, request):\n return request.user.is_active", "def is_access_allowed(self, user_id):\n ### DATABASE CODE GOES HERE\n return False", "def unauthorized():\n flash('You must be logged in to view that page')\n return redirect(url_for('catalog_bp.index'))", "def is_logged_in(self):\n return self.router.token is not None", "def has_permission(self, request, view):\n usuario = request.user\n return str(usuario) == \"AnonymousUser\"", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if 
current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def allowed_to_preview(user):\n if (\n user.is_authenticated and\n user.is_active and\n user.is_staff\n ):\n return True\n return False", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return self.login()", "def test_journal_route_accessible_only_if_logged_in(self):\n response = self.client.get(reverse_lazy('journal'))\n self.assertEqual(response.status_code, 302)", "def user_auth_inst(request):\n if request.user.is_authenticated:\n user = User.objects.get(email=request.user.email)\n if UserInformation.objects.filter(user=user).exists():\n inst = UserInformation.objects.get(user=user)\n if(inst.user_instructor):\n return True\n return False", "def is_accessible_by(self, user):\n return (self.public or\n (user.is_authenticated and\n (user.is_staff or self.users.filter(pk=user.pk).exists())))", "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def check_login_required(views_func):\n @wraps(views_func)\n def wrapper(request, *args, **kwargs):\n if request.user.is_authenticated:\n return views_func(request, *args, **kwargs)\n else:\n return HttpResponse(status=401)\n return wrapper", "def is_user_allowed(self, user):\n return user.is_staff", "def test_logged_user_can_access(self):\n\n utils.create_user_and_authenticate(self)\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)", "def should_render(\n self,\n context: Context,\n ) -> bool:\n request = context['request']\n user = request.user\n\n return (super().should_render(context=context) and\n user.is_authenticated and\n not is_site_read_only_for(user) and\n unified_banner_feature.is_enabled(request=request))", "def check_admin(self, *args, **kwargs):\n if not users.is_current_user_admin():\n self.response.write(\n '<div style=\"padding-top: 200px; height:178px; width: 500px; color: white; margin: 0 auto; font-size: 52px; text-align: center; background: url(\\'http://3.bp.blogspot.com/_d_q1e2dFExM/TNWbWrJJ7xI/AAAAAAAAAjU/JnjBiTSA1xg/s1600/Bank+Vault.jpg\\')\">Forbidden Access <a style=\\'color: white;\\' 
href=\\'%s\\'>Login</a></div>' %\n users.create_login_url(self.request.path_url + self.request.query_string))\n return\n else:\n return handler(self, *args, **kwargs)", "def has_permission(self, request, view):\n\n is_authenticated = request.user.is_authenticated()\n safe_request = request.method in permissions.SAFE_METHODS\n return is_authenticated and safe_request", "def admin_user_only(view):\r\n @google_login_required\r\n def wrapped(request, *args, **kwargs):\r\n if users.is_current_user_admin():\r\n return view(request, *args, **kwargs)\r\n context = RequestContext(request);\r\n return rtr( 'access_limited.html', context,None )\r\n return wraps(view)(wrapped)", "def test_should_render_with_authenticated(self) -> None:\n with override_feature_check(unified_banner_feature.feature_id, False):\n self.request.user = User.objects.get(username='doc')\n self.assertTrue(\n self.action.should_render(\n context=self._create_request_context(\n User.objects.get(username='doc'))))", "def is_user_authenticated(request):\n return request.session.session_key", "def is_authenticated(self):\r\n return self.authenticated", "def should_render(\n self,\n *,\n context: Context,\n ) -> bool:\n request = context['request']\n user = request.user\n\n return (super().should_render(context=context) and\n user.is_authenticated and\n not is_site_read_only_for(user) and\n not unified_banner_feature.is_enabled(request=request))", "def should_render(\n self,\n *,\n context: Context,\n ) -> bool:\n request = context['request']\n user = request.user\n\n return (super().should_render(context=context) and\n user.is_authenticated and\n not is_site_read_only_for(user) and\n not unified_banner_feature.is_enabled(request=request))", "def is_accessible(self):\n if login.current_user.is_authenticated:\n return login.current_user.is_admin()\n return False", "def before_request():\n if g.current_user.is_anonymous:\n return forbidden('Not signed in')\n\n if not g.current_user.confirmed:\n return forbidden('Unconfirmed account')", "def can_be_viewed_by(self,user):\n return True", "def test_page_view_permission(self):\n \n adminonlypage = create_page_in_admin(self.testproject,\"adminonlypage\",\n permission_lvl=Page.ADMIN_ONLY) \n registeredonlypage = create_page_in_admin(self.testproject,\"registeredonlypage\",\n permission_lvl=Page.REGISTERED_ONLY)\n publicpage = create_page_in_admin(self.testproject,\"publicpage\",\n permission_lvl=Page.ALL)\n \n self._test_page_can_be_viewed(self.projectadmin,adminonlypage)\n self._test_page_can_not_be_viewed(self.participant,adminonlypage)\n self._test_page_can_not_be_viewed(self.registered_user,adminonlypage) \n self._test_page_can_not_be_viewed(None,adminonlypage) # None = not logged in\n \n self._test_page_can_be_viewed(self.projectadmin,registeredonlypage)\n self._test_page_can_be_viewed(self.participant,registeredonlypage)\n self._test_page_can_not_be_viewed(self.registered_user,registeredonlypage)\n self._test_page_can_not_be_viewed(None,registeredonlypage) # None = not logged in\n \n self._test_page_can_be_viewed(self.projectadmin,publicpage)\n self._test_page_can_be_viewed(self.participant,publicpage)\n self._test_page_can_be_viewed(self.registered_user,publicpage)\n self._test_page_can_be_viewed(None,publicpage) # None = not logged in", "def is_visible(cls, request):\n if cls.permission_required:\n return request.user.has_perm(cls.permission_uri)\n else:\n return True", "def can_view_post(user):\n #only students and admins may use the search, submitForm functions\n return (not 
bool(user.is_staff) or user.is_superuser)" ]
[ "0.75106347", "0.74165606", "0.7196165", "0.7195553", "0.71893704", "0.7175715", "0.71588826", "0.71588826", "0.71588826", "0.71588826", "0.71588826", "0.71588826", "0.71588826", "0.71113145", "0.70598507", "0.7030012", "0.70088905", "0.699589", "0.69223857", "0.69180375", "0.69120365", "0.6895573", "0.6869566", "0.6869326", "0.68691695", "0.68532443", "0.6844825", "0.6809327", "0.6764613", "0.67613614", "0.6760781", "0.6760781", "0.67603564", "0.6759666", "0.67430437", "0.6738578", "0.67087203", "0.67061245", "0.6701092", "0.6695492", "0.6694849", "0.6690891", "0.6690891", "0.6690891", "0.66877246", "0.668717", "0.66821235", "0.66661596", "0.66661596", "0.66661596", "0.66661596", "0.6663385", "0.6658645", "0.6637744", "0.6612206", "0.6612206", "0.6611734", "0.6611734", "0.6611734", "0.65984905", "0.65938973", "0.6587656", "0.65628403", "0.6560608", "0.6560608", "0.6560608", "0.6560608", "0.6560608", "0.6560608", "0.6560608", "0.6560608", "0.6560608", "0.6560608", "0.6560608", "0.6560608", "0.6560608", "0.6557228", "0.65543705", "0.65332395", "0.6529083", "0.65261585", "0.65240973", "0.65240973", "0.65073985", "0.65070605", "0.6505294", "0.65024704", "0.64848727", "0.6484046", "0.64833456", "0.64740294", "0.64706576", "0.6466903", "0.6461381", "0.6461381", "0.6457303", "0.6431013", "0.6421082", "0.6414511", "0.64070535", "0.6407015" ]
0.0
-1
Checks unauthenticated users can not see the page
def test_csv_import_unauth(self):
    path = reverse("import-csv")
    request = RequestFactory().get(path)
    request.user = AnonymousUser()
    response = csv_import(request)
    assert response.status_code == 302
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_in(self):\n response = self.c.get(reverse(submit_point), {'lat': 34.0, 'lng': 45.3, 'zoom': 13})\n self.assertEqual(response.status_code, 403)", "def unauthorized():\n flash('You must be logged in to view that page')\n return redirect(url_for('catalog_bp.index'))", "def test_unauthenticated(self):\n self.logout_user()\n\n response = self.client.get(self.api_link)\n self.assertContains(response, \"sign in to use private threads\", status_code=403)", "def test_if_not_available_for_unauthorized(self):\r\n res = self.not_authenticated.get(reverse(LIST_USER_URL),data={})\r\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def test_user_not_authorized(self):\n response = self.client.post(self.url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_not_logged_in(self):\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n AnonymousUser(), album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n AnonymousUser(), album.display_photo, ALBUM_PUBLIC)", "def test_unauthenticated_resource_allowed(self):\n raise NotImplementedError # FIXME", "def forbidden(self):\n self.flash(self._(\"You don't have the correct permissions to access this page.\"), category=\"error\")\n # TODO: maybe check barcamp and permissions for the barcamp homepage and redirect there instead\n # TODO: maybe create a remember decorator which remember the last page in the session which is safe to redirect to.\n # the forbidden handler should delete it though\n return redirect(self.url_for(\"index\"))", "def before_request() -> None:\n if current_user.is_anonymous() or not current_user.is_allowed():\n abort(401)", "def test_unauthenticated_user_denial(self):\n\n self.response = self.client.get(\"/api/users/users_list/\")\n self.assertEqual(self.response.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(\n 'Authentication credentials were not provided.', self.response.data['detail'])", "def get_authenticated_denied(self):", "def unauthorized():\n flash(\"You must be logged in to view that page.\")\n return redirect(url_for(\"auth.login_view\"))", "def test_unauthenticated(self):\n self.browser.open(\"http://nohost/plone/full_review_list\")\n self.assertTrue(\"Login Name\" in self.browser.contents)", "def testNoPermission(self):\n self.login_user(self.user)\n response = self.client.get(self.url, self.args)\n self.assertEqual(response.status_code, 302)", "def deny_access():\n flash('You must login first.')\n return 
redirect(url_for('home'))", "def test_not_logged_in(self):\n response = self.c.get(reverse(map_page), {'lat': 34.0, 'lng': 45.3})\n self.assertEqual(response.status_code, 200)", "def unauthorized_only(view_func):\n def is_anonymous(user):\n return user.is_anonymous()\n\n return user_passes_test(is_anonymous, login_url='/', redirect_field_name=None)(view_func)", "def unauthorized():\n flash('You must be logged in to view that page.', 'warning')\n return redirect(url_for('auth.login'))", "def test_unauthenticated_get(self):\n url = reverse('edit-list')\n\n response = self.client.get(url)\n self.assertEqual(403, response.status_code)\n self.assertEqual('Forbidden', response.status_text)\n self.assertTrue(\n 'credentials were not provided.' in response.data.get('detail'))", "def test_not_authenticated(self):\n response = self.client.get(telemetry_url)\n self.assertEqual(403, response.status_code)", "def test_if_forbiden_for_authenticated_permissions(self):\r\n res = self.client_authenticated.get(reverse(LIST_USER_URL),data={})\r\n self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)", "def unauthorized():\n #flash('You must be logged in to view that page.')\n return redirect(url_for('login'))", "def assert_cannot_view(obj):\n selenium_utils.open_url(obj.url)\n assert ui_utils.is_error_403()", "def test_returns_401_if_user_not_logged_in(self):\n # Act\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 401)", "def test_returns_401_if_user_not_logged_in(self):\n # Act\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 401)", "def test_returns_401_if_user_not_logged_in(self):\n # Act\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 401)", "def test_dashboard_not_signed(self):\n views_url = ('/dashboard/',\n '/accounts/picture/')\n #create a get request\n for view in views_url:\n response = self.client.get(view)\n #the user was not logged in, the user should be redirected\n self.assertEqual(response.status_code, 302,\n msg=str(response.request))", "def test_retrive_user_unauthenticated(self):\n res = self.client.get(ME_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_not_authenticated_non_public_course_with_other_username(self):\n self.client.logout()\n self.verify_response(403)", "def check_admin():\r\n if not current_user.is_admin:\r\n abort(403)", "def verify_privileged(self):\n community_text = self.fetch(self.base_url + \"/community\")\n return \"You must be logged in to see this page.\" not in community_text", "def test_instructor_page_access_nonstaff(self):\r\n self.login(self.enrolled_user)\r\n\r\n urls = [reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()}),\r\n reverse('instructor_dashboard', kwargs={'course_id': self.test_course.id.to_deprecated_string()})]\r\n\r\n # Shouldn't be able to get to the instructor pages\r\n for url in urls:\r\n check_for_get_code(self, 404, url)", "def check_admin():\n\tif not current_user.is_admin:\n\t\tabort(403)", "def test_auth_private_unowned(self):\n self.do_visible(False, 'pattieblack', False, tenant='froggy')", "def forbidden_page(error):\n return render_template(\"access_forbidden.html\"), 403", "def unauthorized():\n flask.flash('You must be logged in to view that page.')\n return redirect(url_for('auth.sign_in'))", "def test_not_authenticated_public_course_with_other_username(self):\n self.client.logout()\n self.verify_response(403)", "def before_request():\n if 
g.current_user.is_anonymous:\n return forbidden('Not signed in')\n\n if not g.current_user.confirmed:\n return forbidden('Unconfirmed account')", "def check_admin():\n if not current_user.is_admin:\n abort(403)", "def check_admin():\n if not current_user.is_admin:\n abort(403)", "def test_10_admin_user_not_listed(self):\r\n self.register()\r\n res = self.app.get('/admin/users', follow_redirects=True)\r\n assert \"Manage Admin Users\" in res.data, res.data\r\n assert \"Current Users with Admin privileges\" not in res.data, res.data\r\n assert \"John\" not in res.data, res.data", "def redirect_users_without_permissions(page, request, serve_args, serve_kwargs):\n if not has_permission(request.user, get_required_groups(page)): \n return redirect(NO_PERMISSIONS_REDIRECT_URL)", "def test_category_view_not_logged_in(testapp):\n from webtest.app import AppError\n with pytest.raises(AppError, message=\"403 Forbidden\"):\n testapp.get('/category/1')", "def test_get_no_permission(self):\n self.user.user_permissions.clear()\n response = self._get()\n self.assertRedirectsToLogin(response)", "def test_get_no_permission(self):\n self.user.user_permissions.clear()\n response = self._get()\n self.assertRedirectsToLogin(response)", "def test_retrieve_unauthorized_user(self):\n\n response = self.client.get(URL_ME)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_public_status_page_remove_authorization_from_public_status_page(self):\n pass", "def test_retrieve_user_unautherized(self):\n res = self.client.get(ME_URL)\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_not_authenticated(self):\n self.client.logout()\n response = self._get(get_kwargs=self._data())\n self._check_response(response, 101)\n self.assertEqual(UserFitbit.objects.count(), 1)", "def test_unauthenticated_request(self):\n url = self.get_url(self.active_user.id)\n response = self.client.get(url)\n\n expected_status_code = 401\n self.assertEqual(response.status_code, expected_status_code)", "def test_auth_public_unowned(self):\n self.do_visible(True, 'pattieblack', True, tenant='froggy')", "def test_anonymous_user_doesnt_have_lesson_state(self):\n self.client.force_authenticate(None)\n data = self.client.get(self.api_lesson_list_url).data\n\n for item in data.get('results'):\n self.assertNotIn('state', item)", "def get_everyone_denied(self):", "def forbidden(request):\n return Response(render_template('core/forbidden.html'),\n status=401, mimetype='text/html')", "def test_unauthorized_user_like_field(self):\n response = self.client.get(reverse('lessons-detail', args=(1,)))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertNotIn('like', response.data)", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def check_unauthorized_response(response: HTTPResponse) -> bool:\n return response.status_code == 403", "def test_anonymous(self):\n self.logout_user()\n\n response = self.client.get(self.api_link)\n self.assertContains(response, \"sign in to use private threads\", 
status_code=403)", "def test_not_logged_user_cannot_leave(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_unauthenticated_user_disliking(self):\n self.like_dislike(self.like_url(6))", "def page_forbidden(e):\n return render_template(\"403.html\", page_title=403)", "def test_get_unauthenticated(self):\n del self.client.request_kwargs['auth']\n self.verify_get_response(self.client.get(STATUS_PATH))", "def test_authentication_is_not_required(self):\n # Act\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)", "def test_view_all_unauthenticated(self):\n response = self.client.get(reverse('crt_forms:crt-forms-index'))\n self.assertEqual(response.status_code, 302)\n self.assertRedirects(response, '/accounts/login/?next=/form/view')", "def test_logged_in_not_friend(self):\n\n # log in\n self.make_logged_in_not_friend()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u2, album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u2, album.display_photo, ALBUM_PUBLIC)", "def testGetAccessDenied(self):\n self.runGet(None)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user)\n self.response_403()", "def test_view_disabled(self, method, url):\n response = getattr(self.client, method)(url)\n assert response.status_code == 403", "def test_no_permission(self):\n override_acl(self.user, {'can_use_private_threads': 0})\n\n response = self.client.get(self.api_link)\n self.assertContains(response, \"t use private threads\", status_code=403)", "def test_logged_user_not_in_group_cannot_access(self):\n\n utils.create_user_and_authenticate(self)\n expected_url = reverse('my_groups_view')\n utils.test_cannot_access(self, self.url, expected_url)", "def test_retrieve_user_unauthorized(self):\n res = self.client.get(ME_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_retrieve_user_unauthorized(self):\n res = self.client.get(ME_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def is_authenticated(self):\n return False", "def test_04_admin_featured_apps_as_anonymous(self):\r\n res = self.app.get('/admin/featured', follow_redirects=True)\r\n assert \"Please sign in to access this page\" in res.data, res.data", "def test_home_as_anonymous(self):\n response = self.client.get(\"/images/contents/\")\n self.assertEqual(response.status_code, 401)", "def test_home_as_anonymous(self):\n response = self.client.get(\"/images/contents/\")\n self.assertEqual(response.status_code, 401)", "def test_auto_auth_disabled(self):\r\n response = self.client.get(self.url)\r\n self.assertEqual(response.status_code, 404)", "def test_not_authenticated_non_public_course_with_all_blocks(self):\n self.client.logout()\n self.query_params.pop('username')\n self.query_params['all_blocks'] = True\n self.verify_response(403)", "def denied_response(self, req):\n if req.remote_user:\n return HTTPForbidden(request=req)\n else:\n return HTTPUnauthorized(request=req)", "def __before__(self):\n \n if not u'REMOTE_USER' in session: \n if not request.environ[u'PATH_INFO'] in self.public_urls:\n log.debug('PATH_INFO: %s' % 
request.environ[u'PATH_INFO'])\n #session[u'path_before_login'] = request.environ[u'PATH_INFO']\n #session.save()\n redirect(url('/users/index'))", "def test_retrieve_user_unauthorized(self):\r\n res = self.client.get(ME_URL)\r\n\r\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_unauthenticated_user_liking(self):\n self.like_dislike(self.dislike_url(5))", "def test_no_user(self):\n self.request.user = None\n result = user_id_get_view(self.request)['d']\n self.assertEqual(result, error_dict('api_errors', 'not authenticated for this request'))", "def test_not_logged_cannot_update(self):\n\n utils.test_not_logged_cannot_access(self, self.url, self.data)", "def permission_denied(request):\n\treturn render(request, '403.html', None)", "def test_get_un_authenticated(self):\n\n url = reverse('post-detail', args=(self.user.id,))\n response = self.client.get(path=url)\n self.assertEqual(first=401, second=response.status_code)", "def test_no_permission(self):\n override_acl(self.user, {'can_use_private_threads': 0})\n\n response = self.client.get(self.api_link)\n self.assertContains(response, \"can't use private threads\", status_code=403)", "def testGetAccessDenied(self):\n self.runGet(None, sequencer=self.hiseq2000.vendor_id)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user)\n self.response_403()", "def test_not_authenticated_uri(self):\n request = self.factory.get(self.uri)\n response = self.view(request)\n response.render()\n self.assertEqual(response.status_code, 401,\n 'Expected Response Code 401, received {0} instead.'\n .format(response.status_code))", "def test__user_passed_as_none(self):\r\n access.has_access(None, 'staff', 'global', None)", "def test_not_logged_cannot_delete(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def handle_forbidden_for_homepage(self, request):\n\n login_url = request.link(Auth.from_request_path(request), name='login')\n\n if URL(request.url).path() == '/':\n return morepath.redirect(login_url)\n\n return handle_forbidden(self, request)", "def test_get_no_permission(self):\n self.user.user_permissions.clear()\n response = self._get()\n self.assertRedirectsToLogin(response)\n self._assert_no_change()", "def test_detail_blocked_forbidden_even_if_contributor(self):\n self.login(self.user1)\n resp = self.client.get(self.get_url(self.c3.pk))\n self.assert404(resp)" ]
[ "0.7784443", "0.7784443", "0.7784443", "0.7784443", "0.7555281", "0.73508805", "0.73336077", "0.7329427", "0.72262084", "0.72135955", "0.72135955", "0.715947", "0.70824975", "0.7073744", "0.7048739", "0.70219946", "0.69940305", "0.6948124", "0.6944818", "0.69262964", "0.6906128", "0.68984574", "0.6892031", "0.68789524", "0.6875266", "0.6836221", "0.6831557", "0.68220717", "0.68162036", "0.67892414", "0.6787131", "0.6787131", "0.6787131", "0.67844284", "0.6779533", "0.67786014", "0.67681843", "0.6764277", "0.67631596", "0.6738897", "0.67285275", "0.67243356", "0.6696555", "0.66925013", "0.6691913", "0.66871196", "0.66871196", "0.66700643", "0.6661624", "0.66516733", "0.66505754", "0.66505754", "0.6647447", "0.66442126", "0.6628061", "0.66279775", "0.66242796", "0.6615613", "0.66050065", "0.6596261", "0.65950656", "0.65928066", "0.6592073", "0.6592073", "0.65886575", "0.6578902", "0.6574714", "0.6572206", "0.6572079", "0.65592515", "0.65572643", "0.65447253", "0.65441287", "0.65265834", "0.65255547", "0.6505049", "0.64989406", "0.6486051", "0.6486051", "0.64855015", "0.64751834", "0.646654", "0.646654", "0.6462312", "0.64616907", "0.64595234", "0.6459501", "0.64586735", "0.645021", "0.64485395", "0.64456916", "0.6445281", "0.6444391", "0.64438", "0.6442233", "0.6439296", "0.64352673", "0.6429903", "0.64242536", "0.64163536", "0.6405156" ]
0.0
-1
Checks only authenticated users can see the page
def test_setting_csv_auth(self): path = reverse("setting-csv") request = RequestFactory().get(path) request.user = mixer.blend(User) response = csv_setting(request) assert response.status_code == 200
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_authenticated(self, request, **kwargs):\r\n return True", "def has_permission(self, request, view):\n if request.user.is_authenticated():\n return True\n return False", "def is_allowed_to_submit(request):\n return not settings.REQUIRE_LOGIN or request.user.is_authenticated()", "def is_authenticated(self):\n return True", "def logged_in(request):\n return request.current_user is not None", "def is_authenticated(self):\n return False", "def is_authenticated(self):\n return True", "def is_authenticated(self):\n return True", "def is_authenticated(self):\n return True", "def is_authenticated(self):\n return True", "def is_authenticated(self):\n return True", "def is_authenticated(self):\n return True", "def is_authenticated(self):\n return True", "def can_view(self, user):\r\n return True", "def test_func(self):\n if not self.request.user.is_authenticated:\n return False\n if self.request.user.is_staff:\n return True\n return self.get_user() == self.request.user", "def is_authorized(self, request, obj=None):\r\n if request.method == 'GET':\r\n return True\r\n else:\r\n return False", "def can_access(user, page):\n page_groups = PageViewGroup.objects.filter(page=page)\n if user.is_anonymous():\n return page_groups.count() == 0\n else:\n groups = page_groups.filter(group__in=user.groups.all())\n return page_groups.count() == 0 or groups.count() > 0", "def user_auth(request):\n if request.user.is_authenticated:\n user = User.objects.get(email=request.user.email)\n if UserInformation.objects.filter(user=user).exists():\n return True\n return False", "def verify_privileged(self):\n community_text = self.fetch(self.base_url + \"/community\")\n return \"You must be logged in to see this page.\" not in community_text", "def before_request() -> None:\n if current_user.is_anonymous() or not current_user.is_allowed():\n abort(401)", "def is_regular_user(user):\n return user.is_authenticated()", "def http_auth_allowed(request):\n\n if request.method not in ('GET', 'HEAD'):\n return False\n if not request.is_secure() and not settings.DEBUG:\n return False\n\n ua = request.META.get('HTTP_USER_AGENT', '')\n if HTTP_AUTH_USER_AGENT.match(ua):\n return True\n else:\n return False", "def is_authenticated(self):\n return True #self.authenticated", "def test_if_forbiden_for_authenticated_permissions(self):\r\n res = self.client_authenticated.get(reverse(LIST_USER_URL),data={})\r\n self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)", "def check_authentication(self, request):\n if not self.request.user.is_authenticated:\n raise NotAuthenticated()", "def has_permission(self, request, view):\n if request.method == \"POST\":\n return not (request.user and is_authenticated(request.user))\n\n return request.user and is_authenticated(request.user)", "def current_user_has_access(self):\n return self.user_has_access(users.get_current_user())", "def check_admin():\r\n if not current_user.is_admin:\r\n abort(403)", "def logged_in(self):\n return self.auth.get_user_by_session() is not None", "def index(request):\n try:\n if request.user.is_authenticated:\n return render(request, \"pages/index.html\")\n else:\n return redirect('login')\n\n except:\n return redirect('login')", "def check_admin():\n if not current_user.is_admin:\n abort(403)", "def check_admin():\n if not current_user.is_admin:\n abort(403)", "def check_auth():\n if not current_user.is_authenticated:\n return render_template('401.html', base_template=appbuilder.base_template, appbuilder=appbuilder), 401\n for role in current_user.roles:\n if 
appbuilder.get_app.config['AUTH_ROLE_ADMIN'] == role.name:\n return None\n return render_template('403.html', base_template=appbuilder.base_template, appbuilder=appbuilder), 403", "def require_login(self):\n\tif users.get_current_user():\n\t return True\n\telse:\n\t self.redirect(users.create_login_url(self.request.uri))\n\t return False", "def __before__(self):\n \n if not u'REMOTE_USER' in session: \n if not request.environ[u'PATH_INFO'] in self.public_urls:\n log.debug('PATH_INFO: %s' % request.environ[u'PATH_INFO'])\n #session[u'path_before_login'] = request.environ[u'PATH_INFO']\n #session.save()\n redirect(url('/users/index'))", "def test_logged_user_can_access(self):\n\n utils.create_user_and_authenticate(self)\n utils.test_can_access(self, self.url)", "def index(request):\n user = request.user\n if user.is_authenticated:\n validar_usuario(request.user)\n return redirect('gestion:menu')\n else:\n return render(request,'index.html')", "def has_read_permission(request):\n return request.user.is_authenticated", "def has_permission(self, request):\n\t\treturn request.user.is_active", "def require_auth(view):\n def wrapper(request, *args):\n \n if not request.session.get('user_id', False):\n return HttpResponseRedirect(\"/clanovi/login/\")\n \n return view(request, *args) \n return wrapper", "def logged_in():\n\n if current_user.is_authenticated:\n return True\n\n return False", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def test_private_pages_auth(self):\r\n auth_pages = (\r\n '/course/',\r\n )\r\n\r\n # These are pages that should just load when the user is logged in\r\n # (no data needed)\r\n simple_auth_pages = (\r\n '/course/',\r\n )\r\n\r\n # need an activated user\r\n self.test_create_account()\r\n\r\n # Create a new session\r\n self.client = AjaxEnabledTestClient()\r\n\r\n # Not logged in. 
Should redirect to login.\r\n print('Not logged in')\r\n for page in auth_pages:\r\n print(\"Checking '{0}'\".format(page))\r\n self.check_page_get(page, expected=302)\r\n\r\n # Logged in should work.\r\n self.login(self.email, self.pw)\r\n\r\n print('Logged in')\r\n for page in simple_auth_pages:\r\n print(\"Checking '{0}'\".format(page))\r\n self.check_page_get(page, expected=200)", "def is_valid(self):\n return self.user.is_authenticated", "def check_admin():\n\tif not current_user.is_admin:\n\t\tabort(403)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def is_logged_in():\n return 'user' in session", "def is_authorized(self, request, obj=None):\r\n return True", "def requires_auth(self):\n return True", "def is_accessible(self):\n return current_user.is_authenticated and current_user.role == 'admin'", "def is_accessible(self):\n return current_user.is_authenticated and current_user.role == 'admin'", "def has_permission(self, request):\n return request.user.is_active", "def has_permission(self, request):\n return request.user.is_active", "def has_permission(self, request):\n return request.user.is_active", "def is_access_allowed(self, user_id):\n ### DATABASE CODE GOES HERE\n return False", "def unauthorized():\n flash('You must be logged in to view that page')\n return redirect(url_for('catalog_bp.index'))", "def is_logged_in(self):\n return self.router.token is not None", "def has_permission(self, request, view):\n usuario = request.user\n return str(usuario) == \"AnonymousUser\"", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if 
current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def allowed_to_preview(user):\n if (\n user.is_authenticated and\n user.is_active and\n user.is_staff\n ):\n return True\n return False", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return self.login()", "def test_journal_route_accessible_only_if_logged_in(self):\n response = self.client.get(reverse_lazy('journal'))\n self.assertEqual(response.status_code, 302)", "def user_auth_inst(request):\n if request.user.is_authenticated:\n user = User.objects.get(email=request.user.email)\n if UserInformation.objects.filter(user=user).exists():\n inst = UserInformation.objects.get(user=user)\n if(inst.user_instructor):\n return True\n return False", "def is_accessible_by(self, user):\n return (self.public or\n (user.is_authenticated and\n (user.is_staff or self.users.filter(pk=user.pk).exists())))", "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def check_login_required(views_func):\n @wraps(views_func)\n def wrapper(request, *args, **kwargs):\n if request.user.is_authenticated:\n return views_func(request, *args, **kwargs)\n else:\n return HttpResponse(status=401)\n return wrapper", "def is_user_allowed(self, user):\n return user.is_staff", "def test_logged_user_can_access(self):\n\n utils.create_user_and_authenticate(self)\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)", "def should_render(\n self,\n context: Context,\n ) -> bool:\n request = context['request']\n user = request.user\n\n return (super().should_render(context=context) and\n user.is_authenticated and\n not is_site_read_only_for(user) and\n unified_banner_feature.is_enabled(request=request))", "def check_admin(self, *args, **kwargs):\n if not users.is_current_user_admin():\n self.response.write(\n '<div style=\"padding-top: 200px; height:178px; width: 500px; color: white; margin: 0 auto; font-size: 52px; text-align: center; background: url(\\'http://3.bp.blogspot.com/_d_q1e2dFExM/TNWbWrJJ7xI/AAAAAAAAAjU/JnjBiTSA1xg/s1600/Bank+Vault.jpg\\')\">Forbidden Access <a style=\\'color: white;\\' 
href=\\'%s\\'>Login</a></div>' %\n users.create_login_url(self.request.path_url + self.request.query_string))\n return\n else:\n return handler(self, *args, **kwargs)", "def has_permission(self, request, view):\n\n is_authenticated = request.user.is_authenticated()\n safe_request = request.method in permissions.SAFE_METHODS\n return is_authenticated and safe_request", "def admin_user_only(view):\r\n @google_login_required\r\n def wrapped(request, *args, **kwargs):\r\n if users.is_current_user_admin():\r\n return view(request, *args, **kwargs)\r\n context = RequestContext(request);\r\n return rtr( 'access_limited.html', context,None )\r\n return wraps(view)(wrapped)", "def test_should_render_with_authenticated(self) -> None:\n with override_feature_check(unified_banner_feature.feature_id, False):\n self.request.user = User.objects.get(username='doc')\n self.assertTrue(\n self.action.should_render(\n context=self._create_request_context(\n User.objects.get(username='doc'))))", "def is_user_authenticated(request):\n return request.session.session_key", "def is_authenticated(self):\r\n return self.authenticated", "def should_render(\n self,\n *,\n context: Context,\n ) -> bool:\n request = context['request']\n user = request.user\n\n return (super().should_render(context=context) and\n user.is_authenticated and\n not is_site_read_only_for(user) and\n not unified_banner_feature.is_enabled(request=request))", "def should_render(\n self,\n *,\n context: Context,\n ) -> bool:\n request = context['request']\n user = request.user\n\n return (super().should_render(context=context) and\n user.is_authenticated and\n not is_site_read_only_for(user) and\n not unified_banner_feature.is_enabled(request=request))", "def is_accessible(self):\n if login.current_user.is_authenticated:\n return login.current_user.is_admin()\n return False", "def before_request():\n if g.current_user.is_anonymous:\n return forbidden('Not signed in')\n\n if not g.current_user.confirmed:\n return forbidden('Unconfirmed account')", "def can_be_viewed_by(self,user):\n return True", "def test_page_view_permission(self):\n \n adminonlypage = create_page_in_admin(self.testproject,\"adminonlypage\",\n permission_lvl=Page.ADMIN_ONLY) \n registeredonlypage = create_page_in_admin(self.testproject,\"registeredonlypage\",\n permission_lvl=Page.REGISTERED_ONLY)\n publicpage = create_page_in_admin(self.testproject,\"publicpage\",\n permission_lvl=Page.ALL)\n \n self._test_page_can_be_viewed(self.projectadmin,adminonlypage)\n self._test_page_can_not_be_viewed(self.participant,adminonlypage)\n self._test_page_can_not_be_viewed(self.registered_user,adminonlypage) \n self._test_page_can_not_be_viewed(None,adminonlypage) # None = not logged in\n \n self._test_page_can_be_viewed(self.projectadmin,registeredonlypage)\n self._test_page_can_be_viewed(self.participant,registeredonlypage)\n self._test_page_can_not_be_viewed(self.registered_user,registeredonlypage)\n self._test_page_can_not_be_viewed(None,registeredonlypage) # None = not logged in\n \n self._test_page_can_be_viewed(self.projectadmin,publicpage)\n self._test_page_can_be_viewed(self.participant,publicpage)\n self._test_page_can_be_viewed(self.registered_user,publicpage)\n self._test_page_can_be_viewed(None,publicpage) # None = not logged in", "def is_visible(cls, request):\n if cls.permission_required:\n return request.user.has_perm(cls.permission_uri)\n else:\n return True", "def can_view_post(user):\n #only students and admins may use the search, submitForm functions\n return (not 
bool(user.is_staff) or user.is_superuser)" ]
[ "0.75106347", "0.74165606", "0.7196165", "0.7195553", "0.71893704", "0.7175715", "0.71588826", "0.71588826", "0.71588826", "0.71588826", "0.71588826", "0.71588826", "0.71588826", "0.71113145", "0.70598507", "0.7030012", "0.70088905", "0.699589", "0.69223857", "0.69180375", "0.69120365", "0.6895573", "0.6869566", "0.6869326", "0.68691695", "0.68532443", "0.6844825", "0.6809327", "0.6764613", "0.67613614", "0.6760781", "0.6760781", "0.67603564", "0.6759666", "0.67430437", "0.6738578", "0.67087203", "0.67061245", "0.6701092", "0.6695492", "0.6694849", "0.6690891", "0.6690891", "0.6690891", "0.66877246", "0.668717", "0.66821235", "0.66661596", "0.66661596", "0.66661596", "0.66661596", "0.6663385", "0.6658645", "0.6637744", "0.6612206", "0.6612206", "0.6611734", "0.6611734", "0.6611734", "0.65984905", "0.65938973", "0.6587656", "0.65628403", "0.6560608", "0.6560608", "0.6560608", "0.6560608", "0.6560608", "0.6560608", "0.6560608", "0.6560608", "0.6560608", "0.6560608", "0.6560608", "0.6560608", "0.6560608", "0.6557228", "0.65543705", "0.65332395", "0.6529083", "0.65261585", "0.65240973", "0.65240973", "0.65073985", "0.65070605", "0.6505294", "0.65024704", "0.64848727", "0.6484046", "0.64833456", "0.64740294", "0.64706576", "0.6466903", "0.6461381", "0.6461381", "0.6457303", "0.6431013", "0.6421082", "0.6414511", "0.64070535", "0.6407015" ]
0.0
-1
Checks unauthenticated users can not see the page
def test_setting_csv_unauth(self): path = reverse("setting-csv") request = RequestFactory().get(path) request.user = AnonymousUser() response = csv_setting(request) assert response.status_code == 302
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_in(self):\n response = self.c.get(reverse(submit_point), {'lat': 34.0, 'lng': 45.3, 'zoom': 13})\n self.assertEqual(response.status_code, 403)", "def unauthorized():\n flash('You must be logged in to view that page')\n return redirect(url_for('catalog_bp.index'))", "def test_unauthenticated(self):\n self.logout_user()\n\n response = self.client.get(self.api_link)\n self.assertContains(response, \"sign in to use private threads\", status_code=403)", "def test_if_not_available_for_unauthorized(self):\r\n res = self.not_authenticated.get(reverse(LIST_USER_URL),data={})\r\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def test_user_not_authorized(self):\n response = self.client.post(self.url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_not_logged_in(self):\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n AnonymousUser(), album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n AnonymousUser(), album.display_photo, ALBUM_PUBLIC)", "def test_unauthenticated_resource_allowed(self):\n raise NotImplementedError # FIXME", "def forbidden(self):\n self.flash(self._(\"You don't have the correct permissions to access this page.\"), category=\"error\")\n # TODO: maybe check barcamp and permissions for the barcamp homepage and redirect there instead\n # TODO: maybe create a remember decorator which remember the last page in the session which is safe to redirect to.\n # the forbidden handler should delete it though\n return redirect(self.url_for(\"index\"))", "def before_request() -> None:\n if current_user.is_anonymous() or not current_user.is_allowed():\n abort(401)", "def test_unauthenticated_user_denial(self):\n\n self.response = self.client.get(\"/api/users/users_list/\")\n self.assertEqual(self.response.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(\n 'Authentication credentials were not provided.', self.response.data['detail'])", "def get_authenticated_denied(self):", "def unauthorized():\n flash(\"You must be logged in to view that page.\")\n return redirect(url_for(\"auth.login_view\"))", "def test_unauthenticated(self):\n self.browser.open(\"http://nohost/plone/full_review_list\")\n self.assertTrue(\"Login Name\" in self.browser.contents)", "def testNoPermission(self):\n self.login_user(self.user)\n response = self.client.get(self.url, self.args)\n self.assertEqual(response.status_code, 302)", "def deny_access():\n flash('You must login first.')\n return 
redirect(url_for('home'))", "def test_not_logged_in(self):\n response = self.c.get(reverse(map_page), {'lat': 34.0, 'lng': 45.3})\n self.assertEqual(response.status_code, 200)", "def unauthorized_only(view_func):\n def is_anonymous(user):\n return user.is_anonymous()\n\n return user_passes_test(is_anonymous, login_url='/', redirect_field_name=None)(view_func)", "def unauthorized():\n flash('You must be logged in to view that page.', 'warning')\n return redirect(url_for('auth.login'))", "def test_unauthenticated_get(self):\n url = reverse('edit-list')\n\n response = self.client.get(url)\n self.assertEqual(403, response.status_code)\n self.assertEqual('Forbidden', response.status_text)\n self.assertTrue(\n 'credentials were not provided.' in response.data.get('detail'))", "def test_not_authenticated(self):\n response = self.client.get(telemetry_url)\n self.assertEqual(403, response.status_code)", "def test_if_forbiden_for_authenticated_permissions(self):\r\n res = self.client_authenticated.get(reverse(LIST_USER_URL),data={})\r\n self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)", "def unauthorized():\n #flash('You must be logged in to view that page.')\n return redirect(url_for('login'))", "def assert_cannot_view(obj):\n selenium_utils.open_url(obj.url)\n assert ui_utils.is_error_403()", "def test_returns_401_if_user_not_logged_in(self):\n # Act\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 401)", "def test_returns_401_if_user_not_logged_in(self):\n # Act\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 401)", "def test_returns_401_if_user_not_logged_in(self):\n # Act\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 401)", "def test_dashboard_not_signed(self):\n views_url = ('/dashboard/',\n '/accounts/picture/')\n #create a get request\n for view in views_url:\n response = self.client.get(view)\n #the user was not logged in, the user should be redirected\n self.assertEqual(response.status_code, 302,\n msg=str(response.request))", "def test_retrive_user_unauthenticated(self):\n res = self.client.get(ME_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_not_authenticated_non_public_course_with_other_username(self):\n self.client.logout()\n self.verify_response(403)", "def check_admin():\r\n if not current_user.is_admin:\r\n abort(403)", "def verify_privileged(self):\n community_text = self.fetch(self.base_url + \"/community\")\n return \"You must be logged in to see this page.\" not in community_text", "def test_instructor_page_access_nonstaff(self):\r\n self.login(self.enrolled_user)\r\n\r\n urls = [reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()}),\r\n reverse('instructor_dashboard', kwargs={'course_id': self.test_course.id.to_deprecated_string()})]\r\n\r\n # Shouldn't be able to get to the instructor pages\r\n for url in urls:\r\n check_for_get_code(self, 404, url)", "def check_admin():\n\tif not current_user.is_admin:\n\t\tabort(403)", "def test_auth_private_unowned(self):\n self.do_visible(False, 'pattieblack', False, tenant='froggy')", "def forbidden_page(error):\n return render_template(\"access_forbidden.html\"), 403", "def unauthorized():\n flask.flash('You must be logged in to view that page.')\n return redirect(url_for('auth.sign_in'))", "def test_not_authenticated_public_course_with_other_username(self):\n self.client.logout()\n self.verify_response(403)", "def before_request():\n if 
g.current_user.is_anonymous:\n return forbidden('Not signed in')\n\n if not g.current_user.confirmed:\n return forbidden('Unconfirmed account')", "def check_admin():\n if not current_user.is_admin:\n abort(403)", "def check_admin():\n if not current_user.is_admin:\n abort(403)", "def test_10_admin_user_not_listed(self):\r\n self.register()\r\n res = self.app.get('/admin/users', follow_redirects=True)\r\n assert \"Manage Admin Users\" in res.data, res.data\r\n assert \"Current Users with Admin privileges\" not in res.data, res.data\r\n assert \"John\" not in res.data, res.data", "def redirect_users_without_permissions(page, request, serve_args, serve_kwargs):\n if not has_permission(request.user, get_required_groups(page)): \n return redirect(NO_PERMISSIONS_REDIRECT_URL)", "def test_category_view_not_logged_in(testapp):\n from webtest.app import AppError\n with pytest.raises(AppError, message=\"403 Forbidden\"):\n testapp.get('/category/1')", "def test_get_no_permission(self):\n self.user.user_permissions.clear()\n response = self._get()\n self.assertRedirectsToLogin(response)", "def test_get_no_permission(self):\n self.user.user_permissions.clear()\n response = self._get()\n self.assertRedirectsToLogin(response)", "def test_retrieve_unauthorized_user(self):\n\n response = self.client.get(URL_ME)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_public_status_page_remove_authorization_from_public_status_page(self):\n pass", "def test_retrieve_user_unautherized(self):\n res = self.client.get(ME_URL)\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_not_authenticated(self):\n self.client.logout()\n response = self._get(get_kwargs=self._data())\n self._check_response(response, 101)\n self.assertEqual(UserFitbit.objects.count(), 1)", "def test_unauthenticated_request(self):\n url = self.get_url(self.active_user.id)\n response = self.client.get(url)\n\n expected_status_code = 401\n self.assertEqual(response.status_code, expected_status_code)", "def test_auth_public_unowned(self):\n self.do_visible(True, 'pattieblack', True, tenant='froggy')", "def test_anonymous_user_doesnt_have_lesson_state(self):\n self.client.force_authenticate(None)\n data = self.client.get(self.api_lesson_list_url).data\n\n for item in data.get('results'):\n self.assertNotIn('state', item)", "def get_everyone_denied(self):", "def forbidden(request):\n return Response(render_template('core/forbidden.html'),\n status=401, mimetype='text/html')", "def test_unauthorized_user_like_field(self):\n response = self.client.get(reverse('lessons-detail', args=(1,)))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertNotIn('like', response.data)", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def test_requester_is_no_admin(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.other_user_tok,\n )\n\n self.assertEqual(403, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.FORBIDDEN, channel.json_body[\"errcode\"])", "def check_unauthorized_response(response: HTTPResponse) -> bool:\n return response.status_code == 403", "def test_anonymous(self):\n self.logout_user()\n\n response = self.client.get(self.api_link)\n self.assertContains(response, \"sign in to use private threads\", 
status_code=403)", "def test_not_logged_user_cannot_leave(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_unauthenticated_user_disliking(self):\n self.like_dislike(self.like_url(6))", "def page_forbidden(e):\n return render_template(\"403.html\", page_title=403)", "def test_get_unauthenticated(self):\n del self.client.request_kwargs['auth']\n self.verify_get_response(self.client.get(STATUS_PATH))", "def test_authentication_is_not_required(self):\n # Act\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)", "def test_view_all_unauthenticated(self):\n response = self.client.get(reverse('crt_forms:crt-forms-index'))\n self.assertEqual(response.status_code, 302)\n self.assertRedirects(response, '/accounts/login/?next=/form/view')", "def test_logged_in_not_friend(self):\n\n # log in\n self.make_logged_in_not_friend()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u2, album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u2, album.display_photo, ALBUM_PUBLIC)", "def testGetAccessDenied(self):\n self.runGet(None)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user)\n self.response_403()", "def test_view_disabled(self, method, url):\n response = getattr(self.client, method)(url)\n assert response.status_code == 403", "def test_no_permission(self):\n override_acl(self.user, {'can_use_private_threads': 0})\n\n response = self.client.get(self.api_link)\n self.assertContains(response, \"t use private threads\", status_code=403)", "def test_logged_user_not_in_group_cannot_access(self):\n\n utils.create_user_and_authenticate(self)\n expected_url = reverse('my_groups_view')\n utils.test_cannot_access(self, self.url, expected_url)", "def test_retrieve_user_unauthorized(self):\n res = self.client.get(ME_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_retrieve_user_unauthorized(self):\n res = self.client.get(ME_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def is_authenticated(self):\n return False", "def test_04_admin_featured_apps_as_anonymous(self):\r\n res = self.app.get('/admin/featured', follow_redirects=True)\r\n assert \"Please sign in to access this page\" in res.data, res.data", "def test_home_as_anonymous(self):\n response = self.client.get(\"/images/contents/\")\n self.assertEqual(response.status_code, 401)", "def test_home_as_anonymous(self):\n response = self.client.get(\"/images/contents/\")\n self.assertEqual(response.status_code, 401)", "def test_auto_auth_disabled(self):\r\n response = self.client.get(self.url)\r\n self.assertEqual(response.status_code, 404)", "def test_not_authenticated_non_public_course_with_all_blocks(self):\n self.client.logout()\n self.query_params.pop('username')\n self.query_params['all_blocks'] = True\n self.verify_response(403)", "def denied_response(self, req):\n if req.remote_user:\n return HTTPForbidden(request=req)\n else:\n return HTTPUnauthorized(request=req)", "def __before__(self):\n \n if not u'REMOTE_USER' in session: \n if not request.environ[u'PATH_INFO'] in self.public_urls:\n log.debug('PATH_INFO: %s' % 
request.environ[u'PATH_INFO'])\n #session[u'path_before_login'] = request.environ[u'PATH_INFO']\n #session.save()\n redirect(url('/users/index'))", "def test_retrieve_user_unauthorized(self):\r\n res = self.client.get(ME_URL)\r\n\r\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_unauthenticated_user_liking(self):\n self.like_dislike(self.dislike_url(5))", "def test_no_user(self):\n self.request.user = None\n result = user_id_get_view(self.request)['d']\n self.assertEqual(result, error_dict('api_errors', 'not authenticated for this request'))", "def test_not_logged_cannot_update(self):\n\n utils.test_not_logged_cannot_access(self, self.url, self.data)", "def permission_denied(request):\n\treturn render(request, '403.html', None)", "def test_get_un_authenticated(self):\n\n url = reverse('post-detail', args=(self.user.id,))\n response = self.client.get(path=url)\n self.assertEqual(first=401, second=response.status_code)", "def test_no_permission(self):\n override_acl(self.user, {'can_use_private_threads': 0})\n\n response = self.client.get(self.api_link)\n self.assertContains(response, \"can't use private threads\", status_code=403)", "def testGetAccessDenied(self):\n self.runGet(None, sequencer=self.hiseq2000.vendor_id)\n self.response_401()\n for user in (self.norole, self.unrelated_owner):\n self.runGet(user)\n self.response_403()", "def test_not_authenticated_uri(self):\n request = self.factory.get(self.uri)\n response = self.view(request)\n response.render()\n self.assertEqual(response.status_code, 401,\n 'Expected Response Code 401, received {0} instead.'\n .format(response.status_code))", "def test__user_passed_as_none(self):\r\n access.has_access(None, 'staff', 'global', None)", "def test_not_logged_cannot_delete(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def handle_forbidden_for_homepage(self, request):\n\n login_url = request.link(Auth.from_request_path(request), name='login')\n\n if URL(request.url).path() == '/':\n return morepath.redirect(login_url)\n\n return handle_forbidden(self, request)", "def test_get_no_permission(self):\n self.user.user_permissions.clear()\n response = self._get()\n self.assertRedirectsToLogin(response)\n self._assert_no_change()", "def test_detail_blocked_forbidden_even_if_contributor(self):\n self.login(self.user1)\n resp = self.client.get(self.get_url(self.c3.pk))\n self.assert404(resp)" ]
[ "0.7784443", "0.7784443", "0.7784443", "0.7784443", "0.7555281", "0.73508805", "0.73336077", "0.7329427", "0.72262084", "0.72135955", "0.72135955", "0.715947", "0.70824975", "0.7073744", "0.7048739", "0.70219946", "0.69940305", "0.6948124", "0.6944818", "0.69262964", "0.6906128", "0.68984574", "0.6892031", "0.68789524", "0.6875266", "0.6836221", "0.6831557", "0.68220717", "0.68162036", "0.67892414", "0.6787131", "0.6787131", "0.6787131", "0.67844284", "0.6779533", "0.67786014", "0.67681843", "0.6764277", "0.67631596", "0.6738897", "0.67285275", "0.67243356", "0.6696555", "0.66925013", "0.6691913", "0.66871196", "0.66871196", "0.66700643", "0.6661624", "0.66516733", "0.66505754", "0.66505754", "0.6647447", "0.66442126", "0.6628061", "0.66279775", "0.66242796", "0.6615613", "0.66050065", "0.6596261", "0.65950656", "0.65928066", "0.6592073", "0.6592073", "0.65886575", "0.6578902", "0.6574714", "0.6572206", "0.6572079", "0.65592515", "0.65572643", "0.65447253", "0.65441287", "0.65265834", "0.65255547", "0.6505049", "0.64989406", "0.6486051", "0.6486051", "0.64855015", "0.64751834", "0.646654", "0.646654", "0.6462312", "0.64616907", "0.64595234", "0.6459501", "0.64586735", "0.645021", "0.64485395", "0.64456916", "0.6445281", "0.6444391", "0.64438", "0.6442233", "0.6439296", "0.64352673", "0.6429903", "0.64242536", "0.64163536", "0.6405156" ]
0.0
-1
Tests the import from local file for cities works fine
def test_csv_import_city(self): from django.contrib.messages import get_messages path = reverse("import-csv") user = mixer.blend(User, is_staff=True, is_superuser=True) file = open("city.csv") client = Client() client.force_login(user) r = client.post(path, {"title": "city", "csv_file": file}) messages = list(get_messages(r.wsgi_request)) assert r.status_code == 200 assert len(messages) == 1 assert str(messages[0]) == "Successfully Uploaded!"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetWorldCities():\n return GetDataFromCsvFile('world_cities.csv')", "def _import_insee_city(self, cr, uid, ids, data_dir, context=None):\n if context is None:\n context = {}\n filepath = os.path.abspath(os.path.join(data_dir, 'comsimp2011.csv'))\n city_obj = self.pool.get('insee.city')\n department_obj = self.pool.get('insee.department')\n with open(filepath, 'rb') as cityfile:\n reader = csv.DictReader(cityfile)\n for row in reader:\n args = [('dep', '=', row['DEP'])]\n department_ids = department_obj.search(cr, uid, args)\n department_id = department_ids and department_ids[0] or None\n ncc = row['ARTMAJ'] and row['ARTMAJ'].strip(\"()\") + \\\n row['NCC'] or row['NCC']\n nccenr = row['ARTMIN'] and row['ARTMIN'].strip(\"()\") + \\\n row['NCCENR'] or row['NCCENR']\n values = {\n 'cdc': row['CDC'],\n 'cheflieu': row['CHEFLIEU'],\n 'reg': row['REG'],\n 'dep': row['DEP'],\n 'department_id': department_id,\n 'com': row['COM'],\n 'ar': row['AR'],\n 'ct': row['CT'],\n 'tncc': row['TNCC'],\n 'artmaj': row['ARTMAJ'],\n 'ncc': ncc,\n 'artmin': row['ARTMIN'],\n 'nccenr': nccenr,\n }\n city_obj.create(cr, uid, values, context=context)", "def GetUsCities():\n return GetDataFromCsvFile('us_cities.csv')", "async def test_get_location_data(self):\n for city_name in ['dublin', 'London', 'Copenhagen']:\n response = await self.http_client.fetch(request=HTTPRequest(\n url=self.get_url(path=\"/location-data/{}\".format(city_name)),\n method='GET'\n ))\n self.assertEqual(response.code, HTTPStatus.OK)\n self.check_city_response(response, city_name.lower())", "def load_cities (filename):\n if not os.path.isfile(filename):\n return None\n # try to decode a plain file\n try:\n with open(filename) as input:\n return [ json.loads(line) for line in input if line ]\n except:\n pass\n # try to decode a gzipped file\n try:\n with gzip.open(filename) as input:\n return [ json.loads(line) for line in input if line ]\n except:\n pass\n return None", "def __init__(self):\n\n with open('../examples/streets.txt', 'r') as sf:\n self.streets = sf.read()\n self.streets = self.streets.lower()\n\n with open('../examples/cities.txt', 'r') as cf:\n self.cities = cf.read()\n self.cities = self.cities.lower()", "def setUp(self):\n self.my_city = City()", "def test_city_country(self):\n dublin_ireland = city_country('dublin', 'ireland')\n self.assertEqual(dublin_ireland, 'Dublin, Ireland')", "def read_locations(db, openfile):\n pass", "def test_city_country(self):\n formatted_city = get_full_city(\"santiago\", \"chile\")\n self.assertEqual(formatted_city, \"Santiago, Chile\")", "def read_cities(filename):\n reader = csv.reader(open(filename, \"rb\")) # may raise IOError\n rows = [line for line in reader]\n cities = [City(r[2], index, r[3], float(r[0]), float(r[1])) for index, r in enumerate(rows[1:])]\n return cities", "def test_path_to_location(self):\n\n print \"Starting import\"\n modulestore = XMLModuleStore(DATA_DIR, course_dirs=['toy', 'simple'])\n print \"finished import\"\n\n check_path_to_location(modulestore)", "def get_cities(self, city_name: str = \"\"):", "def test_path_to_location(self):\r\n\r\n print \"Starting import\"\r\n modulestore = XMLModuleStore(DATA_DIR, course_dirs=['toy', 'simple'])\r\n print \"finished import\"\r\n\r\n check_path_to_location(modulestore)", "def get_cities(self, city_name: str = None):", "def load_projector(self, projector_file):\n self._test(projector_file)", "def load_random_cities(data):\n cities = list(set([elem['name'] for elem in data]))\n city_objects = [City(data=city) for city in 
cities]\n City.objects.bulk_create(city_objects)", "def test_city_country(self):\n santiago_chile = city_country('santiago', 'chile')\n self.assertEqual(santiago_chile, 'Santiago, Chile')", "def test_city_country(self):\n santiago_chile = city_country('santiago', 'chile')\n self.assertEqual(santiago_chile, 'Santiago, Chile')", "def test_city_country(self):\n santiago_chile = city_country('santiago', 'chile')\n self.assertEqual(santiago_chile, 'Santiago, Chile')", "def test_city_country(self):\n santiago_chile = get_city_name('santiago', 'chile')\n self.assertEqual(santiago_chile, 'Santiago, Chile')", "def loadCity(fileid):\n dinf = {}\n root = etree.Element(\"city\")\n text = None\n statename = \"\"\n statefile = \"\"\n cityname = \"\"\n dinf['m'] = {}\n dinf['m']['events'] = {}\n # TODO: put this in a global variable, and make a function to populate it from the DTD.\n tags = [\"name\",\"state\",\"statefile\",\"start\",\"scue\",\"end\",\"ecue\",\"place\",\"aspects\"]\n for tag in tags:\n dinf[tag] = [\"\",False]\n dinf['aspects'] = {}\n if not dinf.get(\"places\"): dinf['places'] = {}\n if not idExists(fileid):\n status.push(0,\"new city created... '%s'\" % fileid)\n return dinf\n fn = os.path.join(config['realmdir'],\"%s.xml\" % fileid)\n status.push(0,\"loading city from XML... '%s'\" % fn)\n try:\n with codecs.open(fn,'rU','utf-8') as f:\n tree = etree.parse(f)\n f.close()\n root = tree.getroot()\n except IOError as e:\n print \"c: Could not open configuration file: %s\" % e\n\n ir = 0\n for i in range(len(root)):\n if root[i].tag is not None:\n if root[i].tag == \"place\":\n if len(root[i]) > 0:\n node = \"\"\n node = root[i].find(\"file\")\n if node.text:\n node = node.text.strip()\n node = common.validateFileid(node)\n dinf['places'][node] = {}\n for j in root[i]:\n if j.tag and j.text and j.tag != \"file\":\n dinf['places'][node][j.tag] = [j.text.strip(),False]\n if config['debug'] > 3: print dinf['places'][node]\n else:\n if config['debug'] > 0:\n print \"Invalid place tag:\"\n for c in root[i]:\n print c.tag + ': ' + c.text,\n else: # no relat length\n if config['debug'] > 0: print \"Empty place tag.\"\n elif root[i].tag == \"events\":\n if len(root[i]) > 0:\n nodes = root[i]\n for node in nodes:\n k = str(len(dinf['m']['events']))\n dinf['m']['events'][k] = {}\n for j in node:\n if j.tag and j.text:\n dinf['m']['events'][k][j.tag] = [j.text.strip(),False]\n else:\n if config['debug'] > 0:\n print \"Invalid milestone tag:\"\n for c in node:\n print c.tag + ': ' + c.text,\n if config['debug'] > 3: printPretty(dinf['m']['events'])\n else: # no relat length\n if config['debug'] > 0: print \"Empty milestone tag.\"\n elif root[i].tag == \"aspects\":\n if len(root[i]) > 0:\n nodes = root[i]\n for node in nodes:\n k = str(len(dinf['aspects']))\n dinf['aspects'][k] = {}\n if node.tag and node.text:\n dinf['aspects'][k] = [node.text.strip(),False]\n else:\n if config['debug'] > 0:\n print \"Invalid aspects tag:\"\n print node.tag + ': ' + node.text,\n else: # no aspects length\n if config['debug'] > 0: print \"Empty aspects tag.\"\n elif root[i].text is not None:\n if root[i].tag == \"statefile\":\n statefile = root[i].text.strip()\n statefile = common.validateFileid(statefile)\n if statefile is None: statefile = \"\"\n elif root[i].tag == \"state\":\n statename = root[i].text.strip()\n elif root[i].tag == \"name\":\n cityname = root[i].text.strip()\n dinf[root[i].tag] = [root[i].text.strip(), False]\n if config['debug'] > 2: print str(i) + \" \",\n if len(statefile) > 0: 
pushLoc(statefile,statename,fileid,cityname)\n return dinf", "def test_init(self, fixture_environment):\n\n # Generate city object\n city_object = cit.City(environment=fixture_environment)\n\n # Check inheritance from citydistrict object of pycity\n assert city_object._kind == 'citydistrict'", "def __import_locustfile__(filename, path):\n try:\n # Python 3 compatible\n source = importlib.machinery.SourceFileLoader(os.path.splitext(locustfile)[0], path)\n imported = source.load_module()\n except AttributeError:\n # Python 2.7 compatible\n import imp\n imported = imp.load_source(os.path.splitext(locustfile)[0], path)\n\n return imported", "def test_setting_csv_city(self):\n from django.contrib.messages import get_messages\n path = reverse(\"setting-csv\")\n user = mixer.blend(User, is_staff=True, is_superuser=True)\n client = Client()\n client.force_login(user)\n r = client.post(path, {\"title\": \"city\", \"url\": \"http://rachel.maykinmedia.nl/djangocase/city.csv\",\n \"username\": \"python-demo\", \"password\": \"claw30_bumps\", \"save\": \"on\"})\n messages = list(get_messages(r.wsgi_request))\n assert r.status_code == 200\n assert len(messages) == 1\n assert str(messages[0]) == \"Successfully Uploaded!\"", "def load_data(city, month, day):\n file_name = CITY_DATA.get(city)\n df = pd.read_csv(file_name)\n\n # convert \"Start Time\" column from string to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # create new column \"Month\" by extracting the month form datetime\n df['Month'] = df['Start Time'].dt.month\n\n # create new column \"Day\" by extracting the day form datetime\n df['Day'] = df['Start Time'].dt.day_name()\n df['Day'] = df['Day'].str.lower()\n\n # filter by month\n if month != \"all\":\n month_index = months.index(month) + 1\n df = df[df['Month'] == month_index]\n\n # filter by day\n if day != \"all\":\n df = df[df['Day'] == day]\n\n return df", "def load_data(city, month, day):\n if city == 'chicago':\n city_df = pd.read_csv('chicago.csv')\n elif city == 'new york city':\n city_df = pd.read_csv('new_york_city.csv')\n else:\n # city_df = pd.read_csv('washington.csv')\n print(\"else is running\")\n\n print(city_df.head())\n\n return city_df", "def import_from_file(jamsite, source='jammers.csv', fieldnames=None):\n\t# import jammers.csv\n\twith open(source) as csvfile:\n\t\tjamsite.mergeinsert( import_jammers(csvfile, fieldnames=fieldnames) )", "def test_find_cities(self):\n\n # Given\n game_state: CarcassonneGameState = CarcassonneGameState()\n\n city_one_side_straight_road = base_tiles[\"city_top_straight_road\"].turn(3)\n city_with_road = inns_and_cathedrals_tiles[\"ic_15\"].turn(3)\n\n game_state.board = [[None for column in range(2)] for row in range(1)]\n\n game_state.board[0][0] = city_with_road\n game_state.board[0][1] = city_one_side_straight_road\n\n # When\n cities: [City] = CityUtil.find_cities(\n game_state=game_state,\n coordinate=Coordinate(0, 0)\n )\n\n # Then\n self.assertEqual(1, len(cities))\n self.assertEqual(2, len(cities[0].city_positions))\n self.assertTrue(cities[0].finished)", "def expected_city_names_fixture():\n return {'b', 'a', 'c'}", "def load_data(city, month, day):\n\n print('\\nLoading Data...\\n')\n\n path = os.getcwd().replace('\\\\', '/') + '/'\n dir_path = os.path.dirname(os.path.realpath(__file__)).replace('\\\\', '/') + '/'\n try:\n df = pd.read_csv(dir_path + CITY_DATA.get(city))\n except FileNotFoundError as e:\n sys.exit('Error loading file. 
Make sure that the datafiles are in the working directory.\\npath: {}\\ndir_path: {}'.format(path, dir_path))\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.dayofweek\n df['hour'] = df['Start Time'].dt.hour\n\n if df is None:\n sys.exit('Error initializing dataframe. File was loaded successfully but load_data() failed.')\n\n return df", "def load_data(city, month, day):\n data_to_use = CITY_DATA[city]\n df = pd.read_csv(data_to_use)\n # drop rows containing NAN fields\n df2 = df.dropna()\n\n # Ensure the Start and End Time are Date\n pd.to_datetime(df2['Start Time'])\n pd.to_datetime(df2['End Time'])\n df = df2.sort_values(by='Start Time')\n\n # For each Start Time create additional columns to store year, month, day_of_week and hour\n # df['Start Year'] = df['Start Time'].apply(lambda x: get_part_of_datetime(x, 'year'))\n df['Start Month'] = df['Start Time'].apply(lambda x: get_part_of_datetime(x, 'month'))\n df['Start Day'] = df['Start Time'].apply(lambda x: get_part_of_datetime(x, 'day_of_week'))\n df['Start Hour'] = df['Start Time'].apply(lambda x: get_part_of_datetime(x, 'hour'))\n\n # filter month if month is not all\n if month.title() != 'All':\n df = df.loc[df['Start Month'] == month.title()]\n\n # filter day if day is not all\n if day.title() != 'All':\n df = df.loc[df['Start Day'] == day.title()]\n\n return df", "def load_data(city, month, day):\r\n # Make sure the city name is correct\r\n city_name = city.lower()\r\n\r\n if debug_flag:\r\n print(city_name)\r\n\r\n try:\r\n print('getting data from: ', CITY_DATA[city_name])\r\n df = pd.read_csv(CITY_DATA[city_name])\r\n except OSError as e:\r\n print(\"Error: cannot find the data files\")\r\n print(\" Please make sure they are available in the root folder\")\r\n print(\" and restart the program\\n\")\r\n finally:\r\n exit()\r\n\r\n\r\n try:\r\n # Build data frame columns:\r\n # Convert start time column to date time so we can work with it\r\n df['Start Time'] = pd.to_datetime(df['Start Time'])\r\n\r\n # Build month (num) column from \"start time\"\r\n df['Month'] = df['Start Time'].dt.month\r\n\r\n # Use start date to calculate start day (i.e. 
tuesday) column\r\n df['Start Day'] = df['Start Time'].dt.day_name()\r\n\r\n # build hour column from start day column\r\n df['Hour'] = df['Start Time'].dt.hour\r\n\r\n except:\r\n print (\"Unexpected error\")\r\n\r\n return df", "def create_list_csv_by_city(self, file_name, city_name):\n\n #We couldn't make it for this hackathon because we hadn't enough data and especially good data\n pass", "def internal_locations(source, include):\n with commit():\n import_internal_locations_from_json(source, include=include)", "def load_data(city, month, day):\n # Load data file into a dataframe.\n print('\\nLoading data for city = {}, month = {}, day = {}...'\n .format(city, month, day))\n df = pd.read_csv(CITY_DATA[city])\n\n # Convert the Start Time column to datetime.\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n # Extract month, day of week, hour from Start Time to create new columns.\n df['Month'] = [MONTHS[int(m)] for m in df['Start Time'].dt.month]\n df['Day of Week'] = df['Start Time'].dt.weekday_name\n df['Hour'] = df['Start Time'].dt.hour\n # Create a column for the start and end station pairs.\n df['Path'] = df['Start Station'] + ' => ' + df['End Station']\n\n # Filter by month, if applicable.\n if month != 'All':\n df = df[df['Month'] == month]\n # Filter by day of week, if applicable\n if day != 'All':\n df = df[df['Day of Week'] == day]\n return df", "def load_data(city, month, day):\n\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n # drop the unused 'Unnamed' column\n df = df.drop(\"Unnamed: 0\", axis=1)\n # convert the Start Time column to datetime\n df[\"Start Time\"] = pd.to_datetime(df[\"Start Time\"])\n # extract month, day of week and hour from Start Time to create new columns\n df[\"month\"] = df[\"Start Time\"].dt.month_name()\n df[\"day\"] = df[\"Start Time\"].dt.day_name()\n df[\"hour\"] = df[\"Start Time\"].dt.hour.astype(str)\n\n # filter by month if applicable\n if month != \"All\":\n # filter by month to create the new dataframe\n df = df.loc[df[\"month\"] == month]\n\n # filter by day of week if applicable\n if day != \"All\":\n # filter by day of week to create the new dataframe\n df = df.loc[df[\"day\"] == day]\n\n return df", "def load_data(city, month, day):\n \n filename = str(CITY_DATA.get(city))\n\n # load data file into a dataframe\n df = pd.read_csv(filename)\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month_int = months.index(month) +1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month_int] \n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n \n return df", "def initialize_cities() -> list:\n if get_supported_cities():\n supported_cities = get_supported_cities()\n else:\n supported_cities = []\n if os.path.exists(\"weights\"):\n for filename in os.listdir('weights'):\n if filename.endswith(\".pt\"):\n pretty_modelname = filename[:-3].replace(\"_\", \" \").title()\n supported_cities.append(pretty_modelname)\n else:\n print(\"There are no locally stored models. 
Try to connect the GUI with the DWH.\")\n Path(\"weights\").mkdir(mode=0o700, exist_ok=True)\n return supported_cities", "def test_pep8_conformance_city(self):\n Style = pep8.StyleGuide(quiet=True)\n result = Style.check_files(['models/city.py'])\n self.assertEqual(result.total_errors, 0,\n \"Found code style errors (and warnings).\")", "def load_data(city, month='all', day='all'):\n\n df = pd.read_csv(CITY_DATA[city.lower()]).rename(columns={'Unnamed: 0': 'Trip Id'})\n cols = df.columns\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['Month'] = df['Start Time'].dt.month\n df['Day of Week'] = df['Start Time'].dt.dayofweek\n df['Start Hour'] = df['Start Time'].dt.hour\n\n # Filter by month if applicable\n if month.lower() in MONTH_LIST:\n n_month = MONTH_LIST[month.lower()]\n df = df.loc[df['Month'] == n_month]\n\n # Filter by day of the week if applicable\n if day.lower() in DAY_LIST:\n n_day = DAY_LIST[day.lower()]\n df = df.loc[df['Day of Week'] == n_day]\n\n return df, cols", "def test_get_countries(self):\n pass", "def load_from_geojson(self, filename_or_url):", "def test_city_country(self):\n formatted_name = city_country('santiago', 'chile')\n self.assertEqual(formatted_name, 'Santiago, Chile')", "def test_city_country(self):\n\t\tformatted_address = city_country('santiago', 'chile')\n\t\tself.assertEqual(formatted_address, 'Santiago, Chile')", "def test_loading(self):\n self.assertIsInstance(self.data.districts, list)", "def test_list_zr_locations(self):\n pass", "def conf_load_senzory_locations_file(fin):\n file_name = fin.readline().strip()\n if file_name == '':\n raise EnvironmentError(\"Expected file name for LOCATIONS\")\n return load_senzory_locations(file_name)", "def test_get_city(client):\n response = client.get(\"/weather/curitiba\")\n assert response.status_code == 200", "def load():\n\n # To run this command type: 'python manage.py shell'\n # 'from map.views import load; load()'\n\n mapping = {\"productivi\": \"productivi\", \"mpoly\": \"MULTIPOLYGON\"}\n map_path = os.path.abspath('gis_django/fields_test/test_fields.shp')\n lm = LayerMapping(Map, map_path, mapping, transform=False, encoding=\"iso-8859-1\")\n lm.save(verbose=True)", "def load_entities():\n # TODO dynamic look into entities folder\n return ['location']", "def test_codebook_loads_from_local_file() -> None:\n\n # dump codebook to disk\n codebook_data: List = codebook_json_data_factory()\n with tempfile.TemporaryDirectory() as directory:\n codebook_json: str = os.path.join(directory, 'simple_codebook.json')\n with open(codebook_json, 'w') as f:\n json.dump(codebook_data, f)\n\n # load the codebook\n codebook = Codebook.from_json(codebook_json)\n assert codebook.sizes[Indices.ROUND] == 2\n assert codebook.sizes[Indices.CH] == 3\n assert codebook.sizes[Features.TARGET] == 2", "def read_cities(file_name):\n stream = open(file_name)\n data = stream.readlines()\n stream.close()\n roadmap = []\n for city_info in data: # For each record in data file\n if city_info != \"\\n\": # Ignore new line characters\n city_info = clean_data(city_info) # Clean the record\n roadmap.append(city_info) # Add each cleaned record to a list\n return roadmap", "def load_data(city, month, day):\n if city == 'new york city':\n df=pd.read_csv(\"./new_york_city.csv\")\n else: \n df=pd.read_csv(\"./\" + city + \".csv\")\n df['Start Time']=pd.to_datetime(df['Start Time'])\n df['month'] = df['Start Time'].dt.month\n df['day'] = df['Start Time'].dt.dayofweek\n df['hour'] =df['Start Time'].dt.hour\n if month !='all':\n 
df=df[df['month']==months_list[month]]\n if day != 'all':\n df=df[df['day']==days_list[day]]\n \n return df", "def import_locations(type_slug, zip_url):\n\n require('environment', provided_by=env.environments)\n locations_dir = '/tmp/fab_location_importer'\n if files.exists(locations_dir):\n sudo('rm -rf %s' % locations_dir, user=env.deploy_user)\n sudo('mkdir %s' % locations_dir, user=env.deploy_user)\n cmd = 'PYTHONPATH=%(code_root)s '\\\n 'DJANGO_SETTINGS_MODULE=openrural.local_settings '\\\n '%(virtualenv_root)s/bin/import_locations' % env\n with cd(locations_dir):\n sudo('wget -O locations.zip %s' % zip_url, user=env.deploy_user)\n sudo('unzip -d locations locations.zip', user=env.deploy_user)\n sudo(' '.join([cmd, type_slug, 'locations']), user=env.deploy_user)", "def test_load_csv_file():\n data = loader.load_csv_file(\"buildup/reference/comsol_solution/lofi/voltage.csv.bz2\")\n\n assert data.any()", "def main():\n langs = []\n\n with open(\"sql/07_populate.sql\", 'w', encoding='utf8') as sql:\n sql.write(\"--this file is generated from csv files in data folder\\n\\n\")\n\n langs = write_lang_city(sql)\n write_groups_diets(sql, langs)\n\n with open(\"sql/10_populate_test_data.sql\", 'w', encoding='utf8') as sql:\n sql.write(\"--this file is generated from csv files in moc_data folder\\n\\n\")\n write_test_data(sql)\n # This command sets postgis coordinates based on latitude and longitude\n sql.write(\"UPDATE restaurant SET geo_location = ST_POINT(latitude, longitude);\\n\")\n sql.close()", "def load_data(city, month, day):\n input_file_name = CITY_DATA.get(city)\n\n # Load the CSV file into a Pandas data frame\n df = pd.read_csv(input_file_name)\n\n # Convert the format of the existing date field to a python DateTime\n df[\"Start Time\"] = pd.to_datetime(df[\"Start Time\"])\n\n # Create new columns to filter on\n df[\"month\"] = df[\"Start Time\"].dt.month\n df[\"alpha_day\"] = df[\"Start Time\"].dt.weekday_name\n\n # If a month was provided, filter on it\n if month != \"all\":\n month_num = VALID_MONTHS.index(month) + 1\n df = df[df[\"month\"] == month_num]\n\n # If a day was provided, filter on it\n if day != \"all\":\n df = df[df[\"alpha_day\"] == day.title()]\n\n return df", "def load_data(city, month, day):\n \n if city == 'chicago':\n filename = 'chicago.csv'\n elif city == 'new York':\n filename = 'new_york_city.csv'\n elif city == 'washington':\n filename = 'washington.csv'\n else:\n return -1\n \n df = pd.read_csv(filename)\n \n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['End Time'] = pd.to_datetime(df['End Time'])\n \n if month != 'all':\n df = df[df['Start Time'].dt.month == month]\n\n if day != 'all':\n df = df[df['Start Time'].dt.weekday == day]\n \n return df", "def test_csv_reader_data_contents(process_data):\n data = process_data(file_name_or_type='clean_map.csv')\n\n # Check row types\n for row in data:\n assert(isinstance(row['Country'], str))\n assert(isinstance(row['City'], str))\n assert(isinstance(row['State_Or_Province'], str))\n assert(isinstance(row['Lat'], float))\n assert(isinstance(row['Long'], float))\n assert(isinstance(row['Altitude'], float))\n\n # Basic data checks\n assert len(data) == 180 # We have collected 180 rows\n assert data[0]['Country'] == 'Andorra'\n assert data[106]['Country'] == 'Japan'", "def load_data(city, month, day):\r\n\r\n #creating data frame from csv\r\n df = pd.read_csv(CITY_DATA[city])\r\n\r\n #converting Start Time row into datetime data type\r\n df['Start Time']=pd.to_datetime(df['Start Time'])\r\n\r\n 
#Extracting month and week day from 'Start Time' row\r\n df['Month'] = df['Start Time'].dt.month_name()\r\n df['Day of Week'] = df['Start Time'].dt.day_name()\r\n\r\n #filter by month\r\n if month != 'All':\r\n df = df[df['Month'] == month]\r\n\r\n #filter by day\r\n if day != 'All':\r\n df = df[df['Day of Week'] == day.title()]\r\n\r\n #Returns the selected file as a dataframe (df) with relevant columns\r\n return df", "def load_data(city, month, day):\n\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start, end Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['End Time'] = pd.to_datetime(df['End Time'])\n # Calculate the travel time per trip and add that column to data frame.\n df['Travel Time'] = df['End Time'] - df['Start Time']\n\n # extract month and day of week from Start Time to create new columns\n df['Start Hour'] = df['Start Time'].dt.hour\n df['End Hour'] = df['End Time'].dt.hour\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "def cities(self):\n from models.engine.file_storage import FileStorage\n from models.city import City\n fs = FileStorage.all(City)\n city_list = []\n for key, value in fs.items():\n if 'City' in key and self.id == value.state_id:\n '''Append City instances maybe fucked up here!!!'''\n city_list.append(value)\n return city_list", "def load_data(city, month, day):\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n df['Start and End Stations'] = df['Start Station'] + ' and ' + df['End Station']\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "def test_client_nationlities_list(self):\n pass", "def load_data(city, month, day):\n try:\n df = pd.read_csv(CITY_DATA[city])\n\n df['Start Time'] = pd.to_datetime(df['Start Time'], errors='coerce')\n df['End Time'] = pd.to_datetime(df['End Time'])\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n df['hour'] = df['Start Time'].dt.hour\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = MONTH_LIST.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day 
of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df\n except Exception as e:\n print('Couldn\\'t load the file, as an Error occurred: {}'.format(e))", "def import_from_url(jamsite, url, fieldnames=None):\n\t# import csv, from the webz.\n\tcsvfile = fetch_csv_from_url(url)\n\tjamsite.mergeinsert( import_jammers(csvfile, fieldnames=fieldnames) )", "def load_data(city, month, day):\n #Load data for city\n print(\"\\nCity Data..\")\n df = pd.read_csv(CITY_DATA[city])\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['month'] = df['Start Time'].dt.month\n df['day_week'] = df['Start Time'].dt.day_name()\n df['hour'] = df['Start Time'].dt.hour\n if month != '7':\n months = ['1', '2', '3', '4', '5', '6']\n month = months.index(month) + 1\n df = df[df['month'] == month]\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_week'] == day.title()]\n return df", "def test_city_country(self):\n your_location = location_name(\"lviv\", \"ukraine\")\n self.assertEqual(your_location, \"Lviv, Ukraine\")", "def test_site_load_local_file(self):\n with patch('__builtin__.open', mock_open(read_data=self.test_config)) as mock_file:\n test_config = ef_site_config.EFSiteConfig().load_from_local_file()\n self.assertEqual(test_config[\"ENV_ACCOUNT_MAP\"][\"test\"], \"testaccount\")", "def test_retrieve_l_organization_locations(self):\n pass", "def test_import_string(self):\n assert utils.import_string('ttgn.pokedex.utils') == utils", "def test_CL04_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. import CL04 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.CL04\", test]", "def load_data(city, month, day):\r\n if city.lower() == \"chicago\" or city.lower() == \"c\":\r\n filename = 'C:\\\\Neha\\\\Udacity\\\\BikeShare\\\\chicago.csv'\r\n elif city.lower() == \"New York\" or city.lower() == \"new york\":\r\n filename = 'C:\\\\Neha\\\\Udacity\\\\BikeShare\\\\new_york_city.csv'\r\n elif city.lower() == \"Washington\" or city.lower() == \"washington\":\r\n filename = 'C:\\\\Neha\\\\Udacity\\\\BikeShare\\\\washington.csv'\r\n # load data file into a dataframe\r\n #df = pd.read_csv(CITY_DATA[\"city\"])\r\n df = pd.read_csv(filename)\r\n # convert the Start Time column to datetime\r\n df['Start Time'] = pd.to_datetime(df['Start Time'])\r\n\r\n # extract month and day of week from Start Time to create new columns\r\n df['month'] = df['Start Time'].dt.month\r\n df['day_of_week'] = df['Start Time'].dt.weekday_name\r\n\r\n # filter by month if applicable\r\n if month != 'all':\r\n # use the index of the months list to get the corresponding int\r\n months = ['January', 'February', 'March', 'April', 'May', 'June']\r\n month = months.index(month) + 1\r\n\r\n # filter by month to create the new dataframe\r\n df = df[df['month'] == month]\r\n\r\n # filter by day of week if applicable\r\n if day != 'all':\r\n # filter by day of week to create the new dataframe\r\n df = df[df['day_of_week'] == day.title()]\r\n\r\n return df", "def load_data(city, month, day):\n\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city.lower()])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by month if applicable\n if month 
!= 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month.lower()) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n return df", "def test_load_coinbasettr(self):\n with open(self.filename) as f:\n coinbasettr.CoinbaseTTRParser(csv_content=f)\n parser = coinbasettr.CoinbaseTTRParser(filename=self.filename)\n parser.cleanup()", "def test_get_imports(self):\n pass", "def get_cities() -> list:\n results = []\n with open('src/craigslist_cities.txt', 'r', encoding='utf8') as file:\n for line in file:\n results.append(line.strip())\n return results", "def load_data(city, month, day):\n # load file into dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # month and day of week from Start Time, creating new columns. Return month name, not integer.\n df['month'] = df['Start Time'].dt.strftime('%B')\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by month, if applicable\n if month != 'all':\n # use index of months list to get corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n # filter by day of week, if applicable\n if day!= 'all':\n # filter by day of week to create new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "def test_city_country_population(self):\n \tdublin_ireland = city_country('dublin', 'ireland', population=500000)\n \tself.assertEqual(dublin_ireland, 'Dublin, Ireland - Population 500000')", "def test_addr_city_good_values(self):\n for input_val, output_val in self.known_values:\n self.line._parse_addr_city(input_val)\n self.assertEqual(output_val, self.line.addr_city)", "def test_loader_loads_from_file():\n base_json = 'tests/test_json.json'\n json_test = {\"foo\": \"bar\"}\n assert whenzat.loader(base_json) == json_test", "def load_locustfile(path):\n\n def __import_locustfile__(filename, path):\n \"\"\"\n Loads the locust file as a module, similar to performing `import`\n \"\"\"\n try:\n # Python 3 compatible\n source = importlib.machinery.SourceFileLoader(os.path.splitext(locustfile)[0], path)\n imported = source.load_module()\n except AttributeError:\n # Python 2.7 compatible\n import imp\n imported = imp.load_source(os.path.splitext(locustfile)[0], path)\n\n return imported\n\n # Start with making sure the current working dir is in the sys.path\n sys.path.insert(0, os.getcwd())\n # Get directory and locustfile name\n directory, locustfile = os.path.split(path)\n # If the directory isn't in the PYTHONPATH, add it so our import will work\n added_to_path = False\n index = None\n if directory not in sys.path:\n sys.path.insert(0, directory)\n added_to_path = True\n # If the directory IS in the PYTHONPATH, move it to the front temporarily,\n # otherwise other locustfiles -- like Locusts's own -- may scoop the intended\n # one.\n else:\n i = sys.path.index(directory)\n if i != 0:\n # Store index for later restoration\n index = i\n # Add to front, then remove from original position\n sys.path.insert(0, directory)\n del sys.path[i + 1]\n # Perform the import\n imported = __import_locustfile__(locustfile, path)\n # Remove directory from path if we added it ourselves 
(just to be neat)\n if added_to_path:\n del sys.path[0]\n # Put back in original index if we moved it\n if index is not None:\n sys.path.insert(index + 1, directory)\n del sys.path[0]\n # Return our two-tuple\n locusts = dict(filter(is_locust, vars(imported).items()))\n return imported.__doc__, locusts", "def testImport(self):\n success = False\n try:\n from cutlass import DiseaseMeta\n success = True\n except:\n pass\n\n self.failUnless(success)\n self.failIf(DiseaseMeta is None)", "def candidates_import_from_sample_file():\n # Load saved json from local file\n logger.info(\"Loading CandidateCampaigns from local file\")\n\n with open(\"candidate/import_data/candidates_sample.json\") as json_data:\n structured_json = json.load(json_data)\n\n return candidates_import_from_structured_json(structured_json)", "def test_DL_import_from_constructor(self):\n filepath = '7.txt'\n original_dl = flow_processing_input.DetectorsLocation(2021)\n original_dl.detectors_location_dict = createDLDataset(10).dataset\n original_dl.export_to_file(filepath)\n new_dl = flow_processing_input.DetectorsLocation(2021, filepath)\n os.remove(filepath)\n # Check if new_dl contains the same attributes as the original_dl\n self.assertTrue(new_dl == original_dl)", "def GetCountries():\n return GetDataFromCsvFile('countries.csv')", "def acquire_data(city):\n\n filename = FILENAME_TEMPLATE.format(city)\n\n text = open(filename).read()\n lines = text.splitlines()\n\n data_lines = lines[1:]\n\n return data_lines", "def import_module(self, location, name):", "def test_csv_import_hotel_success(self):\n from django.contrib.messages import get_messages\n path = reverse(\"import-csv\")\n user = mixer.blend(User, is_staff=True, is_superuser=True)\n file = open(\"city.csv\")\n client = Client()\n client.force_login(user)\n client.post(path, {\"title\": \"city\", \"csv_file\": file})\n file = open(\"hotel.csv\")\n r = client.post(path, {\"title\": \"hotel\", \"csv_file\": file})\n messages = list(get_messages(r.wsgi_request))\n assert r.status_code == 200\n assert len(messages) == 1\n assert str(messages[0]) == \"Successfully Uploaded!\"", "def load_data(city, month, day):\n df = pd.read_csv(CITY_DATA[city])\n\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n df['month_names'] = df['Start Time'].dt.month\n df['day_names'] = df['Start Time'].dt.weekday\n df['hour'] = df['Start Time'].dt.hour\n\n if month != 'all':\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n df = df[df['month_names'] == month]\n\n if day != 'all':\n day_name = ['sunday', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday']\n day = day_name.index(day) + 1\n df = df[df['day_names'] == day]\n\n return df", "def init(city: str, country: str, list_of_streets: list):\n if check(city, country, list_of_streets):\n return get_sample_data(city, country, list_of_streets)", "def load_data(city, month, day):\n\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['End Time'] = pd.to_datetime(df['End Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['hour'] = df['Start Time'].dt.hour\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 
'april', 'may', 'june']\n month = months.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "def important_cities(self,\n cities_path=r\"/mnt/data/shared/important_cities.csv\"):\n\n df_cities = pd.read_csv(cities_path)\n\n for i, name in enumerate(list(df_cities.city)):\n plt.plot(df_cities.long[i], df_cities.lat[i],\n marker=self.city_marker,\n color=self.city_markercolor,\n markersize=self.city_markersize)\n\n plt.annotate(name,\n (df_cities.long[i]+0.03, df_cities.lat[i]),\n fontsize=self.fontsize)", "def load_data(city, month, day):\n #Used the practice#3 from Project solution here to convert time columns to month and weekday_name\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n #print('this is the month', month)\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n \n return df", "def fixture_example_data():\n import_example_data()", "def load_data(city, month, day):\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n \n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n \n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n \n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june', 'july', 'august', 'september', 'october', 'november', 'december']\n month = months.index(month) + 1\n \n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n \n return df", "def load_data(city, month, day):\n\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == 
day.title()]\n\n return df", "def load_data(city, month, day):\n\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "def test_LC18_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. import LC18 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.LC18\", test]" ]
[ "0.6581644", "0.64106566", "0.6348623", "0.6177727", "0.59844106", "0.58887535", "0.58847845", "0.5876422", "0.58704174", "0.5821457", "0.5782551", "0.57631767", "0.5752825", "0.573229", "0.5729731", "0.5711235", "0.56939304", "0.5689702", "0.5689702", "0.5689702", "0.5674587", "0.56686014", "0.56476337", "0.562758", "0.5616515", "0.5608479", "0.5598399", "0.55836976", "0.55825526", "0.55596155", "0.55475134", "0.5540718", "0.55366516", "0.55239", "0.5491514", "0.5483278", "0.5478354", "0.5473124", "0.5472115", "0.5465148", "0.54612213", "0.5451237", "0.5449163", "0.54454875", "0.54445845", "0.5440849", "0.5428426", "0.54201424", "0.54069257", "0.5405177", "0.54042363", "0.5403489", "0.5399256", "0.5390674", "0.5389819", "0.53881985", "0.5382219", "0.53764206", "0.5375715", "0.53688115", "0.5357307", "0.53554875", "0.5352646", "0.534352", "0.53412026", "0.53410774", "0.5328062", "0.5323141", "0.53214914", "0.53203046", "0.5319182", "0.53164935", "0.5309874", "0.530811", "0.52950376", "0.5283926", "0.52835554", "0.5281871", "0.5280941", "0.52788985", "0.52780837", "0.52769756", "0.5273376", "0.5273313", "0.52683693", "0.5267489", "0.5266297", "0.5265473", "0.52585655", "0.5257524", "0.5254223", "0.5254055", "0.52523583", "0.5251489", "0.5247761", "0.5245753", "0.5245353", "0.5242367", "0.5242367", "0.5240193" ]
0.656946
1
Tests that the import of hotels from a local file works fine
def test_csv_import_hotel_success(self):
    from django.contrib.messages import get_messages
    path = reverse("import-csv")
    user = mixer.blend(User, is_staff=True, is_superuser=True)
    file = open("city.csv")
    client = Client()
    client.force_login(user)
    client.post(path, {"title": "city", "csv_file": file})
    file = open("hotel.csv")
    r = client.post(path, {"title": "hotel", "csv_file": file})
    messages = list(get_messages(r.wsgi_request))
    assert r.status_code == 200
    assert len(messages) == 1
    assert str(messages[0]) == "Successfully Uploaded!"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_imports(self):\n\n # DEVICES\n from surrortg.devices.udp import ( # noqa:F401\n UdpActuator,\n UdpBot,\n UdpCar,\n UdpInput,\n )\n from surrortg.devices.udp.udp_protocol import ( # noqa:F811,F401\n open_remote_endpoint,\n open_local_endpoint,\n open_remote_endpoint,\n )\n\n # INPUTS\n from surrortg.inputs import ( # noqa:F401\n Input,\n Switch,\n DelayedSwitch,\n Joystick,\n Directions,\n LinearActuator,\n )\n\n # NETWORK\n from surrortg.network import ( # noqa:F401\n SocketHandler,\n MessageRouter,\n MultiSeatMessageRouter,\n )\n\n # ROOT\n from surrortg import Game, GameIO # noqa:F401", "def test_get_imports(self):\n pass", "def import_module(self, location, name):", "def test_import_string(self):\n assert utils.import_string('ttgn.pokedex.utils') == utils", "def __import_locustfile__(filename, path):\n try:\n # Python 3 compatible\n source = importlib.machinery.SourceFileLoader(os.path.splitext(locustfile)[0], path)\n imported = source.load_module()\n except AttributeError:\n # Python 2.7 compatible\n import imp\n imported = imp.load_source(os.path.splitext(locustfile)[0], path)\n\n return imported", "def test_LC18_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. import LC18 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.LC18\", test]", "def test_import_system_asset(self):\n pass", "def importer():\n pass", "def test_import():\n import pyapp", "def test_import():\n import chrisbrake\n assert chrisbrake", "def test_import_test_asset(self):\n pass", "def test_imports():\n from .context import readersender # noqa: F401", "def test_molecool_imported():\n assert \"molecool\" in sys.modules", "def test_imports():\n from tg_utils import admin\n from tg_utils import checks\n from tg_utils import compressor_filters\n from tg_utils import email\n from tg_utils import files\n from tg_utils import hashmodels\n from tg_utils import lock\n from tg_utils import managers\n from tg_utils import mixins\n from tg_utils import models\n from tg_utils import profiling\n from tg_utils import signals\n from tg_utils import uuid\n from tg_utils import decorators", "def test_importable():\n root_path = os.path.dirname(MY_DIRECTORY)\n\n for version in versioning.get_all_versions():\n v = version.label.replace(\".\", \"_\")\n path = os.path.join(root_path, v)\n module_names = [m[:-3] for m in os.listdir(path) if m.endswith(\".py\")]\n for name in module_names:\n m = importlib.import_module(\".\".join([\"kuber\", v, name]))\n assert m is not None, f\"Expected kuber.{v}.{m} to be importable.\"", "def test_CL13_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. import CL13 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.CL13\", test]", "def test_NKT13_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. import NKT13 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.NKT13\", test]", "def test_import_software_asset(self):\n pass", "def test_dag_load(self):\n\n with ObservatoryEnvironment().create():\n dag_file = os.path.join(module_file_path(\"academic_observatory_workflows.dags\"), \"openalex_telescope.py\")\n self.assert_dag_load(\"openalex\", dag_file)", "def test_CL04_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. 
import CL04 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.CL04\", test]", "def test_import_local_class(self):\n import_function(determine_package(LocalClass))\n assert f() == \"My name is f.\"", "def test_import_process(self):\r\n good_file = self._get_file()\r\n imp = Importer(good_file, username=u\"admin\")\r\n imp.process()\r\n\r\n # now let's do some db sanity checks\r\n self._chrome_data_test()", "def load_enroller(self, enroller_file):\n self._test(enroller_file)", "def _import(file_path):\n proxy_factory.import_proxies(open(file_path, 'r'))", "def test_import(self, game=\"SuperMarioKart-Snes\"):\n self.assertTrue(game in retro.data.list_games())", "def test_regressions_imports(self):\n issue = {\n \"number\": \"main/main\",\n \"contract\": \"C\",\n \"txlimit\": 1,\n \"in_directory\": \"imports_issue\",\n }\n self._simple_cli_run(\n f'{issue[\"number\"]}.sol',\n contract=issue[\"contract\"],\n tx_limit=issue[\"txlimit\"],\n in_directory=issue.get(\"in_directory\"),\n )", "def testPynocleImportsPynocle(self):\r\n self.buildTempDirs()\r\n expected = os.path.join(THISDIR, '__init__')\r\n self.assertEqual(expected, modulefinder.get_module_filename('pynocle', __file__))", "def test_import(self):\n try:\n import gtcal\n except ImportError:\n self.fail(\"Could not import gtcal\")", "def fixture_example_data():\n import_example_data()", "def test_S16_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. import S16 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.S16\", test]", "def testPreProcessedImport(self):\n a = 'a.mojom'\n self.WriteFile(a, \"\"\"\\\n module a;\n struct Bar {};\"\"\")\n self.ParseMojoms([a])\n\n b = 'b.mojom'\n self.WriteFile(\n b, \"\"\"\\\n module b;\n import \"a.mojom\";\n struct Foo { a.Bar bar; };\"\"\")\n self.ParseMojoms([b])", "def test_imports():\n assert False", "def test_xchemOT_imported():\n assert \"xchemOT\" in sys.modules", "def test_dag_load(self):\n\n with ObservatoryEnvironment().create():\n dag_file = os.path.join(module_file_path(\"academic_observatory_workflows.dags\"), \"unpaywall_telescope.py\")\n self.assert_dag_load(\"unpaywall\", dag_file)", "def test_import_process(self):\r\n good_file = self._get_file()\r\n imp = Importer(good_file, username=u\"admin\")\r\n imp.process()\r\n\r\n # now let's do some db sanity checks\r\n self._firefox_data_test()", "def test_load(self):\n command = constituencies.Command()\n command.handle('load', silent=True)", "def load_locustfile(path):\n\n def __import_locustfile__(filename, path):\n \"\"\"\n Loads the locust file as a module, similar to performing `import`\n \"\"\"\n try:\n # Python 3 compatible\n source = importlib.machinery.SourceFileLoader(os.path.splitext(locustfile)[0], path)\n imported = source.load_module()\n except AttributeError:\n # Python 2.7 compatible\n import imp\n imported = imp.load_source(os.path.splitext(locustfile)[0], path)\n\n return imported\n\n # Start with making sure the current working dir is in the sys.path\n sys.path.insert(0, os.getcwd())\n # Get directory and locustfile name\n directory, locustfile = os.path.split(path)\n # If the directory isn't in the PYTHONPATH, add it so our import will work\n added_to_path = False\n index = None\n if directory not in sys.path:\n sys.path.insert(0, directory)\n added_to_path = True\n # If the directory IS in the PYTHONPATH, move it to the front temporarily,\n # otherwise other locustfiles -- like Locusts's own -- may scoop the intended\n # one.\n else:\n i = 
sys.path.index(directory)\n if i != 0:\n # Store index for later restoration\n index = i\n # Add to front, then remove from original position\n sys.path.insert(0, directory)\n del sys.path[i + 1]\n # Perform the import\n imported = __import_locustfile__(locustfile, path)\n # Remove directory from path if we added it ourselves (just to be neat)\n if added_to_path:\n del sys.path[0]\n # Put back in original index if we moved it\n if index is not None:\n sys.path.insert(index + 1, directory)\n del sys.path[0]\n # Return our two-tuple\n locusts = dict(filter(is_locust, vars(imported).items()))\n return imported.__doc__, locusts", "def test_path_to_location(self):\r\n\r\n print \"Starting import\"\r\n modulestore = XMLModuleStore(DATA_DIR, course_dirs=['toy', 'simple'])\r\n print \"finished import\"\r\n\r\n check_path_to_location(modulestore)", "def test_path_to_location(self):\n\n print \"Starting import\"\n modulestore = XMLModuleStore(DATA_DIR, course_dirs=['toy', 'simple'])\n print \"finished import\"\n\n check_path_to_location(modulestore)", "def test_import():\n assert tfio is not None", "def testImport(self):\n success = False\n try:\n from cutlass import DiseaseMeta\n success = True\n except:\n pass\n\n self.failUnless(success)\n self.failIf(DiseaseMeta is None)", "def test_import_process(self):\r\n good_file = self._get_google_file()\r\n imp = Importer(good_file, username=u\"admin\")\r\n imp.process()\r\n\r\n # now let's do some db sanity checks\r\n self._google_data_test()", "def test_imports():\n import pylablib.core.fileio.binio\n import pylablib.core.fileio.datafile\n import pylablib.core.fileio.dict_entry\n import pylablib.core.fileio.loadfile\n import pylablib.core.fileio.location\n import pylablib.core.fileio.logfile\n import pylablib.core.fileio.parse_csv\n import pylablib.core.fileio.savefile", "def test_import_allows_multiple_modules_successful(self):\n # Deliberately using modules that will already be imported to avoid side effects.\n feature = LazyImportTester([\"site\", \"sys\"])\n with mock_availability_test(feature) as check:\n check.assert_not_called()\n self.assertTrue(feature)\n check.assert_called_once()", "def load(path):\n pass", "def test_mmelemental_imported():\n import sys\n\n assert \"mmelemental\" in sys.modules", "def test():\n root_path = os.path.dirname(os.path.realpath(__file__))\n test_path = os.path.join(root_path, 'test_files')\n with open(os.path.join(test_path, 'hexagons0.geojson')) as f:\n hexagons_old = load(f)\n with open(os.path.join(test_path, 'hexagons1.geojson')) as f:\n hexagons_new = load(f) \n return hexagons_new, hexagons_old", "def test_load_testcase(self):\n tests = self.loader.load(\"tests.sampletest.hellotest.HelloTest\")\n self.assertEqual(len(tests), 1)\n from tests.sampletest.hellotest import HelloTest\n\n self.assertEqual(type(tests[0]), HelloTest)", "def testImport(self):\n a = 'a.mojom'\n b = 'b.mojom'\n self.WriteFile(\n a, \"\"\"\\\n module a;\n import \"b.mojom\";\n struct Foo { b.Bar bar; };\"\"\")\n self.WriteFile(b, \"\"\"\\\n module b;\n struct Bar {};\"\"\")\n self.ParseMojoms([a, b])\n\n ma = self.LoadModule(a)\n mb = self.LoadModule(b)\n self.assertEqual('a.mojom', ma.path)\n self.assertEqual('b.mojom', mb.path)\n self.assertEqual(1, len(ma.imports))\n self.assertEqual(mb, ma.imports[0])", "def test_loader_loads_from_file():\n base_json = 'tests/test_json.json'\n json_test = {\"foo\": \"bar\"}\n assert whenzat.loader(base_json) == json_test", "def test_import_infra(self):\n project = Project.create()\n # Read an engine 
and check\n infra = import_infra(\"A320.xml\", \"engine\")\n self.assertEqual(len(infra.engines), 1)\n engine = infra.engines[0]\n self.assertEqual(engine.name, \"Machine 0\")\n self.assertEqual(engine.hauteur, 0.0)\n # Local frame:\n self.assertEqual(engine.position.x, 0.0)\n self.assertEqual(engine.position.y, 0.0)\n self.assertEqual(engine.position.z, 0.0)\n\n # Read a building and check\n infra = import_infra(\"Building.xml\", \"building\")\n self.assertEqual(len(infra.buildings), 1)\n building = infra.buildings[0]\n self.assertEqual(building.name, \"MyBuilding\")\n self.assertEqual(building.hauteur, 0.0)\n # Local frame:\n self.assertEqual(building.position.x, 0.0)\n self.assertEqual(building.position.y, 0.0)\n self.assertEqual(building.position.z, 0.0)\n\n # Check a no radiant building is refused:\n try:\n infra = import_infra(\"Building_no_radiant.xml\", \"building\")\n except:\n print(\"Ok, non radiant building is refused as expected.\")\n else:\n print(\"Non radiant building should be refused.\")\n sys.exit(-1)", "def test_documentation_popxl_code_loading(self):\n filename = \"code_loading.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_module_imports(self):\n apps = [\n 'customers',\n 'customers.migrations',\n 'customers.management',\n 'customers.management.commands',\n 'customers.management.commands.load_customers_to_redis',\n 'customers.forms',\n 'customers.admin',\n 'customers.models',\n 'customers.urls',\n 'customers.views',\n ]\n for a in apps:\n self.assertTrue(module_exists(a))", "def test_codebook_loads_from_local_file() -> None:\n\n # dump codebook to disk\n codebook_data: List = codebook_json_data_factory()\n with tempfile.TemporaryDirectory() as directory:\n codebook_json: str = os.path.join(directory, 'simple_codebook.json')\n with open(codebook_json, 'w') as f:\n json.dump(codebook_data, f)\n\n # load the codebook\n codebook = Codebook.from_json(codebook_json)\n assert codebook.sizes[Indices.ROUND] == 2\n assert codebook.sizes[Indices.CH] == 3\n assert codebook.sizes[Features.TARGET] == 2", "def import_realia(where=None):\n config = get_config(where)\n with settings(host_string=config['host_string']), cd(config['installation_dir']):\n run('git pull')\n run('cd import_scripts;../bin/python import_realia.py load_fixture')\n run('bin/django update_index dasa.Realia')", "def import_and_index_resolutions():\n sys.path.append(os.path.abspath('import_scripts'))\n import import_resolutioninstance\n import_resolutioninstance.ResolutionImporter().load_items()", "def test_import():\n assert hasattr(waves, 'wave_number')", "def test_import(self):\n self.assertTrue(NagiosPerfdataCollector)", "def test_config_imports_file(config_file_location):\n\n assert Config(str(config_file_location)).file_location == \"E:\\\\A\\\\iMaterialistFiles\\\\\"", "def test_items_are_mounted(self):\n response2 = self.client.get(\"/importer/design26/models.py\")\n self.assertEquals(response2.status_code, 200)", "def test_simple_import(barred_tac_list_importer, logger, db_conn):\n expect_success(barred_tac_list_importer, 6, db_conn, logger)", "def test_import_local_function(self):\n import_function(determine_package(f))\n assert f() == \"My name is f.\"", "def available_importer(**kwargs):\n return LazyImportTester(\"site\", **kwargs)", "def test_file_loader_module_import_fail(\n mock_empty_os_environ, monkeypatch, file_str, filename, mock_module, tmpdir\n):\n # Check that without mocking everything is file:\n path = tmpdir / filename\n with open(tmpdir / 
filename, \"w\") as f:\n f.write(file_str)\n\n climate = core.Climate(prefix=\"TEST_STUFF\", settings_files=[str(path)])\n climate.update()\n # Now fake not having imported yaml\n monkeypatch.setattr(mock_module, None)\n climate = core.Climate(prefix=\"TEST_STUFF\", settings_files=[str(path)])\n with pytest.raises(ImportError):\n climate.update()", "def test_dag_load(self):\n # Run tests both for telescope with file suffixes and without\n for accounts in [None, {\"accounts\": [\"foo\", \"bar\"]}]:\n with self.subTest(accounts=accounts):\n env = ObservatoryEnvironment(\n self.project_id, self.data_location, api_host=self.host, api_port=self.api_port\n )\n with env.create():\n # Add Observatory API connection\n conn = Connection(\n conn_id=AirflowConns.OBSERVATORY_API, uri=f\"http://:password@{self.host}:{self.api_port}\"\n )\n env.add_connection(conn)\n\n # Add a Google Books telescope\n dt = pendulum.now(\"UTC\")\n telescope_type = orm.TelescopeType(\n name=\"Google Books Telescope\", type_id=TelescopeTypes.google_books, created=dt, modified=dt\n )\n env.api_session.add(telescope_type)\n organisation = orm.Organisation(name=\"anu-press\", created=dt, modified=dt)\n env.api_session.add(organisation)\n telescope = orm.Telescope(\n name=\"anu-press Google Books Telescope\",\n telescope_type=telescope_type,\n organisation=organisation,\n modified=dt,\n created=dt,\n extra=accounts,\n )\n env.api_session.add(telescope)\n env.api_session.commit()\n\n dag_file = os.path.join(module_file_path(\"oaebu_workflows.dags\"), \"google_books_telescope.py\")\n self.assert_dag_load(\"google_books_anu-press\", dag_file)", "def test_definition_loading(self):\r\n\r\n modulestore = XMLModuleStore(DATA_DIR, course_dirs=['toy', 'two_toys'])\r\n\r\n location = Location(\"edX\", \"toy\", \"2012_Fall\", \"video\", \"Welcome\", None)\r\n toy_video = modulestore.get_item(location)\r\n location_two = Location(\"edX\", \"toy\", \"TT_2012_Fall\", \"video\", \"Welcome\", None)\r\n two_toy_video = modulestore.get_item(location_two)\r\n self.assertEqual(toy_video.youtube_id_1_0, \"p2Q6BrNhdh8\")\r\n self.assertEqual(two_toy_video.youtube_id_1_0, \"p2Q6BrNhdh9\")", "def test_load_gtis(self):\n fits_file = os.path.join(self.datadir, 'monol_testA.evt')\n hen.io.load_gtis(fits_file)", "def test_include():\n from bst import BST", "def test_yaml_tech_file(self) -> None:\n tech_yaml = \"\"\"\nname: My Technology Library\ninstalls:\n - path: test\n base var: \"\" # means relative to tech dir\nlibraries: []\n \"\"\"\n tech_dir, tech_dir_base = HammerToolTestHelpers.create_tech_dir(\"dummy28\")\n\n tech_yaml_filename = os.path.join(tech_dir, \"dummy28.tech.yml\")\n with open(tech_yaml_filename, \"w\") as f: # pylint: disable=invalid-name\n f.write(tech_yaml)\n sys.path.append(tech_dir_base)\n tech_opt = hammer_tech.HammerTechnology.load_from_dir(\"dummy28\", tech_dir)\n self.assertFalse(tech_opt is None, \"Unable to load technology\")\n\n # Cleanup\n shutil.rmtree(tech_dir_base)", "def test_import_process(self):\r\n good_file = self._get_del_file()\r\n imp = Importer(good_file, username=u\"admin\")\r\n imp.process()\r\n\r\n # now let's do some db sanity checks\r\n self._delicious_data_test()", "def _import_elmo():\n\n elmo = hub.Module('https://storage.googleapis.com/az-nlp/elmo_ru-news_wmt11-16_1.5M_steps.tar.gz',\n trainable=False) # news\n # elmo = hub.Module('https://storage.googleapis.com/az-nlp/elmo_ru-twitter_2013-01_2018-04_600k_steps.tar.gz',\n # trainable=False) # twitter\n print('❤️ ❤️ ❤️ DONE (re)importing Tensorflow 
hub.Module ')\n print('Tensorflow version is', tf.__version__)\n\n return elmo", "def load_rentedout():", "def load(self, path):\n pass", "def load(self, path):\n pass", "def testAbsolutePackageImport(self):\r\n self.buildTempDirs()\r\n expected = os.path.join(self.temp_fake_aa, '__init__')\r\n aaeggs = os.path.join(self.temp_fake_aa, 'eggs.py')\r\n self.assertEqual(expected, modulefinder.get_module_filename('aa', aaeggs))", "def test_imports():\n import sys\n import src\n assert 'sklearn.feature_extraction' not in sys.modules.keys()", "def load_test_file():\n hou.hipFile.load(\n os.path.join(\n os.path.dirname(os.path.abspath(__file__)),\n \"data\",\n \"test_api_integration.hipnc\",\n ),\n ignore_load_warnings=True,\n )\n\n yield\n\n hou.hipFile.clear()", "def test_import_app(self):\n dirs.attempt_app_import(\"mediabrute\")\n with self.assertRaises(ImproperlyConfigured):\n dirs.attempt_app_import(\"NONONONONO\")", "def import_file(name: Text, file_path: Text):\n\n spec = spec_from_file_location(f\"luh3417.{name}\", file_path)\n module = module_from_spec(spec)\n spec.loader.exec_module(module)\n\n return module", "def test_path_issues(Script):\n source = '''from datetime import '''\n assert Script(source).completions()", "def test_dupe_imports(self):\r\n good_file = self._get_del_file()\r\n imp = Importer(good_file, username=u\"admin\")\r\n imp.process()\r\n\r\n good_file = self._get_del_file()\r\n imp = Importer(good_file, username=u\"admin\")\r\n imp.process()\r\n\r\n # now let's do some db sanity checks\r\n self._delicious_data_test()", "def _test_import_local_class(self): # TODO\n module = determine_package(LocalClass)\n name = f.__name__\n\n function = {\"module\": module, \"name\": name}\n\n import_function(function)\n assert f() == \"My name is f.\"", "def test_component_resolution_different_file():\n\n assert snippet_eval(ComponentSnippet(modulea.ComponentResolutionViaModule())) == \"hi from module b\\n\"", "def test_module(self):\n pass", "def import_all():\n import theory", "def testUnparsedImport(self):\n a = 'a.mojom'\n b = 'b.mojom'\n self.WriteFile(a, \"\"\"\\\n module a;\n struct Bar {};\"\"\")\n self.WriteFile(\n b, \"\"\"\\\n module b;\n import \"a.mojom\";\n struct Foo { a.Bar bar; };\"\"\")\n\n # a.mojom has not been parsed yet, so its import will fail when processing\n # b.mojom here.\n with self.assertRaisesRegexp(ValueError, \"does not exist\"):\n self.ParseMojoms([b])", "def test_taskfile_import(monkeypatch, modpath):\n monkeypatch.setattr(loadlimit.importhook, 'lstaskfiles', fake_lstaskfiles)\n monkeypatch.setattr(loadlimit.importhook, 'SourceFileLoader',\n FakeSourceFileLoader)\n\n taskfiles = ['a_{}.py'.format(i) for i in range(10)]\n names = [splitext(n)[0] for n in taskfiles]\n pypath = ['{}.{}'.format(modpath, n) for n in names]\n randpath = choice(pypath)\n\n assert modpath not in sys.modules\n assert all(not p.startswith(modpath) for p in sys.modules)\n\n sys.meta_path.append(TaskImporter(*taskfiles))\n taskfile = import_module(randpath)\n\n expected = set(pypath) | set([modpath])\n result = set(p for p in sys.modules if p.startswith(modpath))\n\n assert modpath in sys.modules\n assert result == expected\n assert taskfile.TEST == randpath", "def setUp(self):\n\n def import_hook(name, *args, **kwargs):\n if name == 'actstream':\n raise ImportError('test case module import failure')\n else:\n return self.original_imports(name, *args, **kwargs)\n\n self.original_imports = builtins.__import__\n builtins.__import__ = import_hook", "def 
test_import_local_method(self):\n import_function(determine_package(LocalClass().foo_method))\n assert f() == \"My name is f.\"", "def testImport(self):\n success = False\n try:\n from cutlass import VisitAttribute\n success = True\n except Exception:\n pass\n\n self.failUnless(success)\n self.failIf(VisitAttribute is None)", "def _import_bh_(self):", "def load_projector(self, projector_file):\n self._test(projector_file)", "def test_WW95_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. import WW95 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.WW95\", test]", "def _run_online_test(*args, **kwargs):\n import responses # noqa: F401", "def test_site_load_local_file(self):\n with patch('__builtin__.open', mock_open(read_data=self.test_config)) as mock_file:\n test_config = ef_site_config.EFSiteConfig().load_from_local_file()\n self.assertEqual(test_config[\"ENV_ACCOUNT_MAP\"][\"test\"], \"testaccount\")", "def test_load_file(self):\n self.assertTrue(os.path.exists(MEDIA_ROOT+\"/pl_test1_\"+self.loader.version))\n self.assertTrue(os.path.exists(MEDIA_ROOT+\"/pl_test2_\"+self.loader.version))", "def test_twice_dependent_object_import(self):\n pass", "def load(self):\n pass", "def load(self):\n pass", "def load(self):\n pass", "def load(self):\n pass" ]
[ "0.6580002", "0.62604517", "0.62208253", "0.6155901", "0.60002035", "0.5990556", "0.593152", "0.5869385", "0.5839019", "0.5812476", "0.58026296", "0.5798949", "0.5780236", "0.5765925", "0.5747201", "0.57247925", "0.5714153", "0.5711708", "0.5696462", "0.56960547", "0.5638083", "0.5624456", "0.56218964", "0.56194836", "0.5590037", "0.55891156", "0.55769867", "0.5551508", "0.55471265", "0.55455947", "0.55436987", "0.5521529", "0.5505507", "0.5490942", "0.54891473", "0.5484598", "0.54734796", "0.5466461", "0.54658276", "0.54560953", "0.5452915", "0.54498786", "0.54332626", "0.54304063", "0.54273325", "0.5422316", "0.5420057", "0.5409216", "0.5401762", "0.53992695", "0.538394", "0.53512895", "0.53502256", "0.5348773", "0.5344798", "0.5334891", "0.53344065", "0.53310543", "0.53277767", "0.5320692", "0.53046227", "0.5298293", "0.52935886", "0.5292214", "0.52909625", "0.52905893", "0.52830267", "0.52654326", "0.52621114", "0.5259177", "0.5257864", "0.52574146", "0.5247484", "0.5247484", "0.5237203", "0.52209204", "0.5213668", "0.5210797", "0.52095586", "0.52065474", "0.52041996", "0.5200471", "0.5192984", "0.51866525", "0.51854587", "0.51820827", "0.5181874", "0.5178725", "0.5178192", "0.51643753", "0.51634544", "0.51618034", "0.5160012", "0.515915", "0.5157045", "0.5146955", "0.5143446", "0.5138049", "0.5138049", "0.5138049", "0.5138049" ]
0.0
-1
Tests that hotels whose cities aren't in the database cannot be imported from a local file
def test_csv_import_hotel_fail(self):
    from django.contrib.messages import get_messages
    path = reverse("import-csv")
    user = mixer.blend(User, is_staff=True, is_superuser=True)
    client = Client()
    client.force_login(user)
    file = open("hotel.csv")
    r = client.post(path, {"title": "hotel", "csv_file": file})
    messages = list(get_messages(r.wsgi_request))
    assert r.status_code == 200
    assert len(messages) >= 1
    for message in messages:
        assert "can not import" in str(message)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_unknown_countries(self):\n # Currently, there are no Countries or Regions\n self.assertEqual(Country.objects.count(), 0)\n self.assertEqual(Region.objects.count(), 0)\n\n # Call the command with countries that are not recognized by the iso3166 library\n self.call_command(filename='power_plant_import/tests/data/unknown_countries.csv')\n\n # No Countries or Regions were created during the test\n self.assertEqual(Country.objects.count(), 0)\n self.assertEqual(Region.objects.count(), 0)", "async def test_get_location_data(self):\n for city_name in ['dublin', 'London', 'Copenhagen']:\n response = await self.http_client.fetch(request=HTTPRequest(\n url=self.get_url(path=\"/location-data/{}\".format(city_name)),\n method='GET'\n ))\n self.assertEqual(response.code, HTTPStatus.OK)\n self.check_city_response(response, city_name.lower())", "def load_random_cities(data):\n cities = list(set([elem['name'] for elem in data]))\n city_objects = [City(data=city) for city in cities]\n City.objects.bulk_create(city_objects)", "def GetWorldCities():\n return GetDataFromCsvFile('world_cities.csv')", "async def test_get_bad_location_data(self):\n city_name = 'notarealplace'\n response = await self.http_client.fetch(request=HTTPRequest(\n url=self.get_url(path=\"/location-data/{}\".format(city_name)),\n method='GET'\n ), raise_error=False)\n self.assertEqual(response.code, HTTPStatus.BAD_REQUEST, \"Incorrect response for an unknown city\")", "def test_create_hotel(self):\n amsterdam = City.objects.get(name=\"Amsterdam\")\n ibis = Hotel.objects.get(name=\"Ibis\")\n\n self.assertEqual(ibis.city, amsterdam)\n self.assertEqual(ibis.code, \"AMS01\")\n self.assertEqual(ibis.name, \"Ibis\")", "def test_csv_import_city(self):\n from django.contrib.messages import get_messages\n path = reverse(\"import-csv\")\n user = mixer.blend(User, is_staff=True, is_superuser=True)\n file = open(\"city.csv\")\n client = Client()\n client.force_login(user)\n r = client.post(path, {\"title\": \"city\", \"csv_file\": file})\n messages = list(get_messages(r.wsgi_request))\n assert r.status_code == 200\n assert len(messages) == 1\n assert str(messages[0]) == \"Successfully Uploaded!\"", "def _import_insee_city(self, cr, uid, ids, data_dir, context=None):\n if context is None:\n context = {}\n filepath = os.path.abspath(os.path.join(data_dir, 'comsimp2011.csv'))\n city_obj = self.pool.get('insee.city')\n department_obj = self.pool.get('insee.department')\n with open(filepath, 'rb') as cityfile:\n reader = csv.DictReader(cityfile)\n for row in reader:\n args = [('dep', '=', row['DEP'])]\n department_ids = department_obj.search(cr, uid, args)\n department_id = department_ids and department_ids[0] or None\n ncc = row['ARTMAJ'] and row['ARTMAJ'].strip(\"()\") + \\\n row['NCC'] or row['NCC']\n nccenr = row['ARTMIN'] and row['ARTMIN'].strip(\"()\") + \\\n row['NCCENR'] or row['NCCENR']\n values = {\n 'cdc': row['CDC'],\n 'cheflieu': row['CHEFLIEU'],\n 'reg': row['REG'],\n 'dep': row['DEP'],\n 'department_id': department_id,\n 'com': row['COM'],\n 'ar': row['AR'],\n 'ct': row['CT'],\n 'tncc': row['TNCC'],\n 'artmaj': row['ARTMAJ'],\n 'ncc': ncc,\n 'artmin': row['ARTMIN'],\n 'nccenr': nccenr,\n }\n city_obj.create(cr, uid, values, context=context)", "def load_restaurants(city):\n session = connect_db()\n # Start offset at 0 to return the first 20 results from Yelp API request\n offset = 0\n\n # Get total number of restaurants for this city\n bearer_token = obtain_bearer_token(API_HOST, TOKEN_PATH)\n result_len = 20\n \n # Get all 
restaurants for a city and load each restaurant into the database\n # Note: Yelp has a limitation of 1000 for accessible results, so get total results\n # if less than 1000 or get only 1000 results back even if there should be more\n while (1000 > offset) and (result_len==20):\n results = search(bearer_token, 'restaurant', city, offset)\n result_len = len(results['businesses'])\n\n # API response returns a SearchResponse object with accessible attributes\n # response.businesses returns a list of business objects with further attributes\n for business in results['businesses']:\n biz = get_business(bearer_token, business['id'])\n try:\n table.insert(biz)\n except DuplicateKeyError:\n print 'DUPS!'\n\n hour_start_monday = None\n hour_end_monday = None \n hour_start_tuesday = None\n hour_end_tuesday = None\n hour_start_wednesday = None\n hour_end_wednesday = None \n hour_start_thursday = None\n hour_end_thursday = None \n hour_start_friday = None\n hour_end_friday = None \n hour_start_saturday = None\n hour_end_saturday = None \n hour_start_sunday = None\n hour_end_sunday = None\n try:\n yelp_price_level = biz['price']\n except:\n yelp_price_level = None\n try:\n hours_type = biz['hours'][0]['hours_type']\n is_open_now = biz['hours'][0]['is_open_now']\n for item in biz['hours'][0]['open']:\n if item['day'] == 1:\n hour_start_tuesday = item['start']\n hour_end_tuesday = item['end']\n elif item['day'] == 0:\n hour_start_monday = item['start']\n hour_end_monday = item['end']\n elif item['day'] == 2:\n hour_start_wednesday = item['start']\n hour_end_wednesday = item['end']\n elif item['day'] == 3:\n hour_start_thursday = item['start']\n hour_end_thursday = item['end']\n elif item['day'] == 4:\n hour_start_friday = item['start']\n hour_end_friday = item['end']\n elif item['day'] == 5:\n hour_start_saturday = item['start']\n hour_end_saturday = item['end']\n elif item['day'] == 6:\n hour_start_sunday = item['start']\n hour_end_sunday = item['end']\n except:\n hours_type = None\n is_open_now = None\n hour_start_monday = None\n hour_end_monday = None \n hour_start_tuesday = None\n hour_end_tuesday = None\n hour_start_wednesday = None\n hour_end_wednesday = None \n hour_start_thursday = None\n hour_end_thursday = None \n hour_start_friday = None\n hour_end_friday = None \n hour_start_saturday = None\n hour_end_saturday = None \n hour_start_sunday = None\n hour_end_sunday = None\n restaurant = Restaurant(\n yelp_id = business['id'],\n yelp_rating = biz['rating'],\n yelp_review_count = biz['review_count'],\n name = biz['name'],\n phone = biz['phone'],\n yelp_url = biz['url'],\n yelp_price_level = yelp_price_level,\n latitude = biz['coordinates']['latitude'],\n longitude = biz['coordinates']['longitude'],\n hours_type = hours_type,\n is_open_now = is_open_now,\n hour_start_monday = hour_start_monday,\n hour_end_monday = hour_end_monday,\n hour_start_tuesday = hour_start_tuesday,\n hour_end_tuesday = hour_end_tuesday,\n hour_start_wednesday = hour_start_wednesday,\n hour_end_wednesday = hour_end_wednesday, \n hour_start_thursday = hour_start_thursday,\n hour_end_thursday = hour_end_thursday, \n hour_start_friday = hour_start_friday,\n hour_end_friday = hour_end_friday, \n hour_start_saturday = hour_start_saturday,\n hour_end_saturday = hour_end_saturday, \n hour_start_sunday = hour_start_sunday,\n hour_end_sunday = hour_end_sunday, \n is_closed = biz['is_closed'],\n categories = biz['categories'][0]['alias'],\n display_phone = biz['display_phone'],\n location = ' 
'.join(biz['location']['display_address']),\n location_city = biz['location']['city'],\n location_state = biz['location']['state'],\n location_zip_code = biz['location']['zip_code'],\n location_city_id = biz['location']['city'] + ', ' + biz['location']['state'])\n session.merge(restaurant)\n # Yelp returns only 20 results each time, so need to offset by 20 while iterating\n offset += 20\n print('current offset: ', offset)\n session.commit()", "def test_loading(self):\n self.assertIsInstance(self.data.districts, list)", "def load_venues():\n\n print('load_venues')\n\n Venue.query.delete()\n\n for row in open(\"seed_data/venues.csv\"):\n row = row.rstrip()\n subcategory, \\\n created_by, \\\n title, \\\n addr_1, \\\n addr_2, \\\n city, \\\n postal_code, \\\n state = row.split(',')\n\n cat_sub = Category_Subcategory.query.filter_by(name=subcategory).first()\n\n vnu = Venue(subcategory_id=cat_sub.id,\n created_by=created_by,\n name=title,\n addr_1=addr_1,\n addr_2=addr_2,\n city=city,\n postal_code=postal_code,\n state=state)\n\n db.session.add(vnu)\n\n db.session.commit()", "def audit_city(osmfile):\r\n suburb_list_wrong = defaultdict(set)\r\n city_file = open(osmfile, encoding=\"utf8\")\r\n \r\n for event, elem in ET.iterparse(city_file, events=(\"start\",)):\r\n \r\n if elem.tag == \"node\" or elem.tag == \"way\":\r\n \r\n for tag in elem.iter(\"tag\"):\r\n \r\n if tag.attrib['k'] == 'addr:city':\r\n \r\n city = tag.attrib['v']\r\n # province = re.sub(\" \", \"\", tag.attrib['v'].strip())\r\n if city not in expected_suburb:\r\n \r\n suburb_list_wrong[city].add(city)\r\n \r\n city_file.close()\r\n return suburb_list_wrong", "def cities(self):\n from models.engine.file_storage import FileStorage\n from models.city import City\n fs = FileStorage.all(City)\n city_list = []\n for key, value in fs.items():\n if 'City' in key and self.id == value.state_id:\n '''Append City instances maybe fucked up here!!!'''\n city_list.append(value)\n return city_list", "def load_data(city, month, day):\n if city == 'new york city':\n df=pd.read_csv(\"./new_york_city.csv\")\n else: \n df=pd.read_csv(\"./\" + city + \".csv\")\n df['Start Time']=pd.to_datetime(df['Start Time'])\n df['month'] = df['Start Time'].dt.month\n df['day'] = df['Start Time'].dt.dayofweek\n df['hour'] =df['Start Time'].dt.hour\n if month !='all':\n df=df[df['month']==months_list[month]]\n if day != 'all':\n df=df[df['day']==days_list[day]]\n \n return df", "def load_data(city, month, day):\n if city == 'chicago':\n city_df = pd.read_csv('chicago.csv')\n elif city == 'new york city':\n city_df = pd.read_csv('new_york_city.csv')\n else:\n # city_df = pd.read_csv('washington.csv')\n print(\"else is running\")\n\n print(city_df.head())\n\n return city_df", "def GetUsCities():\n return GetDataFromCsvFile('us_cities.csv')", "def read_locations(db, openfile):\n pass", "def load_data(city, month, day ,city_num, month_num, day_num):\r\n try:\r\n df = pd.read_csv(CITY_DATA[city])\r\n df['Start Time'] = pd.to_datetime(df['Start Time'])\r\n df['End Time'] = pd.to_datetime(df['End Time'])\r\n df['month'] = df['Start Time'].dt.month\r\n df['day_of_week'] = df['Start Time'].dt.weekday_name\r\n df['hour'] = df['Start Time'].dt.hour\r\n\r\n # filter by month if applicable\r\n if month != 'all':\r\n df = df[df['month'] == month_num]\r\n # filter by day of week if applicable\r\n if day != 'all':\r\n # filter by day of week to create the new dataframe\r\n\r\n df = df[df['day_of_week'].str.contains(day.title())]\r\n return df\r\n except Exception as e:\r\n 
print('An exception has been occurred during loading data: {}'.format(e))", "def check_all_type(name):\n all_type = set()\n for city in ['beijing', 'tianjing', 'guangzhou']:\n with open(\n exp_data_path + os.sep + 'weather' + os.sep + city + os.sep + '{}_{}.csv'.format(city, name)) as f:\n reader = csv.reader(f)\n for line in reader:\n all_type.add(line[1].replace(\" \", \"\"))\n print(all_type)", "def load_data(city, month, day):\n data_to_use = CITY_DATA[city]\n df = pd.read_csv(data_to_use)\n # drop rows containing NAN fields\n df2 = df.dropna()\n\n # Ensure the Start and End Time are Date\n pd.to_datetime(df2['Start Time'])\n pd.to_datetime(df2['End Time'])\n df = df2.sort_values(by='Start Time')\n\n # For each Start Time create additional columns to store year, month, day_of_week and hour\n # df['Start Year'] = df['Start Time'].apply(lambda x: get_part_of_datetime(x, 'year'))\n df['Start Month'] = df['Start Time'].apply(lambda x: get_part_of_datetime(x, 'month'))\n df['Start Day'] = df['Start Time'].apply(lambda x: get_part_of_datetime(x, 'day_of_week'))\n df['Start Hour'] = df['Start Time'].apply(lambda x: get_part_of_datetime(x, 'hour'))\n\n # filter month if month is not all\n if month.title() != 'All':\n df = df.loc[df['Start Month'] == month.title()]\n\n # filter day if day is not all\n if day.title() != 'All':\n df = df.loc[df['Start Day'] == day.title()]\n\n return df", "def test_client_nationlities_list(self):\n pass", "def load_data(city, month, day):\n \n if city == 'chicago':\n filename = 'chicago.csv'\n elif city == 'new York':\n filename = 'new_york_city.csv'\n elif city == 'washington':\n filename = 'washington.csv'\n else:\n return -1\n \n df = pd.read_csv(filename)\n \n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['End Time'] = pd.to_datetime(df['End Time'])\n \n if month != 'all':\n df = df[df['Start Time'].dt.month == month]\n\n if day != 'all':\n df = df[df['Start Time'].dt.weekday == day]\n \n return df", "def load_data(city, month, day):\n \n filename = str(CITY_DATA.get(city))\n\n # load data file into a dataframe\n df = pd.read_csv(filename)\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month_int = months.index(month) +1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month_int] \n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n \n return df", "def load_data(city, month, day):\r\n if city.lower() == \"chicago\" or city.lower() == \"c\":\r\n filename = 'C:\\\\Neha\\\\Udacity\\\\BikeShare\\\\chicago.csv'\r\n elif city.lower() == \"New York\" or city.lower() == \"new york\":\r\n filename = 'C:\\\\Neha\\\\Udacity\\\\BikeShare\\\\new_york_city.csv'\r\n elif city.lower() == \"Washington\" or city.lower() == \"washington\":\r\n filename = 'C:\\\\Neha\\\\Udacity\\\\BikeShare\\\\washington.csv'\r\n # load data file into a dataframe\r\n #df = pd.read_csv(CITY_DATA[\"city\"])\r\n df = pd.read_csv(filename)\r\n # convert the Start Time column to datetime\r\n df['Start Time'] = pd.to_datetime(df['Start 
Time'])\r\n\r\n # extract month and day of week from Start Time to create new columns\r\n df['month'] = df['Start Time'].dt.month\r\n df['day_of_week'] = df['Start Time'].dt.weekday_name\r\n\r\n # filter by month if applicable\r\n if month != 'all':\r\n # use the index of the months list to get the corresponding int\r\n months = ['January', 'February', 'March', 'April', 'May', 'June']\r\n month = months.index(month) + 1\r\n\r\n # filter by month to create the new dataframe\r\n df = df[df['month'] == month]\r\n\r\n # filter by day of week if applicable\r\n if day != 'all':\r\n # filter by day of week to create the new dataframe\r\n df = df[df['day_of_week'] == day.title()]\r\n\r\n return df", "def load_data(city, month, day):\n\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start, end Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['End Time'] = pd.to_datetime(df['End Time'])\n # Calculate the travel time per trip and add that column to data frame.\n df['Travel Time'] = df['End Time'] - df['Start Time']\n\n # extract month and day of week from Start Time to create new columns\n df['Start Hour'] = df['Start Time'].dt.hour\n df['End Hour'] = df['End Time'].dt.hour\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "def test_correct_data_under_places(self):\n load_to_datastore(self.places_sofia, self.metadata_sofia)\n CommonAssertions.check_correct_data_under_places(tester=self, places=self.places_sofia,\n metadata=self.metadata_sofia)", "def load_states():\n\n print \"States and Territories\"\n\n State.query.delete()\n\n for row in open(\"data/states_and_territories.txt\"):\n row = row.rstrip()\n # can't seem to get rid of \"\\r\" character other than doing a .split\n piped_rows = row.split(\"\\r\")\n for i in piped_rows:\n state_info = i.split(\"|\")\n state_name = state_info[0]\n state_code = state_info[1]\n\n state = State(state_name=state_name, state_code=state_code)\n\n db.session.add(state)\n\n db.session.commit()\n print \"States seeded\"", "def test_countries_regions_created(self):\n country_existing = CountryFactory(\n name=iso3166.countries.get('France').name,\n numeric=iso3166.countries.get('France').numeric,\n alpha_3=iso3166.countries.get('France').alpha3,\n )\n region_existing = RegionFactory(name='Existing Region')\n\n self.call_command(filename='power_plant_import/tests/data/six_rows.csv')\n\n # Get the PowerPlants that were created during the import\n (powerplant_ouessant, powerplant_ilarionas, powerplant_tonstad) = self.get_created_plants()\n # Get the Projects that were created during the import\n (project_ouessant1, project_ouessant2, project_liaoning) = self.get_created_projects()\n # The Countries and Regions have been assigned to the correct PowerPlants and Projects\n self.assertEqual(Country.objects.count(), 4)\n self.assertEqual(Region.objects.count(), 3)\n greece = Country.objects.get(name='Greece')\n china = Country.objects.get(name='China')\n norway = 
Country.objects.get(name='Norway')\n mediterranean = Region.objects.get(name='Gulf and Mediterranean')\n northeast_asia = Region.objects.get(name='Northeast Asia')\n self.assertEqual(set(powerplant_ouessant.countries.all()), set([country_existing]))\n self.assertEqual(set(powerplant_ouessant.regions.all()), set([region_existing]))\n self.assertEqual(set(project_ouessant1.countries.all()), set([country_existing]))\n self.assertEqual(set(project_ouessant1.regions.all()), set([region_existing]))\n self.assertEqual(set(project_ouessant1.countries.all()), set([country_existing]))\n self.assertEqual(set(project_ouessant1.regions.all()), set([region_existing]))\n self.assertEqual(set(powerplant_ilarionas.countries.all()), set([greece]))\n self.assertEqual(set(powerplant_ilarionas.regions.all()), set([mediterranean]))\n self.assertEqual(set(project_liaoning.countries.all()), set([china]))\n self.assertEqual(set(project_liaoning.regions.all()), set([northeast_asia]))\n self.assertEqual(set(powerplant_tonstad.countries.all()), set([norway]))\n self.assertEqual(set(powerplant_tonstad.regions.all()), set([region_existing]))", "def load_data(city, month, day):\n try:\n df = pd.read_csv(CITY_DATA[city])\n\n df['Start Time'] = pd.to_datetime(df['Start Time'], errors='coerce')\n df['End Time'] = pd.to_datetime(df['End Time'])\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n df['hour'] = df['Start Time'].dt.hour\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = MONTH_LIST.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df\n except Exception as e:\n print('Couldn\\'t load the file, as an Error occurred: {}'.format(e))", "def load_data(city, month='all', day='all'):\n\n df = pd.read_csv(CITY_DATA[city.lower()]).rename(columns={'Unnamed: 0': 'Trip Id'})\n cols = df.columns\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['Month'] = df['Start Time'].dt.month\n df['Day of Week'] = df['Start Time'].dt.dayofweek\n df['Start Hour'] = df['Start Time'].dt.hour\n\n # Filter by month if applicable\n if month.lower() in MONTH_LIST:\n n_month = MONTH_LIST[month.lower()]\n df = df.loc[df['Month'] == n_month]\n\n # Filter by day of the week if applicable\n if day.lower() in DAY_LIST:\n n_day = DAY_LIST[day.lower()]\n df = df.loc[df['Day of Week'] == n_day]\n\n return df, cols", "def load_data(city, month, day):\n\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n # drop the unused 'Unnamed' column\n df = df.drop(\"Unnamed: 0\", axis=1)\n # convert the Start Time column to datetime\n df[\"Start Time\"] = pd.to_datetime(df[\"Start Time\"])\n # extract month, day of week and hour from Start Time to create new columns\n df[\"month\"] = df[\"Start Time\"].dt.month_name()\n df[\"day\"] = df[\"Start Time\"].dt.day_name()\n df[\"hour\"] = df[\"Start Time\"].dt.hour.astype(str)\n\n # filter by month if applicable\n if month != \"All\":\n # filter by month to create the new dataframe\n df = df.loc[df[\"month\"] == month]\n\n # filter by day of week if applicable\n if day != \"All\":\n # filter by day of week to create the new dataframe\n df = df.loc[df[\"day\"] == day]\n\n return df", 
"def test_invlaid_data(self):\n # Currently, there are no PowerPlant or Project objects in the database\n self.assertEqual(PowerPlant.objects.count(), 0)\n self.assertEqual(Project.objects.count(), 0)\n\n # Call the command with invalid data.\n # The first row is for a power plant, but it has an invalid latitude.\n # The second row is for a project, but it has an invalid project capacity.\n # The third row is for a project, but it has an invalid project capacity unit.\n # The fourth row is for a power plant, but has an invalid status.\n self.call_command(filename='power_plant_import/tests/data/invalid_data.csv')\n\n # Each of the objects were created, though the invalid data was not saved.\n self.assertEqual(PowerPlant.objects.count(), 3)\n self.assertEqual(Project.objects.count(), 2)", "def test_load_file_does_not_exists(self):\n\n self.inactive_db.load()\n expected = {self.file_to_test: {}}\n\n self.assertEqual(expected, self.inactive_db.database)", "def load_data(city, month, day):\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n df['Start and End Stations'] = df['Start Station'] + ' and ' + df['End Station']\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "def load_data(city, month, day):\n# This code is refrenced from the practice problem on the project.\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week and hour from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n df['hour'] = df['Start Time'].dt.hour\n\n # filter by month if applicable\n if month != 'all':\n month = MONTHS.index(month) + 1\n df = df[ df['month'] == month ]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[ df['day_of_week'] == day.title()]\n\n return df", "def load_data(city, month, day):\n try:\n df = pd.read_csv(CITY_DATA[city])\n \n df['Start Time'] = pd.to_datetime(df['Start Time'], errors='coerce')\n df['End Time'] = pd.to_datetime(df['End Time'])\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name \n df['hour'] = df['Start Time'].dt.hour\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = MONTH_LIST.index(month) + 1 \n \n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()] \n\n return df\n except Exception as 
e:\n print('Couldn\\'t load the file, as an Error occurred: {}'.format(e))", "def load_data(city, month, day):\n try:\n df = pd.read_csv(CITY_DATA[city])\n \n df['Start Time'] = pd.to_datetime(df['Start Time'], errors='coerce')\n df['End Time'] = pd.to_datetime(df['End Time'])\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name \n df['hour'] = df['Start Time'].dt.hour\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = MONTH_LIST.index(month) + 1 \n \n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()] \n\n return df\n except Exception as e:\n print('Couldn\\'t load the file, as an Error occurred: {}'.format(e))", "def tearDown(self):\n del self.my_city", "def test_csv_reader_data_contents(process_data):\n data = process_data(file_name_or_type='clean_map.csv')\n\n # Check row types\n for row in data:\n assert(isinstance(row['Country'], str))\n assert(isinstance(row['City'], str))\n assert(isinstance(row['State_Or_Province'], str))\n assert(isinstance(row['Lat'], float))\n assert(isinstance(row['Long'], float))\n assert(isinstance(row['Altitude'], float))\n\n # Basic data checks\n assert len(data) == 180 # We have collected 180 rows\n assert data[0]['Country'] == 'Andorra'\n assert data[106]['Country'] == 'Japan'", "def test_find_cities(self):\n\n # Given\n game_state: CarcassonneGameState = CarcassonneGameState()\n\n city_one_side_straight_road = base_tiles[\"city_top_straight_road\"].turn(3)\n city_with_road = inns_and_cathedrals_tiles[\"ic_15\"].turn(3)\n\n game_state.board = [[None for column in range(2)] for row in range(1)]\n\n game_state.board[0][0] = city_with_road\n game_state.board[0][1] = city_one_side_straight_road\n\n # When\n cities: [City] = CityUtil.find_cities(\n game_state=game_state,\n coordinate=Coordinate(0, 0)\n )\n\n # Then\n self.assertEqual(1, len(cities))\n self.assertEqual(2, len(cities[0].city_positions))\n self.assertTrue(cities[0].finished)", "def load_data(city, month, day):\n\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month, day of week and hour from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n df['hour'] = df['Start Time'].dt.hour\n\n\n # filter_choosed by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = [\"january\", \"february\", \"march\", \"april\", \"may\", \"june\"]\n month = months.index(month) + 1\n\n # filter_choosed by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter_choosed by day of week if applicable\n if day != 'all':\n # filter_choosed by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "def test_incomplete_polygons():\n assert not query_row(db_conf, 'osm_landusages', 30004)\n assert not query_row(db_conf, 'osm_landusages', 30006)", "def load_data(city, month, day):\n df = pd.read_csv(city)\n df['day_of_week'] = pd.to_datetime(df['Start Time']).dt.dayofweek\n df['month'] = 
pd.to_datetime(df['Start Time']).dt.month\n if day != 'all':\n df = df[df['day_of_week'] == day]\n if month != 'all':\n df = df[df['month'] == month]\n df.drop('day_of_week', axis=1, inplace=True)\n df.drop('month', axis=1, inplace=True)\n return df", "def test_search(self):\n from importCsv.models import City, Hotel\n path = reverse(\"search\")\n user = mixer.blend(User, is_staff=True, is_superuser=True)\n city = mixer.blend(City, abbrev=\"tes\", name=\"test\")\n mixer.blend(Hotel, city=city, data=\"testData\", name=\"test hotel\")\n client = Client()\n client.force_login(user)\n r = client.post(path, {\"tes\": \"on\"})\n assert r.status_code == 200\n assert r.content.find(b'test hotel')", "def load_data(city, month, day):\n #Load data for city\n print(\"\\nCity Data..\")\n df = pd.read_csv(CITY_DATA[city])\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['month'] = df['Start Time'].dt.month\n df['day_week'] = df['Start Time'].dt.day_name()\n df['hour'] = df['Start Time'].dt.hour\n if month != '7':\n months = ['1', '2', '3', '4', '5', '6']\n month = months.index(month) + 1\n df = df[df['month'] == month]\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_week'] == day.title()]\n return df", "def load_data(city, month, day):\n \n start_time = time.time()\n\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n df['Start Time'] = pd.to_datetime(df['Start Time'], errors='coerce')\n\n # extract month, day of week and hour from Start Time to create new columns\n \n # Months will take values from 1 through 12\n df['month'] = df['Start Time'].dt.month \n \n # day of the week will take values in the range of 1 through 7\n df['day_of_week'] = df['Start Time'].dt.dayofweek \n \n # hour will take values from 0 through 23\n df['hour'] = df['Start Time'].dt.hour # range (0-23)\n\n # Here, we are filtering by month\n df['End Time'] = pd.to_datetime(df['End Time'])\n if month != 'all':\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1 \n\n df = df[df['Start Time'].dt.month == month]\n\n # Here, we are filtering by day of week\n if day != 'all': \n df = df[df['Start Time'].dt.weekday_name == day.title()]\n \n return df", "def create_list_csv_by_city(self, file_name, city_name):\n\n #We couldn't make it for this hackathon because we hadn't enough data and especially good data\n pass", "def load_data(city, month, day):\n\n df = pd.read_csv(CITY_DATA[CITIES[city]])\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['Month'] = df['Start Time'].dt.month\n df['Day of Week'] = df['Start Time'].dt.dayofweek\n\n # get the subset of data where the month matches the one chosen\n if month != 0:\n df = df[df['Month'] == month]\n \n # get the subset of data where the day of the week matches the one chosen\n if day != 7:\n df = df[df['Day of Week'] == day]\n \n return df", "def read_cities(file_name):\n stream = open(file_name)\n data = stream.readlines()\n stream.close()\n roadmap = []\n for city_info in data: # For each record in data file\n if city_info != \"\\n\": # Ignore new line characters\n city_info = clean_data(city_info) # Clean the record\n roadmap.append(city_info) # Add each cleaned record to a list\n return roadmap", "def load_data(city, month, day):\n\n months_dict = {'january' : 1 , 'february' : 2 , 'march' : 3 , 'april' : 4 , 'may' : 5 , 'june' : 6, 'july' : 7, 'august' : 8, 'september' : 9}\n days_dict = {'monday' : 0 , 'tuesday' : 1 , 'wednesday' : 2 , 'thursday' : 3, 
'friday' : 4 , 'saturday' : 5 , 'sunday' : 6}\n\n df = pd.read_csv(CITY_DATA[city])\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['End Time'] = pd.to_datetime(df['End Time'])\n\n if month != 'all':\n df = df[df['Start Time'].dt.month == months_dict[month]]\n\n if day != 'all':\n df = df[df['Start Time'].dt.weekday == days_dict[day]]\n\n return df", "def test_geo_data_created(self):\n # Currently, there are no GeometryStore or PointGeometry objects in the database\n self.assertEqual(GeometryStore.objects.count(), 0)\n self.assertEqual(PointGeometry.objects.count(), 0)\n\n self.call_command(filename='power_plant_import/tests/data/six_rows.csv')\n\n # Get the PowerPlants that were created during the import\n (powerplant_ouessant, powerplant_ilarionas, powerplant_tonstad) = self.get_created_plants()\n # Get the Projects that were created during the import\n (project_ouessant1, project_ouessant2, project_liaoning) = self.get_created_projects()\n\n # GeometryStore objects were created for:\n # - powerplant_ouessant\n # - powerplant_ilarionas\n # - project_liaoning\n # The project_ouessant1 and project_ouessant2 should use\n # powerplant_ouessant's GeometryStore\n self.assertEqual(GeometryStore.objects.count(), 3)\n # PointGeometry objects were created for:\n # - powerplant_ouessant\n # - powerplant_ilarionas\n # - project_liaoning\n # The project_ouessant1 and project_ouessant2 should use\n # powerplant_ouessant's PointGeometry\n self.assertEqual(PointGeometry.objects.count(), 3)\n # The powerplant_ouessant point is correct\n powerplant_ouessant_points = powerplant_ouessant.geo.points.all()\n self.assertEqual(powerplant_ouessant_points.count(), 1)\n self.assertEqual(powerplant_ouessant_points.first().geom.x, -5.11121)\n self.assertEqual(powerplant_ouessant_points.first().geom.y, 48.43754)\n # The powerplant_ilarionas point is correct\n powerplant_ilarionas_points = powerplant_ilarionas.geo.points.all()\n self.assertEqual(powerplant_ilarionas_points.count(), 1)\n self.assertEqual(powerplant_ilarionas_points.first().geom.x, 21.8039)\n self.assertEqual(powerplant_ilarionas_points.first().geom.y, 40.0966)\n # The project_liaoning gets its geodata from its latitude and longitude\n # cells\n project_liaoning_points = project_liaoning.geo.points.all()\n self.assertEqual(project_liaoning_points.count(), 1)\n self.assertEqual(project_liaoning_points.first().geom.x, 121.38065)\n self.assertEqual(project_liaoning_points.first().geom.y, 41.16469)\n # For the project_ouessant1 and project_ouessant2, the latitude and\n # longitude cells are blank, so they get their geodata from their\n # parent PowerPlant (powerplant_ouessant).\n self.assertEqual(project_ouessant1.geo, project_ouessant1.power_plant.geo)\n self.assertEqual(project_ouessant2.geo, project_ouessant2.power_plant.geo)\n # The powerplant_tonstad has no geo data\n self.assertIsNone(powerplant_tonstad.geo)", "def cities(self):\n objs = models.storage.all()\n tmp = []\n for key, value in objs.items():\n name = key.split('.')\n if name[0] == \"City\":\n if value.state_id == str(self.id):\n tmp.append(objs[key])\n return tmp", "def load_data(city, month, day):\n # Load data file into a dataframe.\n print('\\nLoading data for city = {}, month = {}, day = {}...'\n .format(city, month, day))\n df = pd.read_csv(CITY_DATA[city])\n\n # Convert the Start Time column to datetime.\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n # Extract month, day of week, hour from Start Time to create new columns.\n df['Month'] = [MONTHS[int(m)] for m in 
df['Start Time'].dt.month]\n df['Day of Week'] = df['Start Time'].dt.weekday_name\n df['Hour'] = df['Start Time'].dt.hour\n # Create a column for the start and end station pairs.\n df['Path'] = df['Start Station'] + ' => ' + df['End Station']\n\n # Filter by month, if applicable.\n if month != 'All':\n df = df[df['Month'] == month]\n # Filter by day of week, if applicable\n if day != 'All':\n df = df[df['Day of Week'] == day]\n return df", "def load_data(city, month, day):\n\n print(\"\\nLoading data ...\")\n df = pd.read_csv(CITY_DATA[city])\n\n # Convert the Start Time column to datetime for time period comparison \n # then further create new columns for month and day of week based on that \n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['month'] = df['Start Time'].dt.month\n df['dow'] = df['Start Time'].dt.day_name()\n\n # Perform filtering if enabled \n\n if month != 'all':\n # if possible , move this as a const to the top of source code\n months = ['january','february','march','april','may','june']\n month_to_filter = months.index(month) + 1\n\n # create a new dataframe \n df = df[df['month'] == month_to_filter]\n\n if day != 'all':\n # create a new dataframe \n # note: title() is called since the first letter of the \n # week in the created column is in uppercase\n df = df[df['dow'] == day.title()]\n \n return df", "def test_csv_import_hotel_success(self):\n from django.contrib.messages import get_messages\n path = reverse(\"import-csv\")\n user = mixer.blend(User, is_staff=True, is_superuser=True)\n file = open(\"city.csv\")\n client = Client()\n client.force_login(user)\n client.post(path, {\"title\": \"city\", \"csv_file\": file})\n file = open(\"hotel.csv\")\n r = client.post(path, {\"title\": \"hotel\", \"csv_file\": file})\n messages = list(get_messages(r.wsgi_request))\n assert r.status_code == 200\n assert len(messages) == 1\n assert str(messages[0]) == \"Successfully Uploaded!\"", "def load_data(city, month, day):\n file_name = CITY_DATA.get(city)\n df = pd.read_csv(file_name)\n\n # convert \"Start Time\" column from string to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # create new column \"Month\" by extracting the month form datetime\n df['Month'] = df['Start Time'].dt.month\n\n # create new column \"Day\" by extracting the day form datetime\n df['Day'] = df['Start Time'].dt.day_name()\n df['Day'] = df['Day'].str.lower()\n\n # filter by month\n if month != \"all\":\n month_index = months.index(month) + 1\n df = df[df['Month'] == month_index]\n\n # filter by day\n if day != \"all\":\n df = df[df['Day'] == day]\n\n return df", "def load_data(city, month, day):\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n\n # filter by month if applicable\n if month != 'all':\n df = df[df['month']==month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week']==day]\n\n return df", "def load_data(city, month, day):\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n days = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']\n df = pd.read_csv(CITY_DATA[city])\n df['Start Time'] = pd.to_datetime(df['Start Time']) # convert the 
Start Time column to datetime\n df['month'] = df['Start Time'].dt.month # extract month from start time to create a new column\n df['day_of_week'] = df['Start Time'].dt.day_name() # extract day from start time to create a new column\n\n if month in months and day == 'all': # filter the df only by month if applicable\n month = convert_to_int(months, month)\n df = df.loc[df['month'] == month]\n \n if month == 'all' and day in days : # filter the df only by day of week if applicable\n df = df.loc[df['day_of_week'] == day.title()]\n \n if month in months and day in days:\n # use the index of the months list to get the corresponding month's int\n month = convert_to_int(months, month)\n\n df = df.loc[df['month'] == month] # first filter the df by month\n df = df.loc[df['day_of_week'] == day.title()] # then filter the df by day of week\n\n return df # no filter applied", "def main():\n langs = []\n\n with open(\"sql/07_populate.sql\", 'w', encoding='utf8') as sql:\n sql.write(\"--this file is generated from csv files in data folder\\n\\n\")\n\n langs = write_lang_city(sql)\n write_groups_diets(sql, langs)\n\n with open(\"sql/10_populate_test_data.sql\", 'w', encoding='utf8') as sql:\n sql.write(\"--this file is generated from csv files in moc_data folder\\n\\n\")\n write_test_data(sql)\n # This command sets postgis coordinates based on latitude and longitude\n sql.write(\"UPDATE restaurant SET geo_location = ST_POINT(latitude, longitude);\\n\")\n sql.close()", "def load_data(city, month, day):\n #Used the practice#3 from Project solution here to convert time columns to month and weekday_name\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n #print('this is the month', month)\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n \n return df", "def load_data(city, month, day):\n #create the DataFrame\n #I'll be honest, I was struggling with this bit of code so I searched the internet and found what I needed to get started.\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week and hour from Start Time to create new columns. 
New columns are needed for filtering.\n df['month'] = df['Start Time'].dt.month\n df['day'] = df['Start Time'].dt.day\n df['hour'] = df['Start Time'].dt.hour\n\n # filter by month if applicable\n if month != 'all':\n month = MONTHS.index(month) + 1\n df = df[ df['month'] == month ]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[ df['day'] == day.title()]\n\n return df", "def load_data(city, month, day):\n \n df = pd.read_csv(CITY_DATA[city])\n \n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n df['End Time'] = pd.to_datetime(df['End Time'])\n\n\n\n # filter by month \n if month != 'all':\n \n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month =months.index(month) + 1\n \n \n df = df[df['month'] == month]\n\n # filter by day of week \n if day != 'all':\n \n df = df[df['day_of_week'] == day.title()]\n\n return df", "def load_data(city, month, day):\n\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['End Time'] = pd.to_datetime(df['End Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['hour'] = df['Start Time'].dt.hour\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "def load_data(city, month, day):\n df = pd.read_csv(CITY_DATA[city])\n \n\n #Converting time\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n \n if month != 'all':\n month = MONTHS.index(month) + 1\n\n df = df[df['month'] == month]\n \n if day != 'all':\n df = df[df['day_of_week'] == day.title()]\n \n return df", "def load_data(city, month, day):\n #loading data of the city chosen by user into dataframe\n df = pd.read_csv(CITY_DATA[city])\n #converting the start time clomn from object (string) to datetime object so as we can use datetime Attributes and methonds to extract month coulmn and day to filter with them\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n #extracting month and day into new columns and days into new column 'month_name' and 'day_name' are methods in pandas datetime (https://pandas.pydata.org/pandas-docs/version/0.23/generated/pandas.DatetimeIndex.html) as it's in this link\n df['month'] = df['Start Time'].dt.month_name()\n df['day_of_week'] = df['Start Time'].dt.day_name()\n #filtering data city with user inputs filter by moth and day:\n if month != 'all':\n df = df[df['month'] == month.title()]\n if day != 'all':\n df = df[df['day_of_week'] == day.title()]\n\n\n return df", "def test_missing_data_sources(self):", "def load_data(city, month, day):\n df = pd.read_csv(CITY_DATA [city])\n \n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n \n # extract month and day of 
week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['hour'] = df['Start Time'].dt.hour\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n \n # Do the filter below\n # no filter is applied\n if month == 0 and day == 0:\n return df\n # only filter by day\n elif month == 0:\n df = df[df['day_of_week']==day]\n # only filter by month\n elif day == 0:\n df = df[df['month']== month]\n else:\n df = df[df['day_of_week']==day]\n df = df[df['month']== month]\n \n return df", "def load_data(city, month, day):\n\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city.lower()])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month.lower()) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n return df", "def load_data(city, month, day):\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city.lower()])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month, and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday #Monday=0,Sunday=6\n \n # filter by month if applicable\n \n if month != 'all':\n # use the index of the months list to get the corresponding int\n month = months.index(month) + 1\n \n # filter by month to create the new dataframe\n filt1 = (df['month'] == month)\n df = df[filt1]\n\n # filter by day of week if applicable\n \n if day != 'all':\n # filter by day of week to create the new dataframe\n weekday = weekdays.index(day)\n filt2 = (df['day_of_week'] == weekday) \n df = df[filt2]\n \n return df", "def load_data(city, month, day):\n # load datafile into a DataFrame\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to Date time\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extracting month and day of the week from Start time\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday\n\n #filter by month when applicable\n if month != 'all':\n month = MONTH_DATA.index(month)\n\n #filter by month to create a new DataFrame\n df = df[df['month'] == month]\n\n #filter by day of the week where applicable\n if day != 'all':\n df = df[df['day_of_week'] == day.title()]\n\n return df", "def load_data(city, month, day):\n\n\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by month if applicable\n if month.lower() != 'all':\n # use the index of the months list to get the corresponding int\n months = ['all', 'january', 'february', 'march', 'april', 'may', 'june']\n month = 
months.index(month.lower()) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day.lower() != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n\n return df", "def load_data(city, month, day):\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n # months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n \n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n \n return df", "def load_data(city, month, day):\n while month != \"\":\n # load data file into a dataframe\n filename = CITY_DATA[city]\n df = pd.read_csv(filename)\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n # df['day_of_week'] = df['Start Time'].dt.day_name()\n\n\n try: df['day_of_week'] = df['Start Time'].dt.weekday_name\n except: df['day_of_week'] = df['Start Time'].dt.day_name()\n else: df['day_of_week'] = df['Start Time'].dt.weekday\n \n \n \n df['hour'] = df['Start Time'].dt.hour\n\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n # months = ['january', 'february', 'march', 'april', 'may', 'june','july','august','september','october','november','december']\n month = int(months.index(month)) + 1\n \n # filter by month to create the new dataframe\n df = df.loc[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df.loc[df['day_of_week'] == day.title()]\n \n return df", "def load_data(city, month, day):\n df = pd.read_csv(city)\n df['month'] = pd.to_datetime(df['Start Time']).dt.month\n df['day_of_week'] = pd.to_datetime(df['Start Time']).dt.dayofweek\n if month != 'all':\n df = df[df['month'] == month]\n if day != 'all':\n df = df[df['day_of_week'] == day]\n\n return df", "def load_data(city, month, day):\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", 
"def load_data(city, month, day):\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n month = months.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "def load_data(city, month, day):\n input_file_name = CITY_DATA.get(city)\n\n # Load the CSV file into a Pandas data frame\n df = pd.read_csv(input_file_name)\n\n # Convert the format of the existing date field to a python DateTime\n df[\"Start Time\"] = pd.to_datetime(df[\"Start Time\"])\n\n # Create new columns to filter on\n df[\"month\"] = df[\"Start Time\"].dt.month\n df[\"alpha_day\"] = df[\"Start Time\"].dt.weekday_name\n\n # If a month was provided, filter on it\n if month != \"all\":\n month_num = VALID_MONTHS.index(month) + 1\n df = df[df[\"month\"] == month_num]\n\n # If a day was provided, filter on it\n if day != \"all\":\n df = df[df[\"alpha_day\"] == day.title()]\n\n return df", "def load_data(city, month, day):\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n \n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n \n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n \n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n \n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "def load_data(city, month, day):\n\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "def load_data(city, month, day):\n # here i load the datak\n df=pd.read_csv(CITY_DATA[city])\n \n df['Start Time']=pd.to_datetime(df['Start Time'])\n \n df['month']=df['Start Time'].dt.month\n df['day_of_week']=df['Start Time'].dt.weekday_name\n df['hour']=df['Start Time'].dt.hour\n \n #filter by month\n if month!='all':\n 
month =months.index(month)+1\n df=df[df['month']==month]\n \n #filter by day of week\n if day!='all':\n df=df[df['day_of_week']==day.title()]\n \n return df", "def load_data(city, month, day):\n df = pd.read_csv(CITY_DATA[city.lower()])\n month = month.title()\n day = day.title()\n \n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n \n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n# df['day_of_week'] = df['Start Time'].dt.weekday_name\n df['day_of_week'] = df['Start Time'].dt.day_name()\n \n # filter by month if applicable\n if month not in ['', 'All']:\n # use the index of the months list to get the corresponding int\n month = MONTHS_LIST.index(month) + 1\n \n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n \n # filter by day of week if applicable\n if day not in ['', 'All']:\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day]\n return df", "def load_data(city, month, day):\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.day_name()\n\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n \n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n \n return df", "def test_fuels_created(self):\n # Currently, there are no Fuels or FuelCategory objects in the database\n self.assertEqual(Fuel.objects.count(), 0)\n self.assertEqual(FuelCategory.objects.count(), 0)\n\n self.call_command(filename='power_plant_import/tests/data/six_rows.csv')\n\n # The file has the following fuels in the following fuel categories:\n # 'Ocean' category: 'Tidal', 'Wave'\n # 'Hydro' category: 'Hydro',\n # 'Wind' category: 'Wind'\n self.assertEqual(FuelCategory.objects.count(), 3)\n self.assertEqual(Fuel.objects.count(), 4)\n fuel_cat_ocean = FuelCategory.objects.get(name='Ocean')\n fuel_cat_hydro = FuelCategory.objects.get(name='Hydro')\n fuel_cat_wind = FuelCategory.objects.get(name='Wind')\n fuel_tidal = Fuel.objects.get(name='Tidal')\n fuel_wave = Fuel.objects.get(name='Wave')\n fuel_hydro = Fuel.objects.get(name='Hydro')\n fuel_wind = Fuel.objects.get(name='Wind')\n self.assertEqual(set(fuel_cat_ocean.fuel_set.all()), set([fuel_tidal, fuel_wave]))\n self.assertEqual(set(fuel_cat_hydro.fuel_set.all()), set([fuel_hydro]))\n self.assertEqual(set(fuel_cat_wind.fuel_set.all()), set([fuel_wind]))\n # Get the PowerPlants that were created during the import\n (powerplant_ouessant, powerplant_ilarionas, powerplant_tonstad) = self.get_created_plants()\n # Get the Projects that were created during the import\n (project_ouessant1, project_ouessant2, project_liaoning) = self.get_created_projects()\n # The Fuels have been assigned to the correct PowerPlants and Projects\n self.assertEqual(set(powerplant_ouessant.fuels.all()), set([fuel_tidal]))\n self.assertEqual(set(project_ouessant1.fuels.all()), 
set([fuel_tidal]))\n self.assertEqual(set(project_ouessant2.fuels.all()), set([fuel_wave]))\n self.assertEqual(set(powerplant_ilarionas.fuels.all()), set([fuel_hydro]))\n self.assertEqual(set(project_liaoning.fuels.all()), set([fuel_wind]))\n self.assertEqual(set(powerplant_tonstad.fuels.all()), set([fuel_wind, fuel_hydro]))", "def test_setting_csv_hotel_fail(self):\n from django.contrib.messages import get_messages\n path = reverse(\"setting-csv\")\n user = mixer.blend(User, is_staff=True, is_superuser=True)\n client = Client()\n client.force_login(user)\n r = client.post(path, {\"title\": \"hotel\", \"url\": \"http://rachel.maykinmedia.nl/djangocase/hotel.csv\",\n \"username\": \"python-demo\", \"password\": \"claw30_bumps\", \"save\": \"on\"})\n messages = list(get_messages(r.wsgi_request))\n assert r.status_code == 200\n assert len(messages) > 1", "def load_data(city, month, day):\n df = pd.read_csv(CITY_DATA[city])\n df[\"Start Time\"] = pd.to_datetime(df[\"Start Time\"])\n df[\"month\"] = df[\"Start Time\"].dt.month\n df[\"day_of_week\"] = df[\"Start Time\"].dt.weekday\n\n if month != \"all\":\n month = months[month]\n df = df[df[\"month\"] == month]\n\n if day != \"all\":\n df = df[df[\"day_of_week\"] == days.index(day)]\n return df", "def load_data(city, month, day):\r\n # Make sure the city name is correct\r\n city_name = city.lower()\r\n\r\n if debug_flag:\r\n print(city_name)\r\n\r\n try:\r\n print('getting data from: ', CITY_DATA[city_name])\r\n df = pd.read_csv(CITY_DATA[city_name])\r\n except OSError as e:\r\n print(\"Error: cannot find the data files\")\r\n print(\" Please make sure they are available in the root folder\")\r\n print(\" and restart the program\\n\")\r\n finally:\r\n exit()\r\n\r\n\r\n try:\r\n # Build data frame columns:\r\n # Convert start time column to date time so we can work with it\r\n df['Start Time'] = pd.to_datetime(df['Start Time'])\r\n\r\n # Build month (num) column from \"start time\"\r\n df['Month'] = df['Start Time'].dt.month\r\n\r\n # Use start date to calculate start day (i.e. 
tuesday) column\r\n df['Start Day'] = df['Start Time'].dt.day_name()\r\n\r\n # build hour column from start day column\r\n df['Hour'] = df['Start Time'].dt.hour\r\n\r\n except:\r\n print (\"Unexpected error\")\r\n\r\n return df", "def import_counties():\n\n query = 'INSERT INTO texas_counties(county, region) VALUES(%s,%s)'\n with persistence() as db:\n # create new cursor instance\n cursor = db.cursor(cursor_factory=psycopg2.extras.RealDictCursor)\n\n for council, counties in COUNCIL_DATA.items():\n for county in counties:\n cursor.execute(query, (county, council))\n db.commit()", "def load_data(city, month, day):\n\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n month = VALID_MONTHS.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "def test_bulk_locations(self):\n # do twice to check if it really updates\n lengths = []\n for i in range(2):\n file_path = os.path.join(os.path.dirname(__file__),\n self.testdata_folder,\n self.filename_locations)\n data = {\n 'bulk_upload' : open(file_path, 'rb'),\n }\n\n res = self.client.post(self.location_url, data)\n assert res.status_code == status.HTTP_201_CREATED, (\n responses.get(res.status_code, res.status_code), res.content)\n lengths.append(len(AdministrativeLocation.objects.all()))\n\n assert lengths[0] == lengths[1]\n\n file_path = os.path.join(os.path.dirname(__file__),\n self.testdata_folder,\n self.filename_locations_duplicates)\n data = {\n 'bulk_upload' : open(file_path, 'rb'),\n }\n\n res = self.client.post(self.location_url, data)\n assert res.status_code == status.HTTP_400_BAD_REQUEST", "def semi_all_static_feature(city):\n poi_frequency = np.load(exp_data_path + os.sep + 'poi_frequency' + os.sep + 'poi_frequency_{}.npy'.format(city),\n allow_pickle=True) # .tolist()\n poi_num = np.load(exp_data_path + os.sep + 'poi' + os.sep + 'poi_{}.npy'.format(city), allow_pickle=True)\n poi_entropy = np.load(exp_data_path + os.sep + 'poi_entropy' + os.sep + 'poi_entropy_{}.npy'.format(city),\n allow_pickle=True)\n road = np.load(exp_data_path + os.sep + 'roadnet' + os.sep + 'roadnet_{}.npy'.format(city), allow_pickle=True)\n transportation = np.load(exp_data_path + os.sep + 'transportation' + os.sep + 'transportation_{}.npy'.format(city),\n allow_pickle=True)\n commerce = np.load(exp_data_path + os.sep + 'commerce' + os.sep + 'commerce_{}.npy'.format(city), allow_pickle=True)\n\n file_name = exp_data_path + os.sep + 'station' + os.sep + 'all_demand_{}.npy'.format(city)\n demand_data = np.load(file_name, allow_pickle=True)\n total_num = demand_data[:, 0, -2, np.newaxis]\n slow_num = demand_data[:, 0, 0, np.newaxis]\n fast_num = demand_data[:, 0, 2, np.newaxis]\n\n raw_data = np.concatenate((slow_num, fast_num, total_num, poi_frequency, poi_num, poi_entropy, road, transportation, commerce), axis=1)\n csv_data = pd.DataFrame(raw_data, columns=SEMI_GENERAL_HEADER)\n print(csv_data.shape)\n # print(csv_data.iloc[:, 2])\n\n file_path = exp_data_path + 
os.sep + 'static' + os.sep + 'semi_static_feature_{}.csv'.format(city)\n if os.path.exists(file_path):\n os.remove(file_path)\n csv_data.to_csv(file_path)\n pass", "def load_data(city, month, day):\n df = pd.read_csv(CITY_DATA[city])\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['Month'] = df['Start Time'].dt.month\n df['Day_of_week'] = df['Start Time'].dt.weekday_name\n\n if month != 'all':\n month = months.index(month) + 1\n df = df[df['Month'] == month]\n if day != 'all':\n df = df[df['Day_of_week'] == day.title()]\n\n return df", "def load_data(city, month, day):\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n \n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n \n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n \n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june', 'july', 'august', 'september', 'october', 'november', 'december']\n month = months.index(month) + 1\n \n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n \n return df", "def load_equipment():\n\n for row in open(\"static/equipment.csv\"):\n row = row.rstrip()\n \n gear_name, category, brand, lender_email, zipcode, gear_photo, gear_photo_url = row.split(\",\")\n\n equipment = Equipment(\n gear_name=gear_name,\n category=category,\n brand=brand,\n lender_email=lender_email,\n zipcode=zipcode,\n gear_photo=gear_photo,\n gear_photo_url=gear_photo_url)\n\n db.session.add(equipment)\n \n\n db.session.commit()", "def expected_city_names_fixture():\n return {'b', 'a', 'c'}", "def load_data(city, month, day):\n # I upload the data from the file for the city chosen by the user into the dataframe.\n df = pd.read_csv(CITY_DATA[city])\n\n # To handle the data with pandas, I need to convert 'Start Time' to datetime. 
Afterwards, I create seperate columns for month, weekday, and start hour\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n df['hour'] = df['Start Time'].dt.hour\n\n # If the user did not input 'all', the data is filtered by the chosen month.\n # As I asked for the name of the month earlier, I use the index function to get the integer from the list.\n # As the list starts with 'all', the index of January is 1, February 2 etc.\n if month != 'all':\n month = MONTH_DATA.index(month)\n df = df[df['month'] == month]\n\n # Same for weekdays\n if day != 'all':\n df = df[df['day_of_week'] == day.title()]\n\n return df", "def load_data(city, month, day):\n\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "def load_data(city, month, day):\n\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "def load_data(city, month, day):\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n df['City'] = city.title()\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month_name()\n df['day_of_week'] = df['Start Time'].dt.day_name()\n\n # filter by month if applicable\n if month != 'all':\n # filter by month to create the new dataframe\n df = df[df['month'] == month.title()]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "def load_data(city, month, day):\n df= pd.read_csv(CITY_DATA[city])\n \n #create column for month, day of week\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['Month'] = df['Start Time'].dt.month_name() \n df['day_of_week'] = df['Start Time'].dt.day_name()\n \n #get the filtered data frame\n if month != 'all':\n df = df[df['Month'] == month]\n if day != 
'all':\n df = df[df['day_of_week'] == day.title()]\n return df", "def load_data(city, month, day):\n\n df = pd.read_csv(CITY_DATA[city])\n\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['month'] = df['Start Time'].dt.month\n df['weekday'] = df['Start Time'].dt.weekday_name\n df['hour'] = df['Start Time'].dt.hour\n\n if month != 'all':\n month = months.index(month) + 1\n df = df[df['month'] == month]\n\n if day != 'all':\n df = df[df['weekday'] == day.title()]\n\n return df" ]
[ "0.63880724", "0.60217434", "0.6000848", "0.59790474", "0.59473", "0.5909674", "0.5837335", "0.57937455", "0.5778223", "0.5773335", "0.5719565", "0.57075006", "0.56792784", "0.5651297", "0.56426704", "0.5636547", "0.5630291", "0.56268394", "0.5595516", "0.5582883", "0.5576265", "0.5573675", "0.5553242", "0.5551531", "0.5546585", "0.5545011", "0.55415845", "0.5537218", "0.5519994", "0.5516944", "0.5516664", "0.5505686", "0.5501089", "0.5500992", "0.54917854", "0.54875827", "0.54875827", "0.5462905", "0.54599637", "0.5459954", "0.54589057", "0.5458341", "0.5457524", "0.545462", "0.54497546", "0.5440924", "0.54306287", "0.5430026", "0.54283094", "0.5426481", "0.5423995", "0.5423381", "0.54225355", "0.54224724", "0.5420256", "0.5417989", "0.5415856", "0.5415463", "0.5409103", "0.54047257", "0.53991497", "0.53932106", "0.539145", "0.53840584", "0.538156", "0.5380135", "0.5371758", "0.5367105", "0.5361452", "0.5354671", "0.5347984", "0.5344691", "0.5344575", "0.5343079", "0.5342959", "0.53379995", "0.53350735", "0.53345895", "0.53281516", "0.5327331", "0.5326669", "0.5323536", "0.5322522", "0.53215617", "0.5318568", "0.53184354", "0.5317359", "0.5312243", "0.5311937", "0.53085864", "0.5305853", "0.5305753", "0.5302008", "0.5289882", "0.5288795", "0.5288706", "0.5288706", "0.5284896", "0.5284311", "0.5284059" ]
0.58699983
6
Tests the import from remote file for cities works fine
def test_setting_csv_city(self): from django.contrib.messages import get_messages path = reverse("setting-csv") user = mixer.blend(User, is_staff=True, is_superuser=True) client = Client() client.force_login(user) r = client.post(path, {"title": "city", "url": "http://rachel.maykinmedia.nl/djangocase/city.csv", "username": "python-demo", "password": "claw30_bumps", "save": "on"}) messages = list(get_messages(r.wsgi_request)) assert r.status_code == 200 assert len(messages) == 1 assert str(messages[0]) == "Successfully Uploaded!"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_csv_import_city(self):\n from django.contrib.messages import get_messages\n path = reverse(\"import-csv\")\n user = mixer.blend(User, is_staff=True, is_superuser=True)\n file = open(\"city.csv\")\n client = Client()\n client.force_login(user)\n r = client.post(path, {\"title\": \"city\", \"csv_file\": file})\n messages = list(get_messages(r.wsgi_request))\n assert r.status_code == 200\n assert len(messages) == 1\n assert str(messages[0]) == \"Successfully Uploaded!\"", "async def test_get_location_data(self):\n for city_name in ['dublin', 'London', 'Copenhagen']:\n response = await self.http_client.fetch(request=HTTPRequest(\n url=self.get_url(path=\"/location-data/{}\".format(city_name)),\n method='GET'\n ))\n self.assertEqual(response.code, HTTPStatus.OK)\n self.check_city_response(response, city_name.lower())", "def _import_insee_city(self, cr, uid, ids, data_dir, context=None):\n if context is None:\n context = {}\n filepath = os.path.abspath(os.path.join(data_dir, 'comsimp2011.csv'))\n city_obj = self.pool.get('insee.city')\n department_obj = self.pool.get('insee.department')\n with open(filepath, 'rb') as cityfile:\n reader = csv.DictReader(cityfile)\n for row in reader:\n args = [('dep', '=', row['DEP'])]\n department_ids = department_obj.search(cr, uid, args)\n department_id = department_ids and department_ids[0] or None\n ncc = row['ARTMAJ'] and row['ARTMAJ'].strip(\"()\") + \\\n row['NCC'] or row['NCC']\n nccenr = row['ARTMIN'] and row['ARTMIN'].strip(\"()\") + \\\n row['NCCENR'] or row['NCCENR']\n values = {\n 'cdc': row['CDC'],\n 'cheflieu': row['CHEFLIEU'],\n 'reg': row['REG'],\n 'dep': row['DEP'],\n 'department_id': department_id,\n 'com': row['COM'],\n 'ar': row['AR'],\n 'ct': row['CT'],\n 'tncc': row['TNCC'],\n 'artmaj': row['ARTMAJ'],\n 'ncc': ncc,\n 'artmin': row['ARTMIN'],\n 'nccenr': nccenr,\n }\n city_obj.create(cr, uid, values, context=context)", "def GetWorldCities():\n return GetDataFromCsvFile('world_cities.csv')", "def GetUsCities():\n return GetDataFromCsvFile('us_cities.csv')", "def import_from_url(jamsite, url, fieldnames=None):\n\t# import csv, from the webz.\n\tcsvfile = fetch_csv_from_url(url)\n\tjamsite.mergeinsert( import_jammers(csvfile, fieldnames=fieldnames) )", "def load_cities (filename):\n if not os.path.isfile(filename):\n return None\n # try to decode a plain file\n try:\n with open(filename) as input:\n return [ json.loads(line) for line in input if line ]\n except:\n pass\n # try to decode a gzipped file\n try:\n with gzip.open(filename) as input:\n return [ json.loads(line) for line in input if line ]\n except:\n pass\n return None", "def test_csv_import_hotel_success(self):\n from django.contrib.messages import get_messages\n path = reverse(\"import-csv\")\n user = mixer.blend(User, is_staff=True, is_superuser=True)\n file = open(\"city.csv\")\n client = Client()\n client.force_login(user)\n client.post(path, {\"title\": \"city\", \"csv_file\": file})\n file = open(\"hotel.csv\")\n r = client.post(path, {\"title\": \"hotel\", \"csv_file\": file})\n messages = list(get_messages(r.wsgi_request))\n assert r.status_code == 200\n assert len(messages) == 1\n assert str(messages[0]) == \"Successfully Uploaded!\"", "async def test_get_bad_location_data(self):\n city_name = 'notarealplace'\n response = await self.http_client.fetch(request=HTTPRequest(\n url=self.get_url(path=\"/location-data/{}\".format(city_name)),\n method='GET'\n ), raise_error=False)\n self.assertEqual(response.code, HTTPStatus.BAD_REQUEST, \"Incorrect response for an 
unknown city\")", "def import_data(loc,first_year, last_year):\n import zipfile\n import requests\n import urllib.request\n \n # Copy a network object to a local file\n\n \n table = [\"Prix\", \"Stations\", \"Services\"]\n \n for i in range(first_year, last_year + 1):\n for t in table :\n i = str(i)\n ti = t + i\n tiz = ti+\".zip\"\n ltiz = loc + tiz\n r = requests.get(ltiz)\n open(tiz, 'wb').write(r.content)\n with zipfile.ZipFile(tiz,\"r\") as zip_ref:\n zip_ref.extractall(\"data\")", "def test_csv_import_hotel_fail(self):\n from django.contrib.messages import get_messages\n path = reverse(\"import-csv\")\n user = mixer.blend(User, is_staff=True, is_superuser=True)\n client = Client()\n client.force_login(user)\n file = open(\"hotel.csv\")\n r = client.post(path, {\"title\": \"hotel\", \"csv_file\": file})\n messages = list(get_messages(r.wsgi_request))\n assert r.status_code == 200\n assert len(messages) >= 1\n for message in messages:\n assert \"can not import\" in str(message)", "def test_get_city(client):\n response = client.get(\"/weather/curitiba\")\n assert response.status_code == 200", "def test_remote_file(fs: FakeFilesystem, requests_mock: Mocker) -> None:\n requests_mock.get(\"https://example.com/test.csv\", text=CONTENTS)\n\n connection = connect(\":memory:\")\n cursor = connection.cursor()\n\n sql = 'SELECT * FROM \"https://example.com/test.csv\" WHERE \"index\" > 11'\n data = list(cursor.execute(sql))\n assert data == [(12.0, 13.3, \"Platinum_St\"), (13.0, 12.1, \"Kodiak_Trail\")]\n\n sql = \"\"\"\n INSERT INTO \"https://example.com/test.csv\" (\n \"index\",\n temperature,\n site\n ) VALUES (\n 14,\n 10.1,\n 'New_Site'\n )\n \"\"\"\n with pytest.raises(ProgrammingError) as excinfo:\n cursor.execute(sql)\n assert str(excinfo.value) == \"Cannot apply DML to a remote file\"\n\n sql = \"\"\"DELETE FROM \"https://example.com/test.csv\" WHERE site = 'Kodiak_Trail'\"\"\"\n with pytest.raises(ProgrammingError) as excinfo:\n cursor.execute(sql)\n assert str(excinfo.value) == \"Cannot apply DML to a remote file\"", "def import_locations(type_slug, zip_url):\n\n require('environment', provided_by=env.environments)\n locations_dir = '/tmp/fab_location_importer'\n if files.exists(locations_dir):\n sudo('rm -rf %s' % locations_dir, user=env.deploy_user)\n sudo('mkdir %s' % locations_dir, user=env.deploy_user)\n cmd = 'PYTHONPATH=%(code_root)s '\\\n 'DJANGO_SETTINGS_MODULE=openrural.local_settings '\\\n '%(virtualenv_root)s/bin/import_locations' % env\n with cd(locations_dir):\n sudo('wget -O locations.zip %s' % zip_url, user=env.deploy_user)\n sudo('unzip -d locations locations.zip', user=env.deploy_user)\n sudo(' '.join([cmd, type_slug, 'locations']), user=env.deploy_user)", "def load_from_remote(self, url: Optional[str] = None) -> None:\n raise NotImplementedError", "def read_cities(filename):\n reader = csv.reader(open(filename, \"rb\")) # may raise IOError\n rows = [line for line in reader]\n cities = [City(r[2], index, r[3], float(r[0]), float(r[1])) for index, r in enumerate(rows[1:])]\n return cities", "def load_random_cities(data):\n cities = list(set([elem['name'] for elem in data]))\n city_objects = [City(data=city) for city in cities]\n City.objects.bulk_create(city_objects)", "def get_cities(self, city_name: str = \"\"):", "def get_cities(self, city_name: str = None):", "def read_locations(db, openfile):\n pass", "def test_reference_url_import(self):\n program = factories.ProgramFactory()\n resp, review = generate_review_object(\n program, 
state=all_models.Review.STATES.REVIEWED)\n del review\n program_id = program.id\n self.assertEqual(201, resp.status_code)\n import_data = OrderedDict(\n [\n (\"object_type\", \"Program\"),\n (\"Code*\", program.slug),\n (\"reference url\", \"test@test.com\")\n ]\n )\n response = self.import_data(import_data)\n self._check_csv_response(response, {})\n program = all_models.Program.query.get(program_id)\n self.assertEqual(\n all_models.Review.STATES.REVIEWED,\n program.review_status\n )", "def test_ip_extraction_txt(self):\n self.parser.parse_file(os.path.join(self.test_data_dir, \"txt_ips.txt\"))\n self.assertEqual(self.test_data_ips, self.parser.ips)", "def test_valid_url(self):\n cwd=os.getcwd()\n url = \"http://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data\"\n returned_fname = requester.url_to_csv(url, fname=\"wine\")\n self.assertIsInstance(returned_fname, str)\n self.assertEqual(returned_fname, \"{0}/{1}.csv\".format(cwd,'wine'))", "def test_ip_extraction_gz(self):\n self.parser.parse_file(self.test_data_dir + \"/txt_ips.txt.gz\")\n self.assertEqual(self.test_data_ips, self.parser.ips)", "def acquire_data(city):\n\n filename = FILENAME_TEMPLATE.format(city)\n\n text = open(filename).read()\n lines = text.splitlines()\n\n data_lines = lines[1:]\n\n return data_lines", "def loadCity(fileid):\n dinf = {}\n root = etree.Element(\"city\")\n text = None\n statename = \"\"\n statefile = \"\"\n cityname = \"\"\n dinf['m'] = {}\n dinf['m']['events'] = {}\n # TODO: put this in a global variable, and make a function to populate it from the DTD.\n tags = [\"name\",\"state\",\"statefile\",\"start\",\"scue\",\"end\",\"ecue\",\"place\",\"aspects\"]\n for tag in tags:\n dinf[tag] = [\"\",False]\n dinf['aspects'] = {}\n if not dinf.get(\"places\"): dinf['places'] = {}\n if not idExists(fileid):\n status.push(0,\"new city created... '%s'\" % fileid)\n return dinf\n fn = os.path.join(config['realmdir'],\"%s.xml\" % fileid)\n status.push(0,\"loading city from XML... 
'%s'\" % fn)\n try:\n with codecs.open(fn,'rU','utf-8') as f:\n tree = etree.parse(f)\n f.close()\n root = tree.getroot()\n except IOError as e:\n print \"c: Could not open configuration file: %s\" % e\n\n ir = 0\n for i in range(len(root)):\n if root[i].tag is not None:\n if root[i].tag == \"place\":\n if len(root[i]) > 0:\n node = \"\"\n node = root[i].find(\"file\")\n if node.text:\n node = node.text.strip()\n node = common.validateFileid(node)\n dinf['places'][node] = {}\n for j in root[i]:\n if j.tag and j.text and j.tag != \"file\":\n dinf['places'][node][j.tag] = [j.text.strip(),False]\n if config['debug'] > 3: print dinf['places'][node]\n else:\n if config['debug'] > 0:\n print \"Invalid place tag:\"\n for c in root[i]:\n print c.tag + ': ' + c.text,\n else: # no relat length\n if config['debug'] > 0: print \"Empty place tag.\"\n elif root[i].tag == \"events\":\n if len(root[i]) > 0:\n nodes = root[i]\n for node in nodes:\n k = str(len(dinf['m']['events']))\n dinf['m']['events'][k] = {}\n for j in node:\n if j.tag and j.text:\n dinf['m']['events'][k][j.tag] = [j.text.strip(),False]\n else:\n if config['debug'] > 0:\n print \"Invalid milestone tag:\"\n for c in node:\n print c.tag + ': ' + c.text,\n if config['debug'] > 3: printPretty(dinf['m']['events'])\n else: # no relat length\n if config['debug'] > 0: print \"Empty milestone tag.\"\n elif root[i].tag == \"aspects\":\n if len(root[i]) > 0:\n nodes = root[i]\n for node in nodes:\n k = str(len(dinf['aspects']))\n dinf['aspects'][k] = {}\n if node.tag and node.text:\n dinf['aspects'][k] = [node.text.strip(),False]\n else:\n if config['debug'] > 0:\n print \"Invalid aspects tag:\"\n print node.tag + ': ' + node.text,\n else: # no aspects length\n if config['debug'] > 0: print \"Empty aspects tag.\"\n elif root[i].text is not None:\n if root[i].tag == \"statefile\":\n statefile = root[i].text.strip()\n statefile = common.validateFileid(statefile)\n if statefile is None: statefile = \"\"\n elif root[i].tag == \"state\":\n statename = root[i].text.strip()\n elif root[i].tag == \"name\":\n cityname = root[i].text.strip()\n dinf[root[i].tag] = [root[i].text.strip(), False]\n if config['debug'] > 2: print str(i) + \" \",\n if len(statefile) > 0: pushLoc(statefile,statename,fileid,cityname)\n return dinf", "def load_data(city, month, day):\n if city == 'chicago':\n city_df = pd.read_csv('chicago.csv')\n elif city == 'new york city':\n city_df = pd.read_csv('new_york_city.csv')\n else:\n # city_df = pd.read_csv('washington.csv')\n print(\"else is running\")\n\n print(city_df.head())\n\n return city_df", "def setUp(self):\n\n self.host = 'http://www.weather.com.cn'\n self.ep_path = '/data/cityinfo'\n self.client = HttpClient()", "def read_cities(file_name):\n stream = open(file_name)\n data = stream.readlines()\n stream.close()\n roadmap = []\n for city_info in data: # For each record in data file\n if city_info != \"\\n\": # Ignore new line characters\n city_info = clean_data(city_info) # Clean the record\n roadmap.append(city_info) # Add each cleaned record to a list\n return roadmap", "def create_list_csv_by_city(self, file_name, city_name):\n\n #We couldn't make it for this hackathon because we hadn't enough data and especially good data\n pass", "def test_addr_city_good_values(self):\n for input_val, output_val in self.known_values:\n self.line._parse_addr_city(input_val)\n self.assertEqual(output_val, self.line.addr_city)", "def test_content_file(self):\n\n 
url=[\"http://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data\",\n \"http://archive.ics.uci.edu/ml/machine-learning-databases/car/car.data\",\n \"http://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data\"]\n cwd=os.getcwd()\n list_of_files=requester.batch_url_to_csv(url, fnames=[\"m1\", \"m2\",\"m3\"])\n total_rows=0\n reader_list=[]\n for j in range(len(list_of_files)):\n reader=csv.DictReader(list_of_files[j])\n for rows in reader:\n total_rows+=1\n reader_list.append(total_rows)\n\n unique=set((reader_list))\n if len(unique)!=len(reader_list):\n with self.assertRaises(AssertionError):\n requester.batch_url_to_csv(url,fnames=['m1','m2','m3'])", "def test_client_nationlities_list(self):\n pass", "def load_data(city, month, day):\n data_to_use = CITY_DATA[city]\n df = pd.read_csv(data_to_use)\n # drop rows containing NAN fields\n df2 = df.dropna()\n\n # Ensure the Start and End Time are Date\n pd.to_datetime(df2['Start Time'])\n pd.to_datetime(df2['End Time'])\n df = df2.sort_values(by='Start Time')\n\n # For each Start Time create additional columns to store year, month, day_of_week and hour\n # df['Start Year'] = df['Start Time'].apply(lambda x: get_part_of_datetime(x, 'year'))\n df['Start Month'] = df['Start Time'].apply(lambda x: get_part_of_datetime(x, 'month'))\n df['Start Day'] = df['Start Time'].apply(lambda x: get_part_of_datetime(x, 'day_of_week'))\n df['Start Hour'] = df['Start Time'].apply(lambda x: get_part_of_datetime(x, 'hour'))\n\n # filter month if month is not all\n if month.title() != 'All':\n df = df.loc[df['Start Month'] == month.title()]\n\n # filter day if day is not all\n if day.title() != 'All':\n df = df.loc[df['Start Day'] == day.title()]\n\n return df", "def setUpClass(cls):\n cls.local_compounds = csv_to_compounds(CSV_FILE)\n # clear the DB so we are sure that the local compounds are the same as the remote ones\n r = api_clear(BASE_URL)\n assert r.status_code == 204\n r = api_batchadd(BASE_URL, cls.local_compounds)\n assert r.status_code == 201", "def setUp(self):\n self.my_city = City()", "def load_data(city, month, day):\n file_name = CITY_DATA.get(city)\n df = pd.read_csv(file_name)\n\n # convert \"Start Time\" column from string to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # create new column \"Month\" by extracting the month form datetime\n df['Month'] = df['Start Time'].dt.month\n\n # create new column \"Day\" by extracting the day form datetime\n df['Day'] = df['Start Time'].dt.day_name()\n df['Day'] = df['Day'].str.lower()\n\n # filter by month\n if month != \"all\":\n month_index = months.index(month) + 1\n df = df[df['Month'] == month_index]\n\n # filter by day\n if day != \"all\":\n df = df[df['Day'] == day]\n\n return df", "def internal_locations(source, include):\n with commit():\n import_internal_locations_from_json(source, include=include)", "def setUp(self):\n self.url = \"https://www.loc.gov/item/mss859430021?fo=json\"", "def testZipUrl(self):\n try:\n remoteLocator = self.__zipFileUrl\n # fn = self.__fileU.getFileName(remoteLocator)\n ok = self.__fileU.isLocal(remoteLocator)\n self.assertFalse(ok)\n #\n lPath = os.path.join(self.__workPath, self.__fileU.getFileName(self.__zipFileUrl))\n ok = self.__fileU.get(remoteLocator, lPath)\n self.assertTrue(ok)\n ok = self.__fileU.exists(lPath)\n self.assertTrue(ok)\n ok = self.__fileU.isLocal(lPath)\n self.assertTrue(ok)\n tPath = self.__fileU.getFilePath(lPath)\n self.assertEqual(lPath, tPath)\n fp = self.__fileU.uncompress(lPath, 
outputDir=self.__workPath)\n ok = fp.endswith(\"Food_Display_Table.xlsx\")\n self.assertTrue(ok)\n\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()", "def test_list_zr_locations(self):\n pass", "def test_find_cities(self):\n\n # Given\n game_state: CarcassonneGameState = CarcassonneGameState()\n\n city_one_side_straight_road = base_tiles[\"city_top_straight_road\"].turn(3)\n city_with_road = inns_and_cathedrals_tiles[\"ic_15\"].turn(3)\n\n game_state.board = [[None for column in range(2)] for row in range(1)]\n\n game_state.board[0][0] = city_with_road\n game_state.board[0][1] = city_one_side_straight_road\n\n # When\n cities: [City] = CityUtil.find_cities(\n game_state=game_state,\n coordinate=Coordinate(0, 0)\n )\n\n # Then\n self.assertEqual(1, len(cities))\n self.assertEqual(2, len(cities[0].city_positions))\n self.assertTrue(cities[0].finished)", "def test_city_country(self):\n formatted_city = get_full_city(\"santiago\", \"chile\")\n self.assertEqual(formatted_city, \"Santiago, Chile\")", "def init(city: str, country: str, list_of_streets: list):\n if check(city, country, list_of_streets):\n return get_sample_data(city, country, list_of_streets)", "def load_from_geojson(self, filename_or_url):", "def test_csv_reader_data_contents(process_data):\n data = process_data(file_name_or_type='clean_map.csv')\n\n # Check row types\n for row in data:\n assert(isinstance(row['Country'], str))\n assert(isinstance(row['City'], str))\n assert(isinstance(row['State_Or_Province'], str))\n assert(isinstance(row['Lat'], float))\n assert(isinstance(row['Long'], float))\n assert(isinstance(row['Altitude'], float))\n\n # Basic data checks\n assert len(data) == 180 # We have collected 180 rows\n assert data[0]['Country'] == 'Andorra'\n assert data[106]['Country'] == 'Japan'", "def test_load_csv_file():\n data = loader.load_csv_file(\"buildup/reference/comsol_solution/lofi/voltage.csv.bz2\")\n\n assert data.any()", "def main(location: str='kansascity', cities: bool=False) -> None:\n car_list = []\n if cities:\n location = ''\n city_list = get_cities()\n for city in city_list:\n response = cars.get_porsche(city=city)\n car = cars.get_soup(response)\n if len(car) > 0:\n car_list.append(cars.get_soup(response))\n else:\n response = cars.get_porsche()\n car = cars.get_soup(response)\n car_list.append(car)\n add_to_db(car_list)\n results = get_pandas(location)\n print(response.headers.get('retry-after'))\n return results", "def test_json_file(self):\n #response = os.system(\"python3 client.py -f filename.csv\")\n response = client.result(False, 'json', 'unittest',file = 'test_file.csv')\n response = json.loads(response)\n first_name = response['person'][0]['first_name']\n self.assertEqual(first_name,'John','Should print John')\n length = len(response['person'])\n for count in range(0,length):\n self.assertNotIn('nationality',response['person'][count], 'Nationality should not be present')", "def test_loading(self):\n self.assertIsInstance(self.data.districts, list)", "def test_valid_url(self):\n\n cwd=os.getcwd()\n url = [\"http://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data\",\n \"http://stackoverflow.com/questions/17730173/python-cant-get-full-path-name-of-file\",\n \"http://archive.ics.uci.edu/ml/machine-learning-databases/forest-fires/forestfires.csv\"]\n #with warnings.warn('RuntimeWarning'):\n # requester.batch_url_to_csv(url, fnames=[\"test\", \"test2\",\"test3\"])\n returned_fname = requester.batch_url_to_csv(url, fnames=[\"test\", 
\"test2\",\"test3\"])\n self.assertEqual(returned_fname, [\"{0}/{1}.csv\".format(cwd,\"test\"),\n \"{0}/{1}.csv\".format(cwd,\"test3\")])", "def test_retrieve_l_organization_locations(self):\n pass", "def test_city_country(self):\n dublin_ireland = city_country('dublin', 'ireland')\n self.assertEqual(dublin_ireland, 'Dublin, Ireland')", "def __init__(self, model_city):\n\n assert isinstance(\n model_city, city_settings.ModelCity), 'ModelCity expected'\n self.model_city = model_city\n\n self._raw_data = data_io.fetch_service_units(\n self.servicetype, self.model_city)", "def testFtpUrl(self):\n try:\n remoteLocator = self.__ftpFileUrl\n # fn = self.__fileU.getFileName(remoteLocator)\n ok = self.__fileU.isLocal(remoteLocator)\n self.assertFalse(ok)\n #\n dirPath = os.path.join(self.__workPath, \"chem_comp_models\")\n lPath = os.path.join(dirPath, self.__fileU.getFileName(self.__ftpFileUrl))\n ok = self.__fileU.get(remoteLocator, lPath)\n self.assertTrue(ok)\n ok = self.__fileU.exists(lPath)\n self.assertTrue(ok)\n ok = self.__fileU.isLocal(lPath)\n self.assertTrue(ok)\n tPath = self.__fileU.getFilePath(lPath)\n self.assertEqual(lPath, tPath)\n fp = self.__fileU.uncompress(lPath, outputDir=dirPath)\n ok = fp.endswith(\"chem_comp_model.cif\")\n self.assertTrue(ok)\n\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()", "def test_import_inconsistent_relatives():\n with TestClient(app) as client:\n response = client.post(\n \"/imports\",\n json={\n \"citizens\": [\n {\"citizen_id\": 1,\n \"town\": \"Москва\",\n \"street\": \"Льва Толстого\",\n \"building\": \"16к7стр5\",\n \"apartment\": 7,\n \"name\": \"Иванов Иван Иванович\",\n \"birth_date\": \"26.12.1986\",\n \"gender\": \"male\",\n \"relatives\": [2]},\n {\"citizen_id\": 2,\n \"town\": \"Москва\",\n \"street\": \"Льва Толстого\",\n \"building\": \"16к7стр5\",\n \"apartment\": 7,\n \"name\": \"Иванов Сергей Иванович\",\n \"birth_date\": \"17.04.1997\",\n \"gender\": \"male\",\n \"relatives\": []}\n ]\n }\n )\n\n assert response.status_code == 400", "def load_data(city, month, day):\r\n # Make sure the city name is correct\r\n city_name = city.lower()\r\n\r\n if debug_flag:\r\n print(city_name)\r\n\r\n try:\r\n print('getting data from: ', CITY_DATA[city_name])\r\n df = pd.read_csv(CITY_DATA[city_name])\r\n except OSError as e:\r\n print(\"Error: cannot find the data files\")\r\n print(\" Please make sure they are available in the root folder\")\r\n print(\" and restart the program\\n\")\r\n finally:\r\n exit()\r\n\r\n\r\n try:\r\n # Build data frame columns:\r\n # Convert start time column to date time so we can work with it\r\n df['Start Time'] = pd.to_datetime(df['Start Time'])\r\n\r\n # Build month (num) column from \"start time\"\r\n df['Month'] = df['Start Time'].dt.month\r\n\r\n # Use start date to calculate start day (i.e. 
tuesday) column\r\n df['Start Day'] = df['Start Time'].dt.day_name()\r\n\r\n # build hour column from start day column\r\n df['Hour'] = df['Start Time'].dt.hour\r\n\r\n except:\r\n print (\"Unexpected error\")\r\n\r\n return df", "def test_import_clean_data():\n citizen_ok = deepcopy(CITIZEN_EXAMPLE)\n with TestClient(app) as client:\n import_response = client.post(\n \"/imports\",\n json={\n \"citizens\": [citizen_ok]\n }\n )\n response_body = import_response.json()\n assert import_response.status_code == 201\n assert \"data\" in response_body\n assert \"import_id\" in response_body[\"data\"]\n assert isinstance(response_body[\"data\"][\"import_id\"], int)", "def test_load_coinbasettr(self):\n with open(self.filename) as f:\n coinbasettr.CoinbaseTTRParser(csv_content=f)\n parser = coinbasettr.CoinbaseTTRParser(filename=self.filename)\n parser.cleanup()", "def load_data(city, month='all', day='all'):\n\n df = pd.read_csv(CITY_DATA[city.lower()]).rename(columns={'Unnamed: 0': 'Trip Id'})\n cols = df.columns\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['Month'] = df['Start Time'].dt.month\n df['Day of Week'] = df['Start Time'].dt.dayofweek\n df['Start Hour'] = df['Start Time'].dt.hour\n\n # Filter by month if applicable\n if month.lower() in MONTH_LIST:\n n_month = MONTH_LIST[month.lower()]\n df = df.loc[df['Month'] == n_month]\n\n # Filter by day of the week if applicable\n if day.lower() in DAY_LIST:\n n_day = DAY_LIST[day.lower()]\n df = df.loc[df['Day of Week'] == n_day]\n\n return df, cols", "def test_get_city_notfound(client):\n response = client.get(\"/weather/curitoba\")\n # Validate the response\n print(response.data)\n assert b\"200\" not in response.data", "def test_fun(file_path, urls):\n assert os.path.exists(\"src/01_download/urls.txt\"), \"Urls text file not\\\n found in location\"\n assert os.path.exists(\"data/raw/census_2001.csv\"), \"Census file not\\\n found in location\"\n print(\"Tests ran succesfully\")", "def test_get_countries(self):\n pass", "def get_cities() -> list:\n results = []\n with open('src/craigslist_cities.txt', 'r', encoding='utf8') as file:\n for line in file:\n results.append(line.strip())\n return results", "def load_data(city, month, day):\n\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n # drop the unused 'Unnamed' column\n df = df.drop(\"Unnamed: 0\", axis=1)\n # convert the Start Time column to datetime\n df[\"Start Time\"] = pd.to_datetime(df[\"Start Time\"])\n # extract month, day of week and hour from Start Time to create new columns\n df[\"month\"] = df[\"Start Time\"].dt.month_name()\n df[\"day\"] = df[\"Start Time\"].dt.day_name()\n df[\"hour\"] = df[\"Start Time\"].dt.hour.astype(str)\n\n # filter by month if applicable\n if month != \"All\":\n # filter by month to create the new dataframe\n df = df.loc[df[\"month\"] == month]\n\n # filter by day of week if applicable\n if day != \"All\":\n # filter by day of week to create the new dataframe\n df = df.loc[df[\"day\"] == day]\n\n return df", "def import_from_file(jamsite, source='jammers.csv', fieldnames=None):\n\t# import jammers.csv\n\twith open(source) as csvfile:\n\t\tjamsite.mergeinsert( import_jammers(csvfile, fieldnames=fieldnames) )", "def test_init(self, fixture_environment):\n\n # Generate city object\n city_object = cit.City(environment=fixture_environment)\n\n # Check inheritance from citydistrict object of pycity\n assert city_object._kind == 'citydistrict'", "def load_data(client):\n codes = [\"DUB\", \"LHR\", \"ETC\", \"XXX\"]\n q = 
generateMultiInsertQuery(codes, \"Airport\")\n #print(json.dumps(q.json(), indent=4))\n q.execute(client)", "def test_unknown_countries(self):\n # Currently, there are no Countries or Regions\n self.assertEqual(Country.objects.count(), 0)\n self.assertEqual(Region.objects.count(), 0)\n\n # Call the command with countries that are not recognized by the iso3166 library\n self.call_command(filename='power_plant_import/tests/data/unknown_countries.csv')\n\n # No Countries or Regions were created during the test\n self.assertEqual(Country.objects.count(), 0)\n self.assertEqual(Region.objects.count(), 0)", "def load_data(city, month, day):\n # Load data file into a dataframe.\n print('\\nLoading data for city = {}, month = {}, day = {}...'\n .format(city, month, day))\n df = pd.read_csv(CITY_DATA[city])\n\n # Convert the Start Time column to datetime.\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n # Extract month, day of week, hour from Start Time to create new columns.\n df['Month'] = [MONTHS[int(m)] for m in df['Start Time'].dt.month]\n df['Day of Week'] = df['Start Time'].dt.weekday_name\n df['Hour'] = df['Start Time'].dt.hour\n # Create a column for the start and end station pairs.\n df['Path'] = df['Start Station'] + ' => ' + df['End Station']\n\n # Filter by month, if applicable.\n if month != 'All':\n df = df[df['Month'] == month]\n # Filter by day of week, if applicable\n if day != 'All':\n df = df[df['Day of Week'] == day]\n return df", "def push_to_cartodb(f):\n print \"attempting to import into cartodb\"\n config = loadConfig()\n cl = CartoDBAPIKey(config[\"API_KEY\"],config[\"user\"])\n fi = FileImport(f,cl,table_name='python_table_test')\n fi.run()\n\n return fi.success", "def __init__(self):\n\n with open('../examples/streets.txt', 'r') as sf:\n self.streets = sf.read()\n self.streets = self.streets.lower()\n\n with open('../examples/cities.txt', 'r') as cf:\n self.cities = cf.read()\n self.cities = self.cities.lower()", "def import_sitefinder_data(path):\n asset_data = []\n\n site_id = 0\n\n with open(os.path.join(path), 'r') as system_file:\n reader = csv.DictReader(system_file)\n next(reader, None)\n for line in reader:\n if line['Operator'] != 'Airwave' and line['Operator'] != 'Network Rail':\n # if line['Operator'] == 'O2' or line['Operator'] == 'Vodafone':\n # if line['Anttype'] == 'MACRO' or \\\n # line['Anttype'] == 'SECTOR' or \\\n # line['Anttype'] == 'Sectored' or \\\n # line['Anttype'] == 'Directional':\n asset_data.append({\n 'type': \"Feature\",\n 'geometry': {\n \"type\": \"Point\",\n \"coordinates\": [float(line['X']), float(line['Y'])]\n },\n 'properties':{\n 'name': 'site_' + str(site_id),\n 'Operator': line['Operator'],\n 'Opref': line['Opref'],\n 'Sitengr': line['Sitengr'],\n 'Antennaht': line['Antennaht'],\n 'Transtype': line['Transtype'],\n 'Freqband': line['Freqband'],\n 'Anttype': line['Anttype'],\n 'Powerdbw': line['Powerdbw'],\n 'Maxpwrdbw': line['Maxpwrdbw'],\n 'Maxpwrdbm': line['Maxpwrdbm'],\n 'Sitelat': float(line['Sitelat']),\n 'Sitelng': float(line['Sitelng']),\n }\n })\n\n site_id += 1\n\n else:\n pass\n\n return asset_data", "def candidates_import_from_sample_file():\n # Load saved json from local file\n logger.info(\"Loading CandidateCampaigns from local file\")\n\n with open(\"candidate/import_data/candidates_sample.json\") as json_data:\n structured_json = json.load(json_data)\n\n return candidates_import_from_structured_json(structured_json)", "def readLocations():\n locationsRead = []\n\n # Parallel reading from address_file and 
locations_file\n with open(\"Files/PublicPlaces.txt\", 'r', encoding='utf8') as f:\n for line in f:\n if line == \"\\n\":\n continue\n details = line.split(\",\")\n address = []\n for detail in details:\n address.append(detail.rstrip('\\n').rstrip().lstrip())\n locationsRead.append(address)\n f.close()\n return locationsRead", "def test_get_last_cities(client):\n\n response = client.get(\"/weather/1\")\n print(response.data)\n assert response.status_code == 200", "def load_data(city, month, day):\n\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start, end Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['End Time'] = pd.to_datetime(df['End Time'])\n # Calculate the travel time per trip and add that column to data frame.\n df['Travel Time'] = df['End Time'] - df['Start Time']\n\n # extract month and day of week from Start Time to create new columns\n df['Start Hour'] = df['Start Time'].dt.hour\n df['End Hour'] = df['End Time'].dt.hour\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "def test_valid_csv(self):\n url=\"http://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data\"\n readerobject=requester.url_to_csv(url, fname='wine2')\n self.assertTrue(str(type(readerobject)),\"_csv.reader\")", "def route_creation():\r\n city_ids = json.loads(open(\"cities.json\").read())\r\n cities = []\r\n for id in city_ids:\r\n cities.append(fetch_weather(id))\r\n return Route(cities)", "def test_required_city(self):\r\n self.url_params['city'] = 'New York'\r\n response = self.client.post(self.url, self.url_params)\r\n self.assertEqual(response.status_code, 200)\r\n obj = json.loads(response.content)\r\n self.assertTrue(obj['success'])", "def load_data(city, month, day):\n if city == 'new york city':\n df=pd.read_csv(\"./new_york_city.csv\")\n else: \n df=pd.read_csv(\"./\" + city + \".csv\")\n df['Start Time']=pd.to_datetime(df['Start Time'])\n df['month'] = df['Start Time'].dt.month\n df['day'] = df['Start Time'].dt.dayofweek\n df['hour'] =df['Start Time'].dt.hour\n if month !='all':\n df=df[df['month']==months_list[month]]\n if day != 'all':\n df=df[df['day']==days_list[day]]\n \n return df", "def candidates_import_from_sample_file():\n # Load saved json from local file\n logger.info(\"Loading CandidateCampaigns from local file\")\n\n with open(\"candidate/import_data/candidate_campaigns_sample.json\") as json_data:\n structured_json = json.load(json_data)\n\n return candidates_import_from_structured_json(structured_json)", "def load_data(city, month, day):\n\n print('\\nLoading Data...\\n')\n\n path = os.getcwd().replace('\\\\', '/') + '/'\n dir_path = os.path.dirname(os.path.realpath(__file__)).replace('\\\\', '/') + '/'\n try:\n df = pd.read_csv(dir_path + CITY_DATA.get(city))\n except FileNotFoundError as e:\n sys.exit('Error loading file. 
Make sure that the datafiles are in the working directory.\\npath: {}\\ndir_path: {}'.format(path, dir_path))\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.dayofweek\n df['hour'] = df['Start Time'].dt.hour\n\n if df is None:\n sys.exit('Error initializing dataframe. File was loaded successfully but load_data() failed.')\n\n return df", "def load_data(city, month, day):\n try:\n df = pd.read_csv(CITY_DATA[city])\n\n df['Start Time'] = pd.to_datetime(df['Start Time'], errors='coerce')\n df['End Time'] = pd.to_datetime(df['End Time'])\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n df['hour'] = df['Start Time'].dt.hour\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = MONTH_LIST.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df\n except Exception as e:\n print('Couldn\\'t load the file, as an Error occurred: {}'.format(e))", "def do_import(files, container_id):\n content = files['fname']['content']\n lines = string.splitfields(content, '\\n')\n for line in lines:\n line = line.strip()\n if line != '':\n username, role = string.splitfields(line, ';')\n try:\n user_id = get_user_by_username(username).id\n role_id = get_role_by_name(role).id\n items = DmsUserUrlRole.objects.filter(user=user_id).filter(container=container_id)\n if len(items) == 0:\n DmsUserUrlRole.save_user_url_role(DmsUserUrlRole(), user_id, container_id, role_id)\n else:\n item = items[0]\n item.role_id = role_id\n item.save()\n except:\n pass", "def cities(self):\n from models.engine.file_storage import FileStorage\n from models.city import City\n fs = FileStorage.all(City)\n city_list = []\n for key, value in fs.items():\n if 'City' in key and self.id == value.state_id:\n '''Append City instances maybe fucked up here!!!'''\n city_list.append(value)\n return city_list", "def load_data(city, month, day):\n \n filename = str(CITY_DATA.get(city))\n\n # load data file into a dataframe\n df = pd.read_csv(filename)\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month_int = months.index(month) +1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month_int] \n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n \n return df", "def load_data(city, month, day):\n input_file_name = CITY_DATA.get(city)\n\n # Load the CSV file into a Pandas data frame\n df = pd.read_csv(input_file_name)\n\n # Convert the format of the existing date field to a python DateTime\n df[\"Start 
Time\"] = pd.to_datetime(df[\"Start Time\"])\n\n # Create new columns to filter on\n df[\"month\"] = df[\"Start Time\"].dt.month\n df[\"alpha_day\"] = df[\"Start Time\"].dt.weekday_name\n\n # If a month was provided, filter on it\n if month != \"all\":\n month_num = VALID_MONTHS.index(month) + 1\n df = df[df[\"month\"] == month_num]\n\n # If a day was provided, filter on it\n if day != \"all\":\n df = df[df[\"alpha_day\"] == day.title()]\n\n return df", "def test_locations(self):\n url = reverse(\"locations\", args=[00000])\n response = self.client.get(url)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertTrue(isinstance(response.data, list))\n self.assertTrue(response.data) # list not empty\n\n location_data = response.data[0]\n data_keys = [\n \"title\",\n \"address\",\n \"address2\",\n \"city\",\n \"state\",\n \"postalCode\",\n \"distance\",\n \"hours\",\n \"phone\",\n \"geocode\",\n ]\n self.assertEqual(list(location_data.keys()), data_keys)", "def retrieve_existing_locations(\n vial_http: urllib3.connectionpool.ConnectionPool,\n) -> Iterator[dict]:\n resp = vial_http.request(\n \"GET\", \"/api/searchLocations?format=nlgeojson&all=1\", preload_content=False\n )\n\n for line in resp:\n yield geojson.loads(line)\n\n resp.release_conn()", "def test_bulk_locations(self):\n # do twice to check if it really updates\n lengths = []\n for i in range(2):\n file_path = os.path.join(os.path.dirname(__file__),\n self.testdata_folder,\n self.filename_locations)\n data = {\n 'bulk_upload' : open(file_path, 'rb'),\n }\n\n res = self.client.post(self.location_url, data)\n assert res.status_code == status.HTTP_201_CREATED, (\n responses.get(res.status_code, res.status_code), res.content)\n lengths.append(len(AdministrativeLocation.objects.all()))\n\n assert lengths[0] == lengths[1]\n\n file_path = os.path.join(os.path.dirname(__file__),\n self.testdata_folder,\n self.filename_locations_duplicates)\n data = {\n 'bulk_upload' : open(file_path, 'rb'),\n }\n\n res = self.client.post(self.location_url, data)\n assert res.status_code == status.HTTP_400_BAD_REQUEST", "def testGetDrugBankUrl(self):\n try:\n remoteLocator = \"https://www.drugbank.ca/releases/latest/downloads/all-full-database\"\n un = \"username\"\n pw = \"password\"\n # fn = self.__fileU.getFileName(remoteLocator)\n ok = self.__fileU.isLocal(remoteLocator)\n self.assertFalse(ok)\n #\n lPath = os.path.join(self.__workPath, \"db-download.zip\")\n ok = self.__fileU.get(remoteLocator, lPath, username=un, password=pw)\n self.assertTrue(ok)\n ok = self.__fileU.exists(lPath)\n self.assertTrue(ok)\n ok = self.__fileU.isLocal(lPath)\n self.assertTrue(ok)\n tPath = self.__fileU.getFilePath(lPath)\n self.assertEqual(lPath, tPath)\n self.__fileU.uncompress(lPath, outputDir=self.__workPath)\n\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()", "def test_extract_recipe_from_website(self):\n pass", "def load_data(city, month, day):\n #Load data for city\n print(\"\\nCity Data..\")\n df = pd.read_csv(CITY_DATA[city])\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['month'] = df['Start Time'].dt.month\n df['day_week'] = df['Start Time'].dt.day_name()\n df['hour'] = df['Start Time'].dt.hour\n if month != '7':\n months = ['1', '2', '3', '4', '5', '6']\n month = months.index(month) + 1\n df = df[df['month'] == month]\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_week'] == day.title()]\n return df", "def main():\n\n conn = 
psycopg2.connect(**env.DATABASE)\n cursor = conn.cursor()\n\n for file, city in env.supported_cities().items():\n try:\n data = add_metadata(parse_html(city, get_html(city)))\n save_data_to_db(cursor, data, file.title())\n except Exception as e:\n print(\"Failed to scrape '%s': %s\" %(city, e))\n print(traceback.format_exc())\n\n conn.commit()\n conn.close()", "def load_data(city, month, day):\n \n if city == 'chicago':\n filename = 'chicago.csv'\n elif city == 'new York':\n filename = 'new_york_city.csv'\n elif city == 'washington':\n filename = 'washington.csv'\n else:\n return -1\n \n df = pd.read_csv(filename)\n \n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['End Time'] = pd.to_datetime(df['End Time'])\n \n if month != 'all':\n df = df[df['Start Time'].dt.month == month]\n\n if day != 'all':\n df = df[df['Start Time'].dt.weekday == day]\n \n return df", "def test_city_country(self):\n santiago_chile = get_city_name('santiago', 'chile')\n self.assertEqual(santiago_chile, 'Santiago, Chile')", "def test_import_with_empty_name():\n citizen_with_empty_name = deepcopy(CITIZEN_EXAMPLE)\n citizen_with_empty_name[\"name\"] = \"\"\n with TestClient(app) as client:\n import_response = client.post(\n \"/imports\",\n json={\n \"citizens\": [citizen_with_empty_name]\n }\n )\n\n assert import_response.status_code == 400", "def test_get_clouds(self):\n pass", "def test_correct_city(self, ip_address, city_correct):\n city, country = get_geo(ip_address=ip_address)\n self.assertEqual(city, city_correct)" ]
[ "0.67597806", "0.65911627", "0.6365303", "0.6147274", "0.5999421", "0.59644586", "0.5793917", "0.57783747", "0.57087487", "0.56976557", "0.568284", "0.5604537", "0.55910504", "0.5550587", "0.55388725", "0.5531237", "0.55103743", "0.549801", "0.5494643", "0.5475696", "0.5474487", "0.5463505", "0.54496485", "0.5429551", "0.5421349", "0.54191804", "0.54123646", "0.5406623", "0.53964937", "0.53884596", "0.5382655", "0.5377673", "0.53695726", "0.5363753", "0.53441805", "0.5341878", "0.5337392", "0.53336847", "0.53249073", "0.5324019", "0.5321121", "0.53157157", "0.5310032", "0.53062516", "0.53018624", "0.5290466", "0.52806354", "0.52769816", "0.52686137", "0.5264528", "0.52616215", "0.5255075", "0.52525127", "0.52469164", "0.52431023", "0.5243003", "0.5235745", "0.5233518", "0.52335054", "0.5230212", "0.5220953", "0.5205714", "0.51916534", "0.51833534", "0.517513", "0.51659", "0.51632047", "0.5162002", "0.51611817", "0.51595104", "0.5158264", "0.5157559", "0.51568365", "0.5156028", "0.5145302", "0.5142051", "0.51330143", "0.5132505", "0.5130788", "0.51206034", "0.5118826", "0.51177776", "0.510938", "0.5099821", "0.5096929", "0.5096672", "0.50951076", "0.5084299", "0.5083364", "0.508252", "0.50757015", "0.5073159", "0.50674456", "0.50651395", "0.50632155", "0.5063139", "0.5062537", "0.50540096", "0.5053515", "0.50455546" ]
0.5930846
6
Tests the import from remote file for hotels works fine
def test_setting_csv_hotel_success(self): from django.contrib.messages import get_messages path = reverse("setting-csv") user = mixer.blend(User, is_staff=True, is_superuser=True) client = Client() client.force_login(user) client.post(path, {"title": "city", "url": "http://rachel.maykinmedia.nl/djangocase/city.csv", "username": "python-demo", "password": "claw30_bumps", "save": "on"}) r = client.post(path, {"title": "hotel", "url": "http://rachel.maykinmedia.nl/djangocase/hotel.csv", "username": "python-demo", "password": "claw30_bumps", "save": "on"}) messages = list(get_messages(r.wsgi_request)) assert r.status_code == 200 assert len(messages) == 1 assert str(messages[0]) == "Successfully Uploaded!"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_from_remote(self, url: Optional[str] = None) -> None:\n raise NotImplementedError", "def test_importfile_valid_remotepath_valid_localpath(self):\n\n # create a temporary file\n handle,remotepath = tempfile.mkstemp()\n indata = \"hubcheck\\ntool session shell test\\n%s\" % (remotepath)\n os.write(handle,indata)\n os.close(handle)\n\n # perform the transfer\n localpath,es = self.shell.execute('echo ${PWD}/$RANDOM.tmp')\n size = self.shell.importfile(remotepath,localpath)\n\n outdata = self.shell.read_file(localpath)\n\n # clean up the files\n self.shell.execute(\"rm -f %s\" % (localpath))\n os.remove(remotepath)\n\n # check the transfer\n self.assertTrue(size == len(indata),\n \"size mismatch: wrote %s bytes, expected %s bytes\" \\\n % (size,len(indata)))\n\n self.assertTrue(indata == outdata,\n \"file data mismatch: wrote '%s', expected '%s'\" \\\n % (repr(outdata),repr(indata)))", "def test_import_software_asset(self):\n pass", "def test_imports(self):\n\n # DEVICES\n from surrortg.devices.udp import ( # noqa:F401\n UdpActuator,\n UdpBot,\n UdpCar,\n UdpInput,\n )\n from surrortg.devices.udp.udp_protocol import ( # noqa:F811,F401\n open_remote_endpoint,\n open_local_endpoint,\n open_remote_endpoint,\n )\n\n # INPUTS\n from surrortg.inputs import ( # noqa:F401\n Input,\n Switch,\n DelayedSwitch,\n Joystick,\n Directions,\n LinearActuator,\n )\n\n # NETWORK\n from surrortg.network import ( # noqa:F401\n SocketHandler,\n MessageRouter,\n MultiSeatMessageRouter,\n )\n\n # ROOT\n from surrortg import Game, GameIO # noqa:F401", "def test_get_imports(self):\n pass", "def test_import_system_asset(self):\n pass", "def test_download_host(self):\n pass", "async def test_import(hass: HomeAssistantType, requests_mock: Mocker) -> None:\n mock_connection(requests_mock)\n\n user_input = {CONF_HOST: HOST}\n with patch(\n \"homeassistant.components.roku.async_setup\", return_value=True\n ) as mock_setup, patch(\n \"homeassistant.components.roku.async_setup_entry\", return_value=True,\n ) as mock_setup_entry:\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={CONF_SOURCE: SOURCE_IMPORT}, data=user_input\n )\n\n assert result[\"type\"] == RESULT_TYPE_CREATE_ENTRY\n assert result[\"title\"] == HOST\n\n assert result[\"data\"]\n assert result[\"data\"][CONF_HOST] == HOST\n\n await hass.async_block_till_done()\n assert len(mock_setup.mock_calls) == 1\n assert len(mock_setup_entry.mock_calls) == 1", "def _run_online_test(*args, **kwargs):\n import responses # noqa: F401", "def test_import_test_asset(self):\n pass", "def test_api_remote_import_post(self):\n default_api = DefaultApi(api_client=self.api_client)\n params = dlrnapi_client.ModelImport()\n path, method = default_api.api_remote_import_post(params)\n self.assertEqual(path, '/api/remote/import')\n self.assertEqual(method, 'POST')", "def import_module(self, location, name):", "def test_items_are_mounted(self):\n response2 = self.client.get(\"/importer/design26/models.py\")\n self.assertEquals(response2.status_code, 200)", "def test_import_process(self):\r\n good_file = self._get_file()\r\n imp = Importer(good_file, username=u\"admin\")\r\n imp.process()\r\n\r\n # now let's do some db sanity checks\r\n self._chrome_data_test()", "def test_csv_import_hotel_fail(self):\n from django.contrib.messages import get_messages\n path = reverse(\"import-csv\")\n user = mixer.blend(User, is_staff=True, is_superuser=True)\n client = Client()\n client.force_login(user)\n file = open(\"hotel.csv\")\n r = client.post(path, {\"title\": 
\"hotel\", \"csv_file\": file})\n messages = list(get_messages(r.wsgi_request))\n assert r.status_code == 200\n assert len(messages) >= 1\n for message in messages:\n assert \"can not import\" in str(message)", "def test_file_managed_http_source(file, tmp_path, remote_grail_scene33):\n name = str(tmp_path / \"testfile\")\n ret = file.managed(\n name=name,\n source=remote_grail_scene33.url,\n source_hash=remote_grail_scene33.hash,\n skip_verify=False,\n )\n assert ret.result is True", "def _import(file_path):\n proxy_factory.import_proxies(open(file_path, 'r'))", "def test__import_api_v6(self):\n response = textwrap.dedent(\n \"\"\"\\\n var pveapi = [\n {\n \"info\" : {\n }\n }\n ]\n ;\n \"\"\"\n )\n self._test__import_api(response)", "def test_remote_file(fs: FakeFilesystem, requests_mock: Mocker) -> None:\n requests_mock.get(\"https://example.com/test.csv\", text=CONTENTS)\n\n connection = connect(\":memory:\")\n cursor = connection.cursor()\n\n sql = 'SELECT * FROM \"https://example.com/test.csv\" WHERE \"index\" > 11'\n data = list(cursor.execute(sql))\n assert data == [(12.0, 13.3, \"Platinum_St\"), (13.0, 12.1, \"Kodiak_Trail\")]\n\n sql = \"\"\"\n INSERT INTO \"https://example.com/test.csv\" (\n \"index\",\n temperature,\n site\n ) VALUES (\n 14,\n 10.1,\n 'New_Site'\n )\n \"\"\"\n with pytest.raises(ProgrammingError) as excinfo:\n cursor.execute(sql)\n assert str(excinfo.value) == \"Cannot apply DML to a remote file\"\n\n sql = \"\"\"DELETE FROM \"https://example.com/test.csv\" WHERE site = 'Kodiak_Trail'\"\"\"\n with pytest.raises(ProgrammingError) as excinfo:\n cursor.execute(sql)\n assert str(excinfo.value) == \"Cannot apply DML to a remote file\"", "def test_import_process(self):\r\n good_file = self._get_file()\r\n imp = Importer(good_file, username=u\"admin\")\r\n imp.process()\r\n\r\n # now let's do some db sanity checks\r\n self._firefox_data_test()", "def command_import_from_hosted(self):\n import_from_hosted.main(*self.args())", "def importer():\n pass", "def get_import():\n\n msg = 'This is the orlo import url'\n return jsonify(message=msg), 200", "def remotes():", "async def test_flow_import(hass):\n mocked_device = _create_mocked_device()\n\n with _patch_config_flow_device(mocked_device):\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": SOURCE_IMPORT}, data=CONF_DATA\n )\n assert result[\"type\"] == RESULT_TYPE_CREATE_ENTRY\n assert result[\"title\"] == FRIENDLY_NAME\n assert result[\"data\"] == CONF_DATA\n\n mocked_device.get_supported_methods.assert_called_once()\n mocked_device.get_interface_information.assert_not_called()", "def test_import_data():\n\n file_path = os.path.join(CONST_ADVANTICSYS_DIR, CONST_ADVANTICSYS_TEST_1)\n\n # Bring df\n success, log, test_ingress_df = advanticsys_import(file_path)\n assert success, log\n assert isinstance(test_ingress_df, pd.DataFrame)\n\n # Test import function\n success, log = import_data(\n test_ingress_df,\n CONST_ADVANTICSYS,\n SQL_USER,\n SQL_PASSWORD,\n SQL_HOST,\n SQL_PORT,\n SQL_TEST_DBNAME\n )\n\n assert success is True, log \n assert log == \"New: 0 (uploaded); Duplicates: 75 (ignored)\"", "def test_simple_import(barred_tac_list_importer, logger, db_conn):\n expect_success(barred_tac_list_importer, 6, db_conn, logger)", "def test_import_process(self):\r\n good_file = self._get_google_file()\r\n imp = Importer(good_file, username=u\"admin\")\r\n imp.process()\r\n\r\n # now let's do some db sanity checks\r\n self._google_data_test()", "def test_reference_url_import(self):\n program = 
factories.ProgramFactory()\n resp, review = generate_review_object(\n program, state=all_models.Review.STATES.REVIEWED)\n del review\n program_id = program.id\n self.assertEqual(201, resp.status_code)\n import_data = OrderedDict(\n [\n (\"object_type\", \"Program\"),\n (\"Code*\", program.slug),\n (\"reference url\", \"test@test.com\")\n ]\n )\n response = self.import_data(import_data)\n self._check_csv_response(response, {})\n program = all_models.Program.query.get(program_id)\n self.assertEqual(\n all_models.Review.STATES.REVIEWED,\n program.review_status\n )", "def testFtpUrl(self):\n try:\n remoteLocator = self.__ftpFileUrl\n # fn = self.__fileU.getFileName(remoteLocator)\n ok = self.__fileU.isLocal(remoteLocator)\n self.assertFalse(ok)\n #\n dirPath = os.path.join(self.__workPath, \"chem_comp_models\")\n lPath = os.path.join(dirPath, self.__fileU.getFileName(self.__ftpFileUrl))\n ok = self.__fileU.get(remoteLocator, lPath)\n self.assertTrue(ok)\n ok = self.__fileU.exists(lPath)\n self.assertTrue(ok)\n ok = self.__fileU.isLocal(lPath)\n self.assertTrue(ok)\n tPath = self.__fileU.getFilePath(lPath)\n self.assertEqual(lPath, tPath)\n fp = self.__fileU.uncompress(lPath, outputDir=dirPath)\n ok = fp.endswith(\"chem_comp_model.cif\")\n self.assertTrue(ok)\n\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()", "def test_load(api):\n # upload file to file.io servers\n uploaded_file = api.upload(\n tag='test_file',\n expiry='1d',\n path='tests/test_file.txt'\n )\n\n # check that instance of FileIO has these fields\n assert uploaded_file.link\n assert uploaded_file.key\n assert uploaded_file.tag\n assert uploaded_file.path\n\n # check that the uploaded file was added to uploaded files list\n assert api.show_uploads()\n\n # check that our list is not empty\n assert api.file_obj_list\n\n # export the file in json format\n api.export('tests/exported.json')\n\n # check that exported file exists\n assert path.isfile('tests/exported.json')\n\n # set it to empty list\n api.file_obj_list = []\n\n # load the file in json format\n api.load('tests/exported.json')\n\n # remove the file\n remove('tests/exported.json')\n\n # check that the uploaded file was added to uploaded files list\n assert api.show_uploads()\n\n # check that our list is not empty\n assert api.file_obj_list\n\n # export the file in pkl format\n api.export('tests/exported.pkl')\n\n # set it to empty list\n api.file_obj_list = []\n\n # load the file in pkl format\n api.load('tests/exported.pkl')\n\n # remove exported.pkl file\n remove('tests/exported.pkl')\n\n # check that the uploaded file was added to uploaded files list\n assert api.show_uploads()\n\n # check that our list is not empty\n assert api.file_obj_list", "def main(\n app_url: str,\n user: str,\n password: str,\n paths: Tuple[Union[Path, str], ...],\n load_mode: str,\n mode: str,\n no_installed_keywords: bool,\n include: str,\n exclude: str,\n) -> None:\n client = Client(app_url, user, password)\n if mode == \"keywords\":\n rfhub_importer = KeywordsImporter(\n client, paths, no_installed_keywords, load_mode, include, exclude\n )\n loaded_collections, loaded_keywords = rfhub_importer.import_data()\n print(\n f\"\\nSuccessfully loaded {loaded_collections} collections with {loaded_keywords} keywords.\"\n )\n elif mode == \"statistics\":\n rfhub_importer = StatisticsImporter(client, paths)\n loaded_files, loaded_statistics = rfhub_importer.import_data()\n print(\n f\"\\nSuccessfully loaded {loaded_files} files with {loaded_statistics} 
statistics.\"\n )", "def test_csv_import_hotel_success(self):\n from django.contrib.messages import get_messages\n path = reverse(\"import-csv\")\n user = mixer.blend(User, is_staff=True, is_superuser=True)\n file = open(\"city.csv\")\n client = Client()\n client.force_login(user)\n client.post(path, {\"title\": \"city\", \"csv_file\": file})\n file = open(\"hotel.csv\")\n r = client.post(path, {\"title\": \"hotel\", \"csv_file\": file})\n messages = list(get_messages(r.wsgi_request))\n assert r.status_code == 200\n assert len(messages) == 1\n assert str(messages[0]) == \"Successfully Uploaded!\"", "def ezimport(conn: BlitzGateway, target: str,\n project: Optional[Union[str, int]] = None,\n dataset: Optional[Union[str, int]] = None,\n screen: Optional[Union[str, int]] = None,\n ln_s: Optional[bool] = False, ann: Optional[dict] = None,\n ns: Optional[str] = None) -> Union[List[int], None]:\n\n imp_ctl = Importer(conn, target, project, dataset, screen,\n ln_s, ann, ns)\n imp_ctl.ezimport()\n if imp_ctl.screen:\n imp_ctl.get_plate_ids()\n imp_ctl.organize_plates()\n imp_ctl.annotate_plates()\n return imp_ctl.plate_ids\n\n else:\n imp_ctl.get_my_image_ids()\n imp_ctl.organize_images()\n imp_ctl.annotate_images()\n return imp_ctl.image_ids", "def test_import_string(self):\n assert utils.import_string('ttgn.pokedex.utils') == utils", "def test_regressions_imports(self):\n issue = {\n \"number\": \"main/main\",\n \"contract\": \"C\",\n \"txlimit\": 1,\n \"in_directory\": \"imports_issue\",\n }\n self._simple_cli_run(\n f'{issue[\"number\"]}.sol',\n contract=issue[\"contract\"],\n tx_limit=issue[\"txlimit\"],\n in_directory=issue.get(\"in_directory\"),\n )", "def test_is_url_from_local_instance_returns_true_if_url_is_from_local_instance(\n self,\n ):\n # Arrange / Act\n return_value = BlobDownloader(\n f\"{settings.SERVER_URI}/987653456789\"\n ).is_url_from_local_instance()\n # Assert\n self.assertEqual(return_value, True)", "def ezimport_ln_s(self) -> bool:\n\n cli = CLI()\n cli.register('import', ImportControl, '_')\n cli.register('sessions', SessionsControl, '_')\n cli.invoke(['import',\n '-k', self.conn.getSession().getUuid().val,\n '-s', self.conn.host,\n '-p', str(self.conn.port),\n '--transfer', 'ln_s',\n str(self.file_path)])\n if cli.rv == 0:\n self.imported = True\n print(f'Imported {self.file_path}')\n return True\n else:\n logging.error(f'Import of {self.file_path} has failed!')\n return False", "def test_import_process(self):\r\n good_file = self._get_del_file()\r\n imp = Importer(good_file, username=u\"admin\")\r\n imp.process()\r\n\r\n # now let's do some db sanity checks\r\n self._delicious_data_test()", "def test_search_remote_connector_content(self):\n pass", "def import_realia(where=None):\n config = get_config(where)\n with settings(host_string=config['host_string']), cd(config['installation_dir']):\n run('git pull')\n run('cd import_scripts;../bin/python import_realia.py load_fixture')\n run('bin/django update_index dasa.Realia')", "def test_import_infra(self):\n project = Project.create()\n # Read an engine and check\n infra = import_infra(\"A320.xml\", \"engine\")\n self.assertEqual(len(infra.engines), 1)\n engine = infra.engines[0]\n self.assertEqual(engine.name, \"Machine 0\")\n self.assertEqual(engine.hauteur, 0.0)\n # Local frame:\n self.assertEqual(engine.position.x, 0.0)\n self.assertEqual(engine.position.y, 0.0)\n self.assertEqual(engine.position.z, 0.0)\n\n # Read a building and check\n infra = import_infra(\"Building.xml\", \"building\")\n 
self.assertEqual(len(infra.buildings), 1)\n building = infra.buildings[0]\n self.assertEqual(building.name, \"MyBuilding\")\n self.assertEqual(building.hauteur, 0.0)\n # Local frame:\n self.assertEqual(building.position.x, 0.0)\n self.assertEqual(building.position.y, 0.0)\n self.assertEqual(building.position.z, 0.0)\n\n # Check a no radiant building is refused:\n try:\n infra = import_infra(\"Building_no_radiant.xml\", \"building\")\n except:\n print(\"Ok, non radiant building is refused as expected.\")\n else:\n print(\"Non radiant building should be refused.\")\n sys.exit(-1)", "def load(self, url):\n pass", "def load(self, url):\n pass", "def _run_offline_test(*args, **kwargs):\n import responses # noqa: F401", "def performImportToPortal(portal):\n\n globals()['import_out'] = StringIO()\n\n instance_ipath, product_ipath, temp_dir_path, product_file_names = copyToInstanceImport()\n\n if product_file_names:\n beforeimporting_objects = portal.objectItems()\n importToPortalRoot(portal, product_file_names, temp_dir_path)\n fixImportingIssues(portal, beforeimporting_objects)\n cleanInstanceImport(instance_ipath, product_file_names, temp_dir_path)\n else:\n print >> import_out, \"!!! Failure importing: there is no file for importing to be found.\"\n\n result = import_out.getvalue()\n\n del globals()['import_out']\n\n return result", "def test_retrieve_files_with_pre_hook(self):\n os.makedirs('/tmp/remote_pacha/localhost/etc')\n os.mkdir('/tmp/remote_pacha/localhost/home')\n remote_file = open('/tmp/remote_pacha/localhost/etc/etc.conf', 'w')\n remote_file.write(\"remote second file\")\n remote_file.close()\n remote_file = open('/tmp/remote_pacha/localhost/home/home.conf', 'w')\n remote_file.write(\"remote file\")\n remote_file.close()\n os.makedirs('/tmp/remote_pacha/localhost/pacha_pre')\n touch_script = open('/tmp/remote_pacha/localhost/pacha_pre/foo.sh', 'w')\n touch_script.write('''touch /tmp/remote_pacha/localhost/pre_got_executed.txt''')\n touch_script.close()\n server = \"%s@%s\" % (self.username, host.hostname()) \n run = rebuild.Rebuild(server=server,\n hostname='localhost', \n source='/tmp/remote_pacha')\n run.retrieve_files()\n result_1 = os.path.isfile('/tmp/localhost/etc/etc.conf')\n result_2 = os.path.isfile('/tmp/localhost/home/home.conf')\n line = open('/tmp/localhost/etc/etc.conf')\n remote_line = line.readline()\n self.assertEqual(remote_line, \"remote second file\")\n self.assertTrue(result_2)\n self.assertTrue(result_1)\n self.assertTrue(os.path.isfile('/tmp/remote_pacha/localhost/pre_got_executed.txt'))", "def import_internal_locations_from_json(dump_file, include,\n rectype=\"internal_location\"):\n dump_file = dump_file[0]\n model, provider = model_provider_by_rectype(rectype)\n library_model, library_provider = model_provider_by_rectype(\"library\")\n\n include_ids = None if include is None else include.split(',')\n with click.progressbar(json.load(dump_file)) as bar:\n records = []\n for record in bar:\n click.echo('Importing internal location \"{0}({1})\"...'.\n format(record['legacy_id'], rectype))\n if include_ids is None or record['legacy_id'] in include_ids:\n # remove the library type as it is not a part of the data model\n library_type = record.pop(\"type\", None)\n record[\"legacy_id\"] = str(record[\"legacy_id\"])\n if library_type == \"external\":\n # if the type is external => ILL Library\n record = import_record(record, library_model,\n library_provider,\n legacy_id_key='legacy_id')\n records.append(record)\n else:\n location_pid_value, _ = \\\n 
current_app_ils.get_default_location_pid\n record[\"location_pid\"] = location_pid_value\n record = import_record(record, model, provider,\n legacy_id_key='legacy_id')\n records.append(record)\n # Index all new internal location and libraries records\n bulk_index_records(records)", "def test_package(response):\n \n # from comeon_core import update\n ip = getIP()\n print(ip)\n db = connect()\n engine = db.connect() \n init_db(engine)\n update(engine)\n assert True", "def _remoteScript(self, source_script):", "def test_import(self, game=\"SuperMarioKart-Snes\"):\n self.assertTrue(game in retro.data.list_games())", "async def test_import_with_no_config(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker\n) -> None:\n create_mock_api_discovery(aioclient_mock, [(\"0.0.0.0\", \"1234\")])\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN,\n context={\"source\": config_entries.SOURCE_IMPORT},\n data={\"host\": \"0.0.0.0\"},\n )\n\n assert result[\"type\"] == \"form\"\n assert result[\"step_id\"] == \"link\"", "def test_download(self):\n pass", "def fetch_velib_auto():\n # This try statement guards against the lack of internet connection\n try:\n dat = get_velib_data()\n except URL.URLError as err:\n print \"URLError: No internet connection?\"\n return 0\n\n save_velib_data(dat, glob.datafile)", "def testZipUrl(self):\n try:\n remoteLocator = self.__zipFileUrl\n # fn = self.__fileU.getFileName(remoteLocator)\n ok = self.__fileU.isLocal(remoteLocator)\n self.assertFalse(ok)\n #\n lPath = os.path.join(self.__workPath, self.__fileU.getFileName(self.__zipFileUrl))\n ok = self.__fileU.get(remoteLocator, lPath)\n self.assertTrue(ok)\n ok = self.__fileU.exists(lPath)\n self.assertTrue(ok)\n ok = self.__fileU.isLocal(lPath)\n self.assertTrue(ok)\n tPath = self.__fileU.getFilePath(lPath)\n self.assertEqual(lPath, tPath)\n fp = self.__fileU.uncompress(lPath, outputDir=self.__workPath)\n ok = fp.endswith(\"Food_Display_Table.xlsx\")\n self.assertTrue(ok)\n\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()", "def testRemote(self):\n try:\n remoteLocator = self.__httpsFileUrl\n ok = self.__fileU.isLocal(remoteLocator)\n self.assertFalse(ok)\n #\n ok = self.__fileU.exists(remoteLocator)\n self.assertTrue(ok)\n size = self.__fileU.size(remoteLocator)\n self.assertGreaterEqual(size, 1000)\n\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()", "def test_retrieve_files_all(self):\n os.makedirs('/tmp/remote_pacha/localhost/etc')\n os.mkdir('/tmp/remote_pacha/localhost/home')\n remote_file = open('/tmp/remote_pacha/localhost/etc/etc.conf', 'w')\n remote_file.write(\"remote second file\")\n remote_file.close()\n remote_file = open('/tmp/remote_pacha/localhost/home/home.conf', 'w')\n remote_file.write(\"remote file\")\n remote_file.close()\n server = \"%s@%s\" % (self.username, host.hostname()) \n run = rebuild.Rebuild(server=server,\n hostname='localhost', \n source='/tmp/remote_pacha')\n run.retrieve_files()\n result_1 = os.path.isfile('/tmp/localhost/etc/etc.conf')\n result_2 = os.path.isfile('/tmp/localhost/home/home.conf')\n line = open('/tmp/localhost/etc/etc.conf')\n remote_line = line.readline()\n self.assertEqual(remote_line, \"remote second file\")\n self.assertTrue(result_2)\n self.assertTrue(result_1)", "def _import_elmo():\n\n elmo = hub.Module('https://storage.googleapis.com/az-nlp/elmo_ru-news_wmt11-16_1.5M_steps.tar.gz',\n trainable=False) # news\n # elmo = 
hub.Module('https://storage.googleapis.com/az-nlp/elmo_ru-twitter_2013-01_2018-04_600k_steps.tar.gz',\n # trainable=False) # twitter\n print('❤️ ❤️ ❤️ DONE (re)importing Tensorflow hub.Module ')\n print('Tensorflow version is', tf.__version__)\n\n return elmo", "def test_extract_recipe_from_website(self):\n pass", "def test_load_taric(self):\n\n c = Client()\n response = c.get('/taric_books/taric/')\n\n self.assertEqual(response.status_code, 200)", "def test_documentation_popxl_remote_var(self):\n filename = \"remote_variable.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_steps(api):\n do_upload = api.properties.get('do_upload') == 'true'\n images = api.properties.get('images') == 'true'\n lotties = api.properties.get('lotties') == 'true'\n resources = api.properties.get('resources') == 'true'\n skps = api.properties.get('skps') == 'true'\n svgs = api.properties.get('svgs') == 'true'\n\n api.flavor.install(\n images=images,\n lotties=lotties,\n resources=resources,\n skps=skps,\n svgs=svgs,\n )\n\n use_hash_file = False\n if do_upload:\n host_dm_dir = str(api.flavor.host_dirs.dm_dir)\n api.flavor.create_clean_host_dir(api.path['start_dir'].join('test'))\n device_dm_dir = str(api.flavor.device_dirs.dm_dir)\n if host_dm_dir != device_dm_dir:\n api.flavor.create_clean_device_dir(device_dm_dir)\n\n # Obtain the list of already-generated hashes.\n hash_filename = 'uninteresting_hashes.txt'\n\n host_hashes_file = api.vars.tmp_dir.join(hash_filename)\n hashes_file = api.flavor.device_path_join(\n api.flavor.device_dirs.tmp_dir, hash_filename)\n api.run(\n api.python.inline,\n 'get uninteresting hashes',\n program=\"\"\"\nimport contextlib\nimport math\nimport socket\nimport sys\nimport time\n\nfrom urllib.request import urlopen\n\nHASHES_URL = sys.argv[1]\nRETRIES = 5\nTIMEOUT = 60\nWAIT_BASE = 15\n\nsocket.setdefaulttimeout(TIMEOUT)\nfor retry in range(RETRIES):\n try:\n with contextlib.closing(\n urlopen(HASHES_URL, timeout=TIMEOUT)) as w:\n hashes = w.read().decode('utf-8')\n with open(sys.argv[2], 'w') as f:\n f.write(hashes)\n break\n except Exception as e:\n print('Failed to get uninteresting hashes from %s:' % HASHES_URL)\n print(e)\n if retry == RETRIES:\n raise\n waittime = WAIT_BASE * math.pow(2, retry)\n print('Retry in %d seconds.' 
% waittime)\n time.sleep(waittime)\n \"\"\",\n args=[api.properties['gold_hashes_url'], host_hashes_file],\n # If this fails, we want to know about it because it means Gold is down\n # and proceeding onwards would take a very long time, but be hard to notice.\n abort_on_failure=True,\n fail_build_on_failure=True,\n infra_step=True)\n\n if api.path.exists(host_hashes_file):\n api.flavor.copy_file_to_device(host_hashes_file, hashes_file)\n use_hash_file = True\n\n # Find DM flags.\n args = json.loads(api.properties['dm_flags'])\n props = json.loads(api.properties['dm_properties'])\n args.append('--properties')\n # Map iteration order is arbitrary; in order to maintain a consistent step\n # ordering, sort by key.\n for k in sorted(props.keys()):\n v = props[k]\n if v == '${SWARMING_BOT_ID}':\n v = api.vars.swarming_bot_id\n elif v == '${SWARMING_TASK_ID}':\n v = api.vars.swarming_task_id\n if v != '':\n args.extend([k, v])\n\n # Paths to required resources.\n if resources:\n args.extend(['--resourcePath', api.flavor.device_dirs.resource_dir])\n if skps:\n args.extend(['--skps', api.flavor.device_dirs.skp_dir])\n if images:\n args.extend([\n '--images', api.flavor.device_path_join(\n api.flavor.device_dirs.images_dir, 'dm'),\n '--colorImages', api.flavor.device_path_join(\n api.flavor.device_dirs.images_dir, 'colorspace'),\n ])\n if svgs:\n # svg_dir is the root of the SVG corpus. Within that directory,\n # the *.svg inputs are in the 'svg' subdirectory. See skbug.com/11229\n args.extend(['--svgs', api.flavor.device_path_join(\n api.flavor.device_dirs.svg_dir, \"svg\")])\n if lotties:\n args.extend([\n '--lotties',\n api.flavor.device_path_join(\n api.flavor.device_dirs.resource_dir, 'skottie'),\n api.flavor.device_dirs.lotties_dir,\n ])\n\n if use_hash_file:\n args.extend(['--uninterestingHashesFile', hashes_file])\n if do_upload:\n args.extend(['--writePath', api.flavor.device_dirs.dm_dir])\n\n # Run DM.\n api.run(api.flavor.step, 'dm', cmd=args, abort_on_failure=False)\n\n if do_upload:\n # Copy images and JSON to host machine if needed.\n api.flavor.copy_directory_contents_to_host(\n api.flavor.device_dirs.dm_dir, api.flavor.host_dirs.dm_dir)\n # https://bugs.chromium.org/p/chromium/issues/detail?id=1192611\n if 'Win' not in api.vars.builder_cfg.get('os', ''):\n api.gold_upload.upload()", "def test_dag_load(self):\n # Run tests both for telescope with file suffixes and without\n for accounts in [None, {\"accounts\": [\"foo\", \"bar\"]}]:\n with self.subTest(accounts=accounts):\n env = ObservatoryEnvironment(\n self.project_id, self.data_location, api_host=self.host, api_port=self.api_port\n )\n with env.create():\n # Add Observatory API connection\n conn = Connection(\n conn_id=AirflowConns.OBSERVATORY_API, uri=f\"http://:password@{self.host}:{self.api_port}\"\n )\n env.add_connection(conn)\n\n # Add a Google Books telescope\n dt = pendulum.now(\"UTC\")\n telescope_type = orm.TelescopeType(\n name=\"Google Books Telescope\", type_id=TelescopeTypes.google_books, created=dt, modified=dt\n )\n env.api_session.add(telescope_type)\n organisation = orm.Organisation(name=\"anu-press\", created=dt, modified=dt)\n env.api_session.add(organisation)\n telescope = orm.Telescope(\n name=\"anu-press Google Books Telescope\",\n telescope_type=telescope_type,\n organisation=organisation,\n modified=dt,\n created=dt,\n extra=accounts,\n )\n env.api_session.add(telescope)\n env.api_session.commit()\n\n dag_file = os.path.join(module_file_path(\"oaebu_workflows.dags\"), \"google_books_telescope.py\")\n 
self.assert_dag_load(\"google_books_anu-press\", dag_file)", "def setUp(self):\n models.Connector.objects.create(\n identifier=\"openlibrary.org\",\n name=\"OpenLibrary\",\n connector_file=\"openlibrary\",\n base_url=\"https://openlibrary.org\",\n books_url=\"https://openlibrary.org\",\n covers_url=\"https://covers.openlibrary.org\",\n search_url=\"https://openlibrary.org/search?q=\",\n isbn_search_url=\"https://openlibrary.org/isbn\",\n )\n self.connector = Connector(\"openlibrary.org\")\n\n work_file = pathlib.Path(__file__).parent.joinpath(\"../data/ol_work.json\")\n edition_file = pathlib.Path(__file__).parent.joinpath(\"../data/ol_edition.json\")\n edition_md_file = pathlib.Path(__file__).parent.joinpath(\n \"../data/ol_edition_markdown.json\"\n )\n edition_list_file = pathlib.Path(__file__).parent.joinpath(\n \"../data/ol_edition_list.json\"\n )\n self.work_data = json.loads(work_file.read_bytes())\n self.edition_data = json.loads(edition_file.read_bytes())\n self.edition_md_data = json.loads(edition_md_file.read_bytes())\n self.edition_list_data = json.loads(edition_list_file.read_bytes())", "def load_part(self, partname, remoteclassname):\n success = False\n logger.info(u\"{} Loading of part: {}\".format(self.uid, partname))\n try:\n\n module = importlib.import_module(\"parts.{p}.{p}Remote\".format(\n p=partname))\n logger.info(\n le2mtrans(u\"{j} Module parts.{p}.{p}Remote loaded\").format(\n j=self.uid, p=partname))\n rem_temp = getattr(module, remoteclassname)\n remote = rem_temp(self)\n self._remotes[partname] = remote\n logger.info(u\"{} Part {} loaded successfully\".format(\n self.uid, partname))\n success = True\n\n except (KeyError, ImportError, AttributeError) as e:\n logger.critical(\n u\"{} Error while loading part: {}\".format(self.uid, e.message))\n\n finally:\n return success", "def test_import_process(self):\r\n good_file = self._get_del_file()\r\n imp = Importer(good_file, username=u\"admin\")\r\n imp.process()\r\n\r\n # now let's do some db sanity checks\r\n self._delicious_xml_data_test()", "def test_remote_plugin(self):\n plugin_name = 'Slack'\n Plugin.download_plugin(plugin_name)\n target = os.path.join(expanduser('~'), '.drupdates', 'plugins', plugin_name)\n plugins = Plugin.get_plugins()\n assert plugins[plugin_name]['info'][1] == os.path.join(target, '__init__.py')", "def test_load(self):\n command = constituencies.Command()\n command.handle('load', silent=True)", "def reinit_tripwire_from_splunk():\n lhosts=fabric.api.sudo(\"cat /opt/splunk/var/run/splunk/tripwire_epure.txt |cut -d' ' -f4|grep -v 'Status'|sort |uniq|tr '\\n' ' '\")\n #execute(_un_trip,hosts='opsips02s')\n execute(_un_trip,hosts=lhosts.split(' '))", "def test_dag_load(self):\n\n with ObservatoryEnvironment().create():\n dag_file = os.path.join(module_file_path(\"academic_observatory_workflows.dags\"), \"unpaywall_telescope.py\")\n self.assert_dag_load(\"unpaywall\", dag_file)", "def load_rentedout():", "def import_from_url(jamsite, url, fieldnames=None):\n\t# import csv, from the webz.\n\tcsvfile = fetch_csv_from_url(url)\n\tjamsite.mergeinsert( import_jammers(csvfile, fieldnames=fieldnames) )", "def _test__import_api(self, response):\n requests_get_mock = MagicMock()\n requests_get_mock.return_value.status_code = 200\n requests_get_mock.return_value.text = response\n with patch(\"requests.get\", requests_get_mock):\n proxmox._import_api()\n self.assertEqual(proxmox.api, [{\"info\": {}}])\n return", "def test_autoload_session(session: CloudShellAPISession, autoload_resource: ResourceInfo, 
dut: List[str]) -> None:\n session.AutoLoad(autoload_resource.Name)\n resource_details = session.GetResourceDetails(autoload_resource.Name)\n assert len(resource_details.ChildResources) == 1\n assert resource_details.ChildResources[0].FullAddress == f\"{dut[0]}/M1\"", "def import_locations(type_slug, zip_url):\n\n require('environment', provided_by=env.environments)\n locations_dir = '/tmp/fab_location_importer'\n if files.exists(locations_dir):\n sudo('rm -rf %s' % locations_dir, user=env.deploy_user)\n sudo('mkdir %s' % locations_dir, user=env.deploy_user)\n cmd = 'PYTHONPATH=%(code_root)s '\\\n 'DJANGO_SETTINGS_MODULE=openrural.local_settings '\\\n '%(virtualenv_root)s/bin/import_locations' % env\n with cd(locations_dir):\n sudo('wget -O locations.zip %s' % zip_url, user=env.deploy_user)\n sudo('unzip -d locations locations.zip', user=env.deploy_user)\n sudo(' '.join([cmd, type_slug, 'locations']), user=env.deploy_user)", "def test_remote_ref(tmp_path, _clean_remote_schemas_store):\n # Create file\n directory = tmp_path / \"base\"\n directory.mkdir()\n schemas_file = directory / \"original.json\"\n remote_schemas_file = directory / \"remote.json\"\n remote_schemas_file.write_text('{\"Table\": {\"key\": \"value\"}}')\n # Set up remote schemas store\n ref.set_context(path=str(schemas_file))\n schemas = {\"RefTable\": {\"$ref\": \"remote.json#/Table\"}}\n model_factory = mock.MagicMock()\n\n define_all.define_all(model_factory=model_factory, schemas=schemas)", "def test_retrieve_files_move_existing_file(self):\n os.makedirs('/tmp/remote_pacha/localhost/etc')\n os.mkdir('/tmp/remote_pacha/localhost/home')\n remote_file = open('/tmp/remote_pacha/localhost/etc/etc.conf', 'w')\n remote_file.write(\"remote second file\")\n remote_file.close()\n remote_file = open('/tmp/remote_pacha/localhost/home/home.conf', 'w')\n remote_file.write(\"remote file\")\n remote_file.close()\n server = \"%s@%s\" % (self.username, host.hostname()) \n os.mkdir('/tmp/localhost')\n\n run = rebuild.Rebuild(server=server,\n hostname='localhost', \n source='/tmp/remote_pacha')\n run.retrieve_files()\n result_1 = os.path.isfile('/tmp/localhost/etc/etc.conf')\n result_2 = os.path.isfile('/tmp/localhost/home/home.conf')\n result_3 = os.path.isdir('/tmp/localhost.%s' % strftime('%H%M%s'))\n line = open('/tmp/localhost/etc/etc.conf')\n remote_line = line.readline()\n self.assertEqual(remote_line, \"remote second file\")\n self.assertTrue(result_3)\n self.assertTrue(result_2)\n self.assertTrue(result_1)", "def test_is_url_from_local_instance_returns_false_if_url_is_not_from_local_instance(\n self,\n ):\n # Arrange / Act\n return_value = BlobDownloader(\n \"http://google.com\"\n ).is_url_from_local_instance()\n # Assert\n self.assertEqual(return_value, False)", "def test_source_from_url(self):\n TEST_URI = \"http://testme.com/test.cfg\"\n httpretty.register_uri(httpretty.GET, TEST_URI, body=http_manifest)\n m = load_manifest(TEST_URI)\n assert m.source() == TEST_URI", "def test_importable():\n root_path = os.path.dirname(MY_DIRECTORY)\n\n for version in versioning.get_all_versions():\n v = version.label.replace(\".\", \"_\")\n path = os.path.join(root_path, v)\n module_names = [m[:-3] for m in os.listdir(path) if m.endswith(\".py\")]\n for name in module_names:\n m = importlib.import_module(\".\".join([\"kuber\", v, name]))\n assert m is not None, f\"Expected kuber.{v}.{m} to be importable.\"", "def load_satellite_endpoint():\n pass", "def _import_config(handle, file_name, file_location=\"ucscentral\",\n file_dir=None, 
merge=True, protocol=None,\n hostname=\"localhost\",\n username=None, password=\"\", timeout=120):\n\n from ..mometa.top.TopSystem import TopSystem\n from ..mometa.mgmt.MgmtDataImporter import MgmtDataImporter, \\\n MgmtDataImporterConsts\n\n if not file_name:\n raise UcscValidationException(\"Missing file_name argument\")\n\n if file_location != \"ucscentral\":\n if not file_dir:\n raise UcscValidationException(\"Missing file_dir argument\")\n\n if (not file_name.endswith('.tgz')):\n raise UcscValidationException(\"file_name must be .tgz format\")\n\n top_system = TopSystem()\n\n if file_location == \"remote\":\n file_path = os.path.join(file_dir, file_name)\n _validate_remote_host_args(protocol, hostname, username, password)\n mgmt_importer = MgmtDataImporter(\n parent_mo_or_dn=top_system,\n hostname=hostname,\n remote_file=file_path,\n proto=protocol,\n user=username,\n pwd=password,\n admin_state=MgmtDataImporterConsts.ADMIN_STATE_ENABLED)\n\n elif file_location == \"local\":\n file_path = os.path.join(file_dir, file_name)\n if not os.path.exists(file_path):\n raise UcscOperationError(\"Import config\",\n \"Backup File '%s' not found\" %\n file_path)\n mgmt_importer = MgmtDataImporter(\n parent_mo_or_dn=top_system,\n hostname=\"localhost\",\n remote_file='/' + file_name,\n proto=MgmtDataImporterConsts.PROTO_HTTP,\n admin_state=MgmtDataImporterConsts.ADMIN_STATE_ENABLED)\n\n elif file_location == \"ucscentral\":\n if not _is_backup_file_on_server(handle, \"ucs-central\", file_name):\n raise UcscOperationError(\"Import config\",\n \"Backup File '%s' not found \"\n \"on UcsCentral\" % file_name)\n mgmt_importer = MgmtDataImporter(\n parent_mo_or_dn=top_system,\n hostname=\"localhost\",\n remote_file='/ucs-central/cfg-backups/' + file_name,\n proto=MgmtDataImporterConsts.PROTO_TFTP,\n admin_state=MgmtDataImporterConsts.ADMIN_STATE_ENABLED)\n\n else:\n raise UcscOperationError(\n \"Import config\",\n \"Invalid file_location argument.\"\n \"It must be either ucscentral,local or remote\")\n\n if merge:\n mgmt_importer.action = MgmtDataImporterConsts.ACTION_MERGE\n else:\n mgmt_importer.action = MgmtDataImporterConsts.ACTION_REPLACE\n\n if file_location == \"local\":\n try:\n log.debug(\"Start uploading config\")\n uri_suffix = \"operations/file-%s/importconfig.txt?Cookie=%s\" % (\n file_name, handle.cookie)\n handle.file_upload(url_suffix=uri_suffix,\n file_dir=file_dir,\n file_name=file_name)\n\n except Exception as err:\n UcscWarning(str(err))\n raise UcscOperationError(\"Upload config\", \"upload failed\")\n\n handle.add_mo(mgmt_importer, modify_present=True)\n handle.commit()\n\n duration = timeout\n poll_interval = 2\n log.debug(\"Importing UcsCentral config\")\n while True:\n mgmt_importer = handle.query_dn(dn=mgmt_importer.dn)\n admin_state = mgmt_importer.admin_state\n\n # Break condition:- if state id disabled then break\n if admin_state == MgmtDataImporterConsts.ADMIN_STATE_DISABLED:\n break\n\n time.sleep(min(duration, poll_interval))\n duration = max(0, (duration - poll_interval))\n if duration == 0:\n raise UcscOperationError(\n \"Import config\", \"operation timed out\")\n\n if mgmt_importer.over_all_status != \\\n MgmtDataImporterConsts.OVER_ALL_STATUS_ALL_SUCCESS:\n raise UcscOperationError(\n \"Import config\",\n (\"operational status %s \" % mgmt_importer.over_all_status))\n\n log.debug(\"Import config to UcsCentral was successfull\")\n return mgmt_importer", "def test_unfetchable_url(self):\r\n url = u'file://test.html'\r\n read = readable.ReadUrl.parse(url)\r\n 
self.assertEqual(read.status, 901)", "def test_download2(self):\n pass", "def test_dag_load(self):\n\n with ObservatoryEnvironment().create():\n dag_file = os.path.join(module_file_path(\"academic_observatory_workflows.dags\"), \"openalex_telescope.py\")\n self.assert_dag_load(\"openalex\", dag_file)", "def test_get(self):\n response = self.client.get(\n reverse(\n 'projectroles:api_remote_get',\n kwargs={'secret': REMOTE_SITE_SECRET},\n )\n )\n self.assertEqual(response.status_code, 200)\n expected = self.remote_api.get_source_data(self.target_site)\n response_dict = json.loads(response.content.decode('utf-8'))\n self.assertEqual(response_dict, expected)", "def test_retrieve_files_single(self):\n os.makedirs('/tmp/remote_pacha/localhost/another_dir')\n os.makedirs('/tmp/remote_pacha/localhost/single_dir')\n remote_file = open('/tmp/remote_pacha/localhost/single_dir/remote.txt', 'w')\n remote_file.write(\"remote file\")\n remote_file.close()\n self.assertTrue(os.path.isfile('/tmp/remote_pacha/localhost/single_dir/remote.txt'))\n server = \"%s@%s\" % (self.username, host.hostname()) \n run = rebuild.Rebuild(server=server,\n hostname='localhost', \n source='/tmp/remote_pacha',\n directory='single_dir')\n run.retrieve_files()\n result = os.path.isfile('/tmp/localhost/single_dir/remote.txt')\n line = open('/tmp/localhost/single_dir/remote.txt')\n remote_line = line.readline()\n self.assertEqual(remote_line, \"remote file\")\n self.assertTrue(result)", "async def test_bridge_import_already_configured(hass: HomeAssistant) -> None:\n MockConfigEntry(\n domain=\"hue\", unique_id=\"aabbccddeeff\", data={\"host\": \"0.0.0.0\"}\n ).add_to_hass(hass)\n\n result = await hass.config_entries.flow.async_init(\n const.DOMAIN,\n context={\"source\": config_entries.SOURCE_IMPORT},\n data={\"host\": \"0.0.0.0\", \"properties\": {\"id\": \"aa:bb:cc:dd:ee:ff\"}},\n )\n\n assert result[\"type\"] == \"abort\"\n assert result[\"reason\"] == \"already_configured\"", "def test_load_edition_data(self):\n key = \"OL1234W\"\n responses.add(\n responses.GET,\n \"https://openlibrary.org/works/OL1234W/editions\",\n json={\"hi\": \"there\"},\n )\n result = self.connector.load_edition_data(key)\n self.assertEqual(result, {\"hi\": \"there\"})", "def test_file_managed_http_source_skip_verify(file, tmp_path, remote_grail_scene33):\n name = str(tmp_path / \"testfile\")\n ret = file.managed(name=name, source=remote_grail_scene33.url, skip_verify=True)\n assert ret.result is True", "def candidates_import_from_master_server(\n request, google_civic_election_id='', state_code=''): # Consumes candidatesSyncOut\n\n import_results, structured_json = process_request_from_master(\n request, \"Loading Candidates from We Vote Master servers\",\n CANDIDATES_SYNC_URL,\n {\n \"key\": WE_VOTE_API_KEY, # This comes from an environment variable\n \"google_civic_election_id\": str(google_civic_election_id),\n \"state_code\": state_code,\n }\n )\n\n if import_results['success']:\n # results = filter_candidates_structured_json_for_local_duplicates(structured_json)\n # filtered_structured_json = results['structured_json']\n # duplicates_removed = results['duplicates_removed']\n # import_results = candidates_import_from_structured_json(filtered_structured_json)\n import_results = candidates_import_from_structured_json(structured_json)\n # import_results['duplicates_removed'] = duplicates_removed\n import_results['duplicates_removed'] = 0\n\n import2_results, structured_json = process_request_from_master(\n request, \"Loading Candidate to Office 
Links from We Vote Master servers\",\n \"https://api.wevoteusa.org/apis/v1/candidateToOfficeLinkSyncOut/\",\n {\n \"key\": WE_VOTE_API_KEY, # This comes from an environment variable\n \"google_civic_election_id\": str(google_civic_election_id),\n \"state_code\": state_code,\n }\n )\n\n if import2_results['success']:\n import2_results = candidate_to_office_link_import_from_structured_json(structured_json)\n\n return import_results", "def internal_locations(source, include):\n with commit():\n import_internal_locations_from_json(source, include=include)", "def fixture_example_data():\n import_example_data()", "def letsgo(download, host, ssporderid_ordernumber, debug, incremental, import_file):\n global is_debug_mode\n is_debug_mode = debug\n setup_db_login(host)\n\n setup_ssporder_id(ssporderid_ordernumber)\n if download:\n download_from_remote(host)\n replace_pmpid_to_local_pmpid()\n else:\n click.echo('skip download')\n if incremental:\n click.echo('incremental importing')\n else:\n delete_local()\n if import_file:\n import_all_to_local()\n else:\n click.echo('skip import file')", "def _import_url(self, url):\n # Caller's responsibility to ensure URL points\n # someplace safe/sane!\n req = urlrq.Request(url, headers={\"User-Agent\": \"sphobjinv URL/\" + soi_version})\n resp = urlrq.urlopen(req, context=self._sslcontext) # noqa: S310\n b_str = resp.read()\n\n # Plaintext URL D/L is unreliable; zlib only\n return self._import_zlib_bytes(b_str)", "def test_csv_import_city(self):\n from django.contrib.messages import get_messages\n path = reverse(\"import-csv\")\n user = mixer.blend(User, is_staff=True, is_superuser=True)\n file = open(\"city.csv\")\n client = Client()\n client.force_login(user)\n r = client.post(path, {\"title\": \"city\", \"csv_file\": file})\n messages = list(get_messages(r.wsgi_request))\n assert r.status_code == 200\n assert len(messages) == 1\n assert str(messages[0]) == \"Successfully Uploaded!\"", "def import_file(factory, dir, file):\n if file[-4:]!='.deb' and file[-5:]!='.udeb':\n log.msg(\"Ignoring (unknown file type):\"+ file, 'import')\n return\n \n log.debug(\"considering: \" + dir + '/' + file, 'import')\n try:\n paths = get_mirror_path(factory, dir+'/'+file)\n except SystemError:\n log.msg(file + ' skipped - wrong format or corrupted', 'import')\n return\n if paths:\n if len(paths) != 1:\n log.debug(\"WARNING: multiple ocurrences\", 'import')\n log.debug(str(paths), 'import')\n cache_path = paths[0]\n else:\n log.debug(\"Not found, trying to guess\", 'import')\n info = AptDpkgInfo(dir+'/'+file)\n cache_path = closest_match(info,\n get_mirror_versions(factory, info['Package']))\n if cache_path:\n log.debug(\"MIRROR_PATH:\"+ cache_path, 'import')\n src_path = dir+'/'+file\n dest_path = factory.cache_dir+cache_path\n \n if not os.path.exists(dest_path):\n log.debug(\"IMPORTING:\" + src_path, 'import')\n dest_path = re.sub(r'/\\./', '/', dest_path)\n if not os.path.exists(dirname(dest_path)):\n os.makedirs(dirname(dest_path))\n f = open(dest_path, 'w')\n fcntl.lockf(f.fileno(), fcntl.LOCK_EX)\n f.truncate(0)\n shutil.copy2(src_path, dest_path)\n f.close()\n if hasattr(factory, 'access_times'):\n atime = os.stat(src_path)[stat.ST_ATIME]\n factory.access_times[cache_path] = atime\n log.msg(file + ' imported', 'import')\n else:\n log.msg(file + ' skipped - already in cache', 'import')\n\n else:\n log.msg(file + ' skipped - no suitable backend found', 'import')", "def testPreProcessedImport(self):\n a = 'a.mojom'\n self.WriteFile(a, \"\"\"\\\n module a;\n struct Bar 
{};\"\"\")\n self.ParseMojoms([a])\n\n b = 'b.mojom'\n self.WriteFile(\n b, \"\"\"\\\n module b;\n import \"a.mojom\";\n struct Foo { a.Bar bar; };\"\"\")\n self.ParseMojoms([b])", "def test_import(self):\n self.assertTrue(NagiosPerfdataCollector)", "def test_api_river(self):\n # load api base\n r = requests.get('{server}/api/0.1/'.format(\n server=self.get_server_url())).json()\n # load rivers from url specified in api base\n r = requests.get(r['rivers']).json()\n r = requests.get(r['rivers'][0]['url']).json()\n self.assertIn('html', r)\n self.assertIn('url', r)\n self.assertIn('sections', r)\n self.assertIn('gages', r)\n self.assertIn('regions', r)\n self.assertIn('tributaries', r)", "def import_list(ctx, list_path):\n with open(list_path, 'r') as fobj:\n migrator.import_list(ctx.obj[\"sceptre_dir\"], ctx.obj[\"options\"], fobj)" ]
[ "0.6129432", "0.5878348", "0.58344084", "0.5827213", "0.5798477", "0.57732666", "0.57479435", "0.56994355", "0.5604567", "0.5604356", "0.56015086", "0.56014234", "0.55989224", "0.5594116", "0.5569717", "0.55627763", "0.5552308", "0.55374223", "0.55238146", "0.55180776", "0.54787135", "0.54232985", "0.54134357", "0.540596", "0.5387352", "0.5377429", "0.53736705", "0.53656256", "0.53575766", "0.5354557", "0.5327704", "0.53052074", "0.5302559", "0.5283114", "0.5281536", "0.526923", "0.52637935", "0.52602035", "0.52592796", "0.5258732", "0.5255397", "0.5254019", "0.52528125", "0.52528125", "0.52348846", "0.52255714", "0.52252287", "0.52240306", "0.5216139", "0.52125084", "0.5197934", "0.5194237", "0.519137", "0.5183595", "0.5179064", "0.5172336", "0.51678157", "0.5163407", "0.51535887", "0.5127502", "0.5124506", "0.5117653", "0.5113096", "0.51123875", "0.5111189", "0.5107541", "0.51009023", "0.5096892", "0.5091712", "0.508839", "0.50880164", "0.50873137", "0.5087206", "0.50760996", "0.5068735", "0.50685287", "0.50655085", "0.50650036", "0.50623417", "0.50581706", "0.5057043", "0.5049273", "0.5049224", "0.5048823", "0.5043863", "0.504286", "0.5024912", "0.5015114", "0.501142", "0.5000012", "0.4998966", "0.4998194", "0.49961278", "0.49959555", "0.49927503", "0.49923745", "0.49889854", "0.49880895", "0.49863836", "0.49838012", "0.4982883" ]
0.0
-1
Tests that hotels whose cities aren't in the database cannot be imported from the remote file
def test_setting_csv_hotel_fail(self): from django.contrib.messages import get_messages path = reverse("setting-csv") user = mixer.blend(User, is_staff=True, is_superuser=True) client = Client() client.force_login(user) r = client.post(path, {"title": "hotel", "url": "http://rachel.maykinmedia.nl/djangocase/hotel.csv", "username": "python-demo", "password": "claw30_bumps", "save": "on"}) messages = list(get_messages(r.wsgi_request)) assert r.status_code == 200 assert len(messages) > 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def test_get_bad_location_data(self):\n city_name = 'notarealplace'\n response = await self.http_client.fetch(request=HTTPRequest(\n url=self.get_url(path=\"/location-data/{}\".format(city_name)),\n method='GET'\n ), raise_error=False)\n self.assertEqual(response.code, HTTPStatus.BAD_REQUEST, \"Incorrect response for an unknown city\")", "async def test_get_location_data(self):\n for city_name in ['dublin', 'London', 'Copenhagen']:\n response = await self.http_client.fetch(request=HTTPRequest(\n url=self.get_url(path=\"/location-data/{}\".format(city_name)),\n method='GET'\n ))\n self.assertEqual(response.code, HTTPStatus.OK)\n self.check_city_response(response, city_name.lower())", "def test_unknown_countries(self):\n # Currently, there are no Countries or Regions\n self.assertEqual(Country.objects.count(), 0)\n self.assertEqual(Region.objects.count(), 0)\n\n # Call the command with countries that are not recognized by the iso3166 library\n self.call_command(filename='power_plant_import/tests/data/unknown_countries.csv')\n\n # No Countries or Regions were created during the test\n self.assertEqual(Country.objects.count(), 0)\n self.assertEqual(Region.objects.count(), 0)", "def test_csv_import_hotel_fail(self):\n from django.contrib.messages import get_messages\n path = reverse(\"import-csv\")\n user = mixer.blend(User, is_staff=True, is_superuser=True)\n client = Client()\n client.force_login(user)\n file = open(\"hotel.csv\")\n r = client.post(path, {\"title\": \"hotel\", \"csv_file\": file})\n messages = list(get_messages(r.wsgi_request))\n assert r.status_code == 200\n assert len(messages) >= 1\n for message in messages:\n assert \"can not import\" in str(message)", "def test_create_hotel(self):\n amsterdam = City.objects.get(name=\"Amsterdam\")\n ibis = Hotel.objects.get(name=\"Ibis\")\n\n self.assertEqual(ibis.city, amsterdam)\n self.assertEqual(ibis.code, \"AMS01\")\n self.assertEqual(ibis.name, \"Ibis\")", "def load_random_cities(data):\n cities = list(set([elem['name'] for elem in data]))\n city_objects = [City(data=city) for city in cities]\n City.objects.bulk_create(city_objects)", "def test_csv_import_city(self):\n from django.contrib.messages import get_messages\n path = reverse(\"import-csv\")\n user = mixer.blend(User, is_staff=True, is_superuser=True)\n file = open(\"city.csv\")\n client = Client()\n client.force_login(user)\n r = client.post(path, {\"title\": \"city\", \"csv_file\": file})\n messages = list(get_messages(r.wsgi_request))\n assert r.status_code == 200\n assert len(messages) == 1\n assert str(messages[0]) == \"Successfully Uploaded!\"", "def GetWorldCities():\n return GetDataFromCsvFile('world_cities.csv')", "def load_restaurants(city):\n session = connect_db()\n # Start offset at 0 to return the first 20 results from Yelp API request\n offset = 0\n\n # Get total number of restaurants for this city\n bearer_token = obtain_bearer_token(API_HOST, TOKEN_PATH)\n result_len = 20\n \n # Get all restaurants for a city and load each restaurant into the database\n # Note: Yelp has a limitation of 1000 for accessible results, so get total results\n # if less than 1000 or get only 1000 results back even if there should be more\n while (1000 > offset) and (result_len==20):\n results = search(bearer_token, 'restaurant', city, offset)\n result_len = len(results['businesses'])\n\n # API response returns a SearchResponse object with accessible attributes\n # response.businesses returns a list of business objects with further attributes\n for business in 
results['businesses']:\n biz = get_business(bearer_token, business['id'])\n try:\n table.insert(biz)\n except DuplicateKeyError:\n print 'DUPS!'\n\n hour_start_monday = None\n hour_end_monday = None \n hour_start_tuesday = None\n hour_end_tuesday = None\n hour_start_wednesday = None\n hour_end_wednesday = None \n hour_start_thursday = None\n hour_end_thursday = None \n hour_start_friday = None\n hour_end_friday = None \n hour_start_saturday = None\n hour_end_saturday = None \n hour_start_sunday = None\n hour_end_sunday = None\n try:\n yelp_price_level = biz['price']\n except:\n yelp_price_level = None\n try:\n hours_type = biz['hours'][0]['hours_type']\n is_open_now = biz['hours'][0]['is_open_now']\n for item in biz['hours'][0]['open']:\n if item['day'] == 1:\n hour_start_tuesday = item['start']\n hour_end_tuesday = item['end']\n elif item['day'] == 0:\n hour_start_monday = item['start']\n hour_end_monday = item['end']\n elif item['day'] == 2:\n hour_start_wednesday = item['start']\n hour_end_wednesday = item['end']\n elif item['day'] == 3:\n hour_start_thursday = item['start']\n hour_end_thursday = item['end']\n elif item['day'] == 4:\n hour_start_friday = item['start']\n hour_end_friday = item['end']\n elif item['day'] == 5:\n hour_start_saturday = item['start']\n hour_end_saturday = item['end']\n elif item['day'] == 6:\n hour_start_sunday = item['start']\n hour_end_sunday = item['end']\n except:\n hours_type = None\n is_open_now = None\n hour_start_monday = None\n hour_end_monday = None \n hour_start_tuesday = None\n hour_end_tuesday = None\n hour_start_wednesday = None\n hour_end_wednesday = None \n hour_start_thursday = None\n hour_end_thursday = None \n hour_start_friday = None\n hour_end_friday = None \n hour_start_saturday = None\n hour_end_saturday = None \n hour_start_sunday = None\n hour_end_sunday = None\n restaurant = Restaurant(\n yelp_id = business['id'],\n yelp_rating = biz['rating'],\n yelp_review_count = biz['review_count'],\n name = biz['name'],\n phone = biz['phone'],\n yelp_url = biz['url'],\n yelp_price_level = yelp_price_level,\n latitude = biz['coordinates']['latitude'],\n longitude = biz['coordinates']['longitude'],\n hours_type = hours_type,\n is_open_now = is_open_now,\n hour_start_monday = hour_start_monday,\n hour_end_monday = hour_end_monday,\n hour_start_tuesday = hour_start_tuesday,\n hour_end_tuesday = hour_end_tuesday,\n hour_start_wednesday = hour_start_wednesday,\n hour_end_wednesday = hour_end_wednesday, \n hour_start_thursday = hour_start_thursday,\n hour_end_thursday = hour_end_thursday, \n hour_start_friday = hour_start_friday,\n hour_end_friday = hour_end_friday, \n hour_start_saturday = hour_start_saturday,\n hour_end_saturday = hour_end_saturday, \n hour_start_sunday = hour_start_sunday,\n hour_end_sunday = hour_end_sunday, \n is_closed = biz['is_closed'],\n categories = biz['categories'][0]['alias'],\n display_phone = biz['display_phone'],\n location = ' '.join(biz['location']['display_address']),\n location_city = biz['location']['city'],\n location_state = biz['location']['state'],\n location_zip_code = biz['location']['zip_code'],\n location_city_id = biz['location']['city'] + ', ' + biz['location']['state'])\n session.merge(restaurant)\n # Yelp returns only 20 results each time, so need to offset by 20 while iterating\n offset += 20\n print('current offset: ', offset)\n session.commit()", "def test_client_nationlities_list(self):\n pass", "def test_loading(self):\n self.assertIsInstance(self.data.districts, list)", "def 
_import_insee_city(self, cr, uid, ids, data_dir, context=None):\n if context is None:\n context = {}\n filepath = os.path.abspath(os.path.join(data_dir, 'comsimp2011.csv'))\n city_obj = self.pool.get('insee.city')\n department_obj = self.pool.get('insee.department')\n with open(filepath, 'rb') as cityfile:\n reader = csv.DictReader(cityfile)\n for row in reader:\n args = [('dep', '=', row['DEP'])]\n department_ids = department_obj.search(cr, uid, args)\n department_id = department_ids and department_ids[0] or None\n ncc = row['ARTMAJ'] and row['ARTMAJ'].strip(\"()\") + \\\n row['NCC'] or row['NCC']\n nccenr = row['ARTMIN'] and row['ARTMIN'].strip(\"()\") + \\\n row['NCCENR'] or row['NCCENR']\n values = {\n 'cdc': row['CDC'],\n 'cheflieu': row['CHEFLIEU'],\n 'reg': row['REG'],\n 'dep': row['DEP'],\n 'department_id': department_id,\n 'com': row['COM'],\n 'ar': row['AR'],\n 'ct': row['CT'],\n 'tncc': row['TNCC'],\n 'artmaj': row['ARTMAJ'],\n 'ncc': ncc,\n 'artmin': row['ARTMIN'],\n 'nccenr': nccenr,\n }\n city_obj.create(cr, uid, values, context=context)", "def test_search(self):\n from importCsv.models import City, Hotel\n path = reverse(\"search\")\n user = mixer.blend(User, is_staff=True, is_superuser=True)\n city = mixer.blend(City, abbrev=\"tes\", name=\"test\")\n mixer.blend(Hotel, city=city, data=\"testData\", name=\"test hotel\")\n client = Client()\n client.force_login(user)\n r = client.post(path, {\"tes\": \"on\"})\n assert r.status_code == 200\n assert r.content.find(b'test hotel')", "def test_missing_data_sources(self):", "def GetUsCities():\n return GetDataFromCsvFile('us_cities.csv')", "def cities(self):\n from models.engine.file_storage import FileStorage\n from models.city import City\n fs = FileStorage.all(City)\n city_list = []\n for key, value in fs.items():\n if 'City' in key and self.id == value.state_id:\n '''Append City instances maybe fucked up here!!!'''\n city_list.append(value)\n return city_list", "def audit_city(osmfile):\r\n suburb_list_wrong = defaultdict(set)\r\n city_file = open(osmfile, encoding=\"utf8\")\r\n \r\n for event, elem in ET.iterparse(city_file, events=(\"start\",)):\r\n \r\n if elem.tag == \"node\" or elem.tag == \"way\":\r\n \r\n for tag in elem.iter(\"tag\"):\r\n \r\n if tag.attrib['k'] == 'addr:city':\r\n \r\n city = tag.attrib['v']\r\n # province = re.sub(\" \", \"\", tag.attrib['v'].strip())\r\n if city not in expected_suburb:\r\n \r\n suburb_list_wrong[city].add(city)\r\n \r\n city_file.close()\r\n return suburb_list_wrong", "def test_csv_import_hotel_success(self):\n from django.contrib.messages import get_messages\n path = reverse(\"import-csv\")\n user = mixer.blend(User, is_staff=True, is_superuser=True)\n file = open(\"city.csv\")\n client = Client()\n client.force_login(user)\n client.post(path, {\"title\": \"city\", \"csv_file\": file})\n file = open(\"hotel.csv\")\n r = client.post(path, {\"title\": \"hotel\", \"csv_file\": file})\n messages = list(get_messages(r.wsgi_request))\n assert r.status_code == 200\n assert len(messages) == 1\n assert str(messages[0]) == \"Successfully Uploaded!\"", "def load_data(city, month, day):\n if city == 'chicago':\n city_df = pd.read_csv('chicago.csv')\n elif city == 'new york city':\n city_df = pd.read_csv('new_york_city.csv')\n else:\n # city_df = pd.read_csv('washington.csv')\n print(\"else is running\")\n\n print(city_df.head())\n\n return city_df", "def test_get_city_notfound(client):\n response = client.get(\"/weather/curitoba\")\n # Validate the response\n print(response.data)\n assert 
b\"200\" not in response.data", "def tearDown(self):\n del self.my_city", "def load_venues():\n\n print('load_venues')\n\n Venue.query.delete()\n\n for row in open(\"seed_data/venues.csv\"):\n row = row.rstrip()\n subcategory, \\\n created_by, \\\n title, \\\n addr_1, \\\n addr_2, \\\n city, \\\n postal_code, \\\n state = row.split(',')\n\n cat_sub = Category_Subcategory.query.filter_by(name=subcategory).first()\n\n vnu = Venue(subcategory_id=cat_sub.id,\n created_by=created_by,\n name=title,\n addr_1=addr_1,\n addr_2=addr_2,\n city=city,\n postal_code=postal_code,\n state=state)\n\n db.session.add(vnu)\n\n db.session.commit()", "def load_data(city, month, day ,city_num, month_num, day_num):\r\n try:\r\n df = pd.read_csv(CITY_DATA[city])\r\n df['Start Time'] = pd.to_datetime(df['Start Time'])\r\n df['End Time'] = pd.to_datetime(df['End Time'])\r\n df['month'] = df['Start Time'].dt.month\r\n df['day_of_week'] = df['Start Time'].dt.weekday_name\r\n df['hour'] = df['Start Time'].dt.hour\r\n\r\n # filter by month if applicable\r\n if month != 'all':\r\n df = df[df['month'] == month_num]\r\n # filter by day of week if applicable\r\n if day != 'all':\r\n # filter by day of week to create the new dataframe\r\n\r\n df = df[df['day_of_week'].str.contains(day.title())]\r\n return df\r\n except Exception as e:\r\n print('An exception has been occurred during loading data: {}'.format(e))", "def test_bulk_locations(self):\n # do twice to check if it really updates\n lengths = []\n for i in range(2):\n file_path = os.path.join(os.path.dirname(__file__),\n self.testdata_folder,\n self.filename_locations)\n data = {\n 'bulk_upload' : open(file_path, 'rb'),\n }\n\n res = self.client.post(self.location_url, data)\n assert res.status_code == status.HTTP_201_CREATED, (\n responses.get(res.status_code, res.status_code), res.content)\n lengths.append(len(AdministrativeLocation.objects.all()))\n\n assert lengths[0] == lengths[1]\n\n file_path = os.path.join(os.path.dirname(__file__),\n self.testdata_folder,\n self.filename_locations_duplicates)\n data = {\n 'bulk_upload' : open(file_path, 'rb'),\n }\n\n res = self.client.post(self.location_url, data)\n assert res.status_code == status.HTTP_400_BAD_REQUEST", "def test_correct_data_under_places(self):\n load_to_datastore(self.places_sofia, self.metadata_sofia)\n CommonAssertions.check_correct_data_under_places(tester=self, places=self.places_sofia,\n metadata=self.metadata_sofia)", "def load_data(city, month, day):\n if city == 'new york city':\n df=pd.read_csv(\"./new_york_city.csv\")\n else: \n df=pd.read_csv(\"./\" + city + \".csv\")\n df['Start Time']=pd.to_datetime(df['Start Time'])\n df['month'] = df['Start Time'].dt.month\n df['day'] = df['Start Time'].dt.dayofweek\n df['hour'] =df['Start Time'].dt.hour\n if month !='all':\n df=df[df['month']==months_list[month]]\n if day != 'all':\n df=df[df['day']==days_list[day]]\n \n return df", "def test_find_cities(self):\n\n # Given\n game_state: CarcassonneGameState = CarcassonneGameState()\n\n city_one_side_straight_road = base_tiles[\"city_top_straight_road\"].turn(3)\n city_with_road = inns_and_cathedrals_tiles[\"ic_15\"].turn(3)\n\n game_state.board = [[None for column in range(2)] for row in range(1)]\n\n game_state.board[0][0] = city_with_road\n game_state.board[0][1] = city_one_side_straight_road\n\n # When\n cities: [City] = CityUtil.find_cities(\n game_state=game_state,\n coordinate=Coordinate(0, 0)\n )\n\n # Then\n self.assertEqual(1, len(cities))\n self.assertEqual(2, len(cities[0].city_positions))\n 
self.assertTrue(cities[0].finished)", "def load_data(city, month, day):\n data_to_use = CITY_DATA[city]\n df = pd.read_csv(data_to_use)\n # drop rows containing NAN fields\n df2 = df.dropna()\n\n # Ensure the Start and End Time are Date\n pd.to_datetime(df2['Start Time'])\n pd.to_datetime(df2['End Time'])\n df = df2.sort_values(by='Start Time')\n\n # For each Start Time create additional columns to store year, month, day_of_week and hour\n # df['Start Year'] = df['Start Time'].apply(lambda x: get_part_of_datetime(x, 'year'))\n df['Start Month'] = df['Start Time'].apply(lambda x: get_part_of_datetime(x, 'month'))\n df['Start Day'] = df['Start Time'].apply(lambda x: get_part_of_datetime(x, 'day_of_week'))\n df['Start Hour'] = df['Start Time'].apply(lambda x: get_part_of_datetime(x, 'hour'))\n\n # filter month if month is not all\n if month.title() != 'All':\n df = df.loc[df['Start Month'] == month.title()]\n\n # filter day if day is not all\n if day.title() != 'All':\n df = df.loc[df['Start Day'] == day.title()]\n\n return df", "def load_data(city, month, day):\n\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start, end Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['End Time'] = pd.to_datetime(df['End Time'])\n # Calculate the travel time per trip and add that column to data frame.\n df['Travel Time'] = df['End Time'] - df['Start Time']\n\n # extract month and day of week from Start Time to create new columns\n df['Start Hour'] = df['Start Time'].dt.hour\n df['End Hour'] = df['End Time'].dt.hour\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "def load_data(city, month='all', day='all'):\n\n df = pd.read_csv(CITY_DATA[city.lower()]).rename(columns={'Unnamed: 0': 'Trip Id'})\n cols = df.columns\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['Month'] = df['Start Time'].dt.month\n df['Day of Week'] = df['Start Time'].dt.dayofweek\n df['Start Hour'] = df['Start Time'].dt.hour\n\n # Filter by month if applicable\n if month.lower() in MONTH_LIST:\n n_month = MONTH_LIST[month.lower()]\n df = df.loc[df['Month'] == n_month]\n\n # Filter by day of the week if applicable\n if day.lower() in DAY_LIST:\n n_day = DAY_LIST[day.lower()]\n df = df.loc[df['Day of Week'] == n_day]\n\n return df, cols", "def test_setting_csv_city(self):\n from django.contrib.messages import get_messages\n path = reverse(\"setting-csv\")\n user = mixer.blend(User, is_staff=True, is_superuser=True)\n client = Client()\n client.force_login(user)\n r = client.post(path, {\"title\": \"city\", \"url\": \"http://rachel.maykinmedia.nl/djangocase/city.csv\",\n \"username\": \"python-demo\", \"password\": \"claw30_bumps\", \"save\": \"on\"})\n messages = list(get_messages(r.wsgi_request))\n assert r.status_code == 200\n assert len(messages) == 1\n assert str(messages[0]) == \"Successfully Uploaded!\"", "def read_locations(db, openfile):\n pass", "def load_data(city, month, day):\n try:\n df = pd.read_csv(CITY_DATA[city])\n\n 
df['Start Time'] = pd.to_datetime(df['Start Time'], errors='coerce')\n df['End Time'] = pd.to_datetime(df['End Time'])\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n df['hour'] = df['Start Time'].dt.hour\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = MONTH_LIST.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df\n except Exception as e:\n print('Couldn\\'t load the file, as an Error occurred: {}'.format(e))", "def test_invlaid_data(self):\n # Currently, there are no PowerPlant or Project objects in the database\n self.assertEqual(PowerPlant.objects.count(), 0)\n self.assertEqual(Project.objects.count(), 0)\n\n # Call the command with invalid data.\n # The first row is for a power plant, but it has an invalid latitude.\n # The second row is for a project, but it has an invalid project capacity.\n # The third row is for a project, but it has an invalid project capacity unit.\n # The fourth row is for a power plant, but has an invalid status.\n self.call_command(filename='power_plant_import/tests/data/invalid_data.csv')\n\n # Each of the objects were created, though the invalid data was not saved.\n self.assertEqual(PowerPlant.objects.count(), 3)\n self.assertEqual(Project.objects.count(), 2)", "def test_get_countries(self):\n pass", "def test_incomplete_polygons():\n assert not query_row(db_conf, 'osm_landusages', 30004)\n assert not query_row(db_conf, 'osm_landusages', 30006)", "def cities(self):\n objs = models.storage.all()\n tmp = []\n for key, value in objs.items():\n name = key.split('.')\n if name[0] == \"City\":\n if value.state_id == str(self.id):\n tmp.append(objs[key])\n return tmp", "def load_data(city, month, day):\n \n if city == 'chicago':\n filename = 'chicago.csv'\n elif city == 'new York':\n filename = 'new_york_city.csv'\n elif city == 'washington':\n filename = 'washington.csv'\n else:\n return -1\n \n df = pd.read_csv(filename)\n \n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['End Time'] = pd.to_datetime(df['End Time'])\n \n if month != 'all':\n df = df[df['Start Time'].dt.month == month]\n\n if day != 'all':\n df = df[df['Start Time'].dt.weekday == day]\n \n return df", "def load_data(city, month, day):\r\n if city.lower() == \"chicago\" or city.lower() == \"c\":\r\n filename = 'C:\\\\Neha\\\\Udacity\\\\BikeShare\\\\chicago.csv'\r\n elif city.lower() == \"New York\" or city.lower() == \"new york\":\r\n filename = 'C:\\\\Neha\\\\Udacity\\\\BikeShare\\\\new_york_city.csv'\r\n elif city.lower() == \"Washington\" or city.lower() == \"washington\":\r\n filename = 'C:\\\\Neha\\\\Udacity\\\\BikeShare\\\\washington.csv'\r\n # load data file into a dataframe\r\n #df = pd.read_csv(CITY_DATA[\"city\"])\r\n df = pd.read_csv(filename)\r\n # convert the Start Time column to datetime\r\n df['Start Time'] = pd.to_datetime(df['Start Time'])\r\n\r\n # extract month and day of week from Start Time to create new columns\r\n df['month'] = df['Start Time'].dt.month\r\n df['day_of_week'] = df['Start Time'].dt.weekday_name\r\n\r\n # filter by month if applicable\r\n if month != 'all':\r\n # use the index of the months list to get the corresponding int\r\n months 
= ['January', 'February', 'March', 'April', 'May', 'June']\r\n month = months.index(month) + 1\r\n\r\n # filter by month to create the new dataframe\r\n df = df[df['month'] == month]\r\n\r\n # filter by day of week if applicable\r\n if day != 'all':\r\n # filter by day of week to create the new dataframe\r\n df = df[df['day_of_week'] == day.title()]\r\n\r\n return df", "def load_data(city, month, day):\n\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n # drop the unused 'Unnamed' column\n df = df.drop(\"Unnamed: 0\", axis=1)\n # convert the Start Time column to datetime\n df[\"Start Time\"] = pd.to_datetime(df[\"Start Time\"])\n # extract month, day of week and hour from Start Time to create new columns\n df[\"month\"] = df[\"Start Time\"].dt.month_name()\n df[\"day\"] = df[\"Start Time\"].dt.day_name()\n df[\"hour\"] = df[\"Start Time\"].dt.hour.astype(str)\n\n # filter by month if applicable\n if month != \"All\":\n # filter by month to create the new dataframe\n df = df.loc[df[\"month\"] == month]\n\n # filter by day of week if applicable\n if day != \"All\":\n # filter by day of week to create the new dataframe\n df = df.loc[df[\"day\"] == day]\n\n return df", "def test_list_zr_locations(self):\n pass", "def load_data(city, month, day):\n try:\n df = pd.read_csv(CITY_DATA[city])\n \n df['Start Time'] = pd.to_datetime(df['Start Time'], errors='coerce')\n df['End Time'] = pd.to_datetime(df['End Time'])\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name \n df['hour'] = df['Start Time'].dt.hour\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = MONTH_LIST.index(month) + 1 \n \n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()] \n\n return df\n except Exception as e:\n print('Couldn\\'t load the file, as an Error occurred: {}'.format(e))", "def load_data(city, month, day):\n try:\n df = pd.read_csv(CITY_DATA[city])\n \n df['Start Time'] = pd.to_datetime(df['Start Time'], errors='coerce')\n df['End Time'] = pd.to_datetime(df['End Time'])\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name \n df['hour'] = df['Start Time'].dt.hour\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = MONTH_LIST.index(month) + 1 \n \n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()] \n\n return df\n except Exception as e:\n print('Couldn\\'t load the file, as an Error occurred: {}'.format(e))", "def test_locations(self):\n url = reverse(\"locations\", args=[00000])\n response = self.client.get(url)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertTrue(isinstance(response.data, list))\n self.assertTrue(response.data) # list not empty\n\n location_data = response.data[0]\n data_keys = [\n \"title\",\n \"address\",\n \"address2\",\n \"city\",\n \"state\",\n \"postalCode\",\n \"distance\",\n \"hours\",\n 
\"phone\",\n \"geocode\",\n ]\n self.assertEqual(list(location_data.keys()), data_keys)", "def test_countries_regions_created(self):\n country_existing = CountryFactory(\n name=iso3166.countries.get('France').name,\n numeric=iso3166.countries.get('France').numeric,\n alpha_3=iso3166.countries.get('France').alpha3,\n )\n region_existing = RegionFactory(name='Existing Region')\n\n self.call_command(filename='power_plant_import/tests/data/six_rows.csv')\n\n # Get the PowerPlants that were created during the import\n (powerplant_ouessant, powerplant_ilarionas, powerplant_tonstad) = self.get_created_plants()\n # Get the Projects that were created during the import\n (project_ouessant1, project_ouessant2, project_liaoning) = self.get_created_projects()\n # The Countries and Regions have been assigned to the correct PowerPlants and Projects\n self.assertEqual(Country.objects.count(), 4)\n self.assertEqual(Region.objects.count(), 3)\n greece = Country.objects.get(name='Greece')\n china = Country.objects.get(name='China')\n norway = Country.objects.get(name='Norway')\n mediterranean = Region.objects.get(name='Gulf and Mediterranean')\n northeast_asia = Region.objects.get(name='Northeast Asia')\n self.assertEqual(set(powerplant_ouessant.countries.all()), set([country_existing]))\n self.assertEqual(set(powerplant_ouessant.regions.all()), set([region_existing]))\n self.assertEqual(set(project_ouessant1.countries.all()), set([country_existing]))\n self.assertEqual(set(project_ouessant1.regions.all()), set([region_existing]))\n self.assertEqual(set(project_ouessant1.countries.all()), set([country_existing]))\n self.assertEqual(set(project_ouessant1.regions.all()), set([region_existing]))\n self.assertEqual(set(powerplant_ilarionas.countries.all()), set([greece]))\n self.assertEqual(set(powerplant_ilarionas.regions.all()), set([mediterranean]))\n self.assertEqual(set(project_liaoning.countries.all()), set([china]))\n self.assertEqual(set(project_liaoning.regions.all()), set([northeast_asia]))\n self.assertEqual(set(powerplant_tonstad.countries.all()), set([norway]))\n self.assertEqual(set(powerplant_tonstad.regions.all()), set([region_existing]))", "def check_all_type(name):\n all_type = set()\n for city in ['beijing', 'tianjing', 'guangzhou']:\n with open(\n exp_data_path + os.sep + 'weather' + os.sep + city + os.sep + '{}_{}.csv'.format(city, name)) as f:\n reader = csv.reader(f)\n for line in reader:\n all_type.add(line[1].replace(\" \", \"\"))\n print(all_type)", "def create_list_csv_by_city(self, file_name, city_name):\n\n #We couldn't make it for this hackathon because we hadn't enough data and especially good data\n pass", "def load_data(city, month, day):\n #Load data for city\n print(\"\\nCity Data..\")\n df = pd.read_csv(CITY_DATA[city])\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['month'] = df['Start Time'].dt.month\n df['day_week'] = df['Start Time'].dt.day_name()\n df['hour'] = df['Start Time'].dt.hour\n if month != '7':\n months = ['1', '2', '3', '4', '5', '6']\n month = months.index(month) + 1\n df = df[df['month'] == month]\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_week'] == day.title()]\n return df", "def load_data(city, month, day):\n \n filename = str(CITY_DATA.get(city))\n\n # load data file into a dataframe\n df = pd.read_csv(filename)\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n 
df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month_int = months.index(month) +1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month_int] \n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n \n return df", "def test_functional_bad_ip(self, url):\n response = requests.get(\"http://localhost:80/ip2w/{url}\".format(url=url))\n self.assertEqual(response.status_code, BAD_REQUEST)\n self.assertEqual(response.json().get(\"error\"),\n \"No city for ip {}\".format(url))", "def main(location: str='kansascity', cities: bool=False) -> None:\n car_list = []\n if cities:\n location = ''\n city_list = get_cities()\n for city in city_list:\n response = cars.get_porsche(city=city)\n car = cars.get_soup(response)\n if len(car) > 0:\n car_list.append(cars.get_soup(response))\n else:\n response = cars.get_porsche()\n car = cars.get_soup(response)\n car_list.append(car)\n add_to_db(car_list)\n results = get_pandas(location)\n print(response.headers.get('retry-after'))\n return results", "def load_data(city, month, day):\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n df['Start and End Stations'] = df['Start Station'] + ' and ' + df['End Station']\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "def read_cities(file_name):\n stream = open(file_name)\n data = stream.readlines()\n stream.close()\n roadmap = []\n for city_info in data: # For each record in data file\n if city_info != \"\\n\": # Ignore new line characters\n city_info = clean_data(city_info) # Clean the record\n roadmap.append(city_info) # Add each cleaned record to a list\n return roadmap", "def test_last_cities(client):\n client.get(\"/weather/viena\")\n client.get(\"/weather/sorocaba\")\n client.get(\"/weather/barcelona\")\n\n\n # Validate weither return two lasts cities in cache or not\n response = client.get(\"/weather/1\")\n print(response.data)\n assert b\"Barcelona\" in response.data", "def load_data(city, month, day):\n# This code is refrenced from the practice problem on the project.\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week and hour from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n df['hour'] = df['Start Time'].dt.hour\n\n # filter by month if applicable\n if month != 'all':\n month = 
MONTHS.index(month) + 1\n df = df[ df['month'] == month ]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[ df['day_of_week'] == day.title()]\n\n return df", "def init(city: str, country: str, list_of_streets: list):\n if check(city, country, list_of_streets):\n return get_sample_data(city, country, list_of_streets)", "def load_data(city, month, day):\n\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month, day of week and hour from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n df['hour'] = df['Start Time'].dt.hour\n\n\n # filter_choosed by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = [\"january\", \"february\", \"march\", \"april\", \"may\", \"june\"]\n month = months.index(month) + 1\n\n # filter_choosed by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter_choosed by day of week if applicable\n if day != 'all':\n # filter_choosed by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "def get_cities(self, city_name: str = \"\"):", "def load_data(city, month, day):\n\n df = pd.read_csv(CITY_DATA[CITIES[city]])\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['Month'] = df['Start Time'].dt.month\n df['Day of Week'] = df['Start Time'].dt.dayofweek\n\n # get the subset of data where the month matches the one chosen\n if month != 0:\n df = df[df['Month'] == month]\n \n # get the subset of data where the day of the week matches the one chosen\n if day != 7:\n df = df[df['Day of Week'] == day]\n \n return df", "def extract_airports(filename, store):\n print filename\n f = open(filename, 'r')\n text = f.read()\n f.close()\n \n if store:\n ## Database connection, db, collection\n conn = pymongo.Connection()\n db=conn.flight_db\n ap = db.airports\n\n airport_list = []\n \n ## extract city,country,airport code\n #match = re.findall(r'<td\\s*class=\\\"city sorted\\\">(.*?)<\\/td>\\s+<td\\s*class=\\\"country\\\">(\\w+?)</td>\\s+<td\\s*class=\\\"code\\\"><a\\s*href=.+\\\">(\\w+?)</a></td>\\s+', text)\n match = re.findall(r'<td\\s*class=\\\"city sorted\\\">(.*?)<\\/td>\\s+<td\\s*class=\\\"country\\\">(\\w+?)</td>\\s+<td\\s*class=\\\"code\\\"><a\\s*href=.+\\\">(\\w+?)</a><span\\s*style=.*', text)\n if not match:\n print 'airport:rank not found...'\n exit(1)\n for tuples in match:\n if store:\n ap.insert({\n 'city':tuples[0],\n 'country':tuples[1],\n 'code':tuples[2]\n })\n airport_list.append(tuples[0] + ', ' + tuples[1] + ' - ' + tuples[2])\n if store:\n conn.disconnect()\n return airport_list", "def test_csv_reader_data_contents(process_data):\n data = process_data(file_name_or_type='clean_map.csv')\n\n # Check row types\n for row in data:\n assert(isinstance(row['Country'], str))\n assert(isinstance(row['City'], str))\n assert(isinstance(row['State_Or_Province'], str))\n assert(isinstance(row['Lat'], float))\n assert(isinstance(row['Long'], float))\n assert(isinstance(row['Altitude'], float))\n\n # Basic data checks\n assert len(data) == 180 # We have collected 180 rows\n assert data[0]['Country'] == 'Andorra'\n assert data[106]['Country'] == 'Japan'", "def load_data(city, month, day):\n file_name = CITY_DATA.get(city)\n df = 
pd.read_csv(file_name)\n\n # convert \"Start Time\" column from string to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # create new column \"Month\" by extracting the month form datetime\n df['Month'] = df['Start Time'].dt.month\n\n # create new column \"Day\" by extracting the day form datetime\n df['Day'] = df['Start Time'].dt.day_name()\n df['Day'] = df['Day'].str.lower()\n\n # filter by month\n if month != \"all\":\n month_index = months.index(month) + 1\n df = df[df['Month'] == month_index]\n\n # filter by day\n if day != \"all\":\n df = df[df['Day'] == day]\n\n return df", "def load_data(city, month, day):\n # Load data file into a dataframe.\n print('\\nLoading data for city = {}, month = {}, day = {}...'\n .format(city, month, day))\n df = pd.read_csv(CITY_DATA[city])\n\n # Convert the Start Time column to datetime.\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n # Extract month, day of week, hour from Start Time to create new columns.\n df['Month'] = [MONTHS[int(m)] for m in df['Start Time'].dt.month]\n df['Day of Week'] = df['Start Time'].dt.weekday_name\n df['Hour'] = df['Start Time'].dt.hour\n # Create a column for the start and end station pairs.\n df['Path'] = df['Start Station'] + ' => ' + df['End Station']\n\n # Filter by month, if applicable.\n if month != 'All':\n df = df[df['Month'] == month]\n # Filter by day of week, if applicable\n if day != 'All':\n df = df[df['Day of Week'] == day]\n return df", "async def get_city_excursions(city_name: str) -> dict:\n return await tripster_instance.get_trip(city_name)", "def test_functional_good_ip(self, url):\n response = requests.get(\"http://localhost:80/ip2w/{url}\".format(url=url))\n if response.status_code != BAD_GATEWAY:\n print(\"\\nGATEWAY is OK\")\n self.assertEqual(response.status_code, OK)\n content = response.json()\n self.assertEqual(len(content), 3)\n self.assertTrue(content.get(\"temp\"))\n self.assertTrue(content.get(\"city\"))\n else:\n print(\"\\nGATEWAY is RESET BY PEER\")", "def load_data(city, month, day):\n df = pd.read_csv(city)\n df['day_of_week'] = pd.to_datetime(df['Start Time']).dt.dayofweek\n df['month'] = pd.to_datetime(df['Start Time']).dt.month\n if day != 'all':\n df = df[df['day_of_week'] == day]\n if month != 'all':\n df = df[df['month'] == month]\n df.drop('day_of_week', axis=1, inplace=True)\n df.drop('month', axis=1, inplace=True)\n return df", "def load_data(city, month, day):\n #create the DataFrame\n #I'll be honest, I was struggling with this bit of code so I searched the internet and found what I needed to get started.\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week and hour from Start Time to create new columns. 
New columns are needed for filtering.\n df['month'] = df['Start Time'].dt.month\n df['day'] = df['Start Time'].dt.day\n df['hour'] = df['Start Time'].dt.hour\n\n # filter by month if applicable\n if month != 'all':\n month = MONTHS.index(month) + 1\n df = df[ df['month'] == month ]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[ df['day'] == day.title()]\n\n return df", "def test_addr_city_good_values(self):\n for input_val, output_val in self.known_values:\n self.line._parse_addr_city(input_val)\n self.assertEqual(output_val, self.line.addr_city)", "def get_cities(self, city_name: str = None):", "def load_data(city, month, day):\n\n months_dict = {'january' : 1 , 'february' : 2 , 'march' : 3 , 'april' : 4 , 'may' : 5 , 'june' : 6, 'july' : 7, 'august' : 8, 'september' : 9}\n days_dict = {'monday' : 0 , 'tuesday' : 1 , 'wednesday' : 2 , 'thursday' : 3, 'friday' : 4 , 'saturday' : 5 , 'sunday' : 6}\n\n df = pd.read_csv(CITY_DATA[city])\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['End Time'] = pd.to_datetime(df['End Time'])\n\n if month != 'all':\n df = df[df['Start Time'].dt.month == months_dict[month]]\n\n if day != 'all':\n df = df[df['Start Time'].dt.weekday == days_dict[day]]\n\n return df", "def load_data(city, month, day):\n\n print(\"\\nLoading data ...\")\n df = pd.read_csv(CITY_DATA[city])\n\n # Convert the Start Time column to datetime for time period comparison \n # then further create new columns for month and day of week based on that \n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['month'] = df['Start Time'].dt.month\n df['dow'] = df['Start Time'].dt.day_name()\n\n # Perform filtering if enabled \n\n if month != 'all':\n # if possible , move this as a const to the top of source code\n months = ['january','february','march','april','may','june']\n month_to_filter = months.index(month) + 1\n\n # create a new dataframe \n df = df[df['month'] == month_to_filter]\n\n if day != 'all':\n # create a new dataframe \n # note: title() is called since the first letter of the \n # week in the created column is in uppercase\n df = df[df['dow'] == day.title()]\n \n return df", "def main():\n\n conn = psycopg2.connect(**env.DATABASE)\n cursor = conn.cursor()\n\n for file, city in env.supported_cities().items():\n try:\n data = add_metadata(parse_html(city, get_html(city)))\n save_data_to_db(cursor, data, file.title())\n except Exception as e:\n print(\"Failed to scrape '%s': %s\" %(city, e))\n print(traceback.format_exc())\n\n conn.commit()\n conn.close()", "def locator(pcap_obj,kml_file):\r\n ip_list = []\r\n for ts, buf in pcap_obj:\r\n eth = dpkt.ethernet.Ethernet(buf)\r\n ip = eth.data\r\n try: # extract all unique IPs\r\n src_ip = str(socket.inet_ntoa(ip.src))\r\n dst_ip = str(socket.inet_ntoa(ip.dst))\r\n if src_ip in ip_list:\r\n pass\r\n else:\r\n ip_list.append(src_ip)\r\n if dst_ip in ip_list:\r\n pass\r\n else:\r\n ip_list.append(dst_ip)\r\n except AttributeError:\r\n pass\r\n\r\n try:\r\n reader = geoip2.database.Reader('GeoLite2-City_20190129.mmdb') # reading from db(can be redacted)\r\n except FileNotFoundError:\r\n print(f'[!]DB file not in current directory or with a different file name')\r\n sys.exit(1)\r\n area = []\r\n longitude = []\r\n latitude = []\r\n ips = []\r\n for ip_addr in ip_list:\r\n try:\r\n rec = reader.city(ip_addr) # reading IP\r\n country = rec.country.iso_code # assigning country and city\r\n city = rec.city.name\r\n if city is None and country is 
None:\r\n area.append('Unknown')\r\n elif city is None:\r\n area.append(f'Unknown city:{country}') # looking for unknown country\r\n elif country is None:\r\n area.append(f'Unknown country:{city}') # looking for unknown city\r\n else:\r\n area.append(f'{city} {country}')\r\n\r\n longitude.append(rec.location.longitude)\r\n latitude.append(rec.location.latitude)\r\n ips.append(ip_addr)\r\n except geoip2.errors.AddressNotFoundError:\r\n pass\r\n\r\n try:\r\n kml = simplekml.Kml()\r\n final_path = str(os.getcwd() + os.sep + kml_file) # defining full canonical path\r\n for i in range(0, len(ips)):\r\n kml.newpoint(name=(area[i]),\r\n coords=[(longitude[i], latitude[i])],\r\n description=f'[+] Location = {area[i]}\\n IP: {ips[i]}')\r\n kml.save(final_path)\r\n print(f\"[+] Writing IP locations to {kml_file}\") # writing data to a KML file\r\n print(f\"[+] Opening Google Earth with:{kml_file}\\n\") # reading file with google earth\r\n try:\r\n os.startfile(final_path)\r\n except OSError:\r\n print(f'[!] Warning: Google Earth must be installed to open the kml')\r\n except FileNotFoundError:\r\n pass", "def test_get_country_states(self):\n pass", "def load_data(city, month, day):\n \n start_time = time.time()\n\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n df['Start Time'] = pd.to_datetime(df['Start Time'], errors='coerce')\n\n # extract month, day of week and hour from Start Time to create new columns\n \n # Months will take values from 1 through 12\n df['month'] = df['Start Time'].dt.month \n \n # day of the week will take values in the range of 1 through 7\n df['day_of_week'] = df['Start Time'].dt.dayofweek \n \n # hour will take values from 0 through 23\n df['hour'] = df['Start Time'].dt.hour # range (0-23)\n\n # Here, we are filtering by month\n df['End Time'] = pd.to_datetime(df['End Time'])\n if month != 'all':\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1 \n\n df = df[df['Start Time'].dt.month == month]\n\n # Here, we are filtering by day of week\n if day != 'all': \n df = df[df['Start Time'].dt.weekday_name == day.title()]\n \n return df", "def test_non_matching_location(self):\n user1 = get_user_model().objects.get(username='test1@example.com')\n self.client.login(username='test1@example.com', password='1')\n\n office = OfficeLocation.objects.all()[0]\n org = OrgGroup.objects.filter(parent__isnull=True)[0]\n\n submission1 = Interest()\n submission1.owner = user1\n submission1.for_coffee = True\n submission1.save()\n submission1.locations.add(office)\n submission1.departments.add(org)\n\n resp = self.client.get(reverse('mystery:mystery'))\n self.assertContains(resp, \"Cancel this\", status_code=200)\n\n user2 = random_user()\n office2 = OfficeLocation()\n office2.id = \"test_id\"\n office2.street = \"test office\"\n office2.city = \"test office\"\n office2.state = \"test office\"\n office2.zip = \"test office\"\n office2.save()\n submission2 = Interest()\n submission2.owner = user2\n submission2.is_active = False\n submission2.save()\n submission2.for_coffee = True\n submission2.locations.add(office2)\n submission2.departments.add(org)\n submission2.is_active = True\n submission2.save()\n\n resp = self.client.get(reverse('mystery:mystery'))\n self.assertContains(resp, \"Cancel this\", status_code=200)", "def test_bad_airport(self):\n result = self.client.get(\"/search?origin=foo&destination=DFW%2C+Dallas+TX&date=2018-05-21\")\n self.assertNotIn('<meter', result.data)\n self.assertIn('enter a valid 
airport', result.data)", "def load_data(city, month, day):\n df = pd.read_csv(CITY_DATA[city])\n \n\n #Converting time\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n \n if month != 'all':\n month = MONTHS.index(month) + 1\n\n df = df[df['month'] == month]\n \n if day != 'all':\n df = df[df['day_of_week'] == day.title()]\n \n return df", "def load_data(city, month, day):\n #loading data of the city chosen by user into dataframe\n df = pd.read_csv(CITY_DATA[city])\n #converting the start time clomn from object (string) to datetime object so as we can use datetime Attributes and methonds to extract month coulmn and day to filter with them\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n #extracting month and day into new columns and days into new column 'month_name' and 'day_name' are methods in pandas datetime (https://pandas.pydata.org/pandas-docs/version/0.23/generated/pandas.DatetimeIndex.html) as it's in this link\n df['month'] = df['Start Time'].dt.month_name()\n df['day_of_week'] = df['Start Time'].dt.day_name()\n #filtering data city with user inputs filter by moth and day:\n if month != 'all':\n df = df[df['month'] == month.title()]\n if day != 'all':\n df = df[df['day_of_week'] == day.title()]\n\n\n return df", "def load_data(city, month, day):\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n\n # filter by month if applicable\n if month != 'all':\n df = df[df['month']==month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week']==day]\n\n return df", "def load_data(city, month, day):\n\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['End Time'] = pd.to_datetime(df['End Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['hour'] = df['Start Time'].dt.hour\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "def load_data(city, month, day):\n df = pd.read_csv(CITY_DATA [city])\n \n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n \n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['hour'] = df['Start Time'].dt.hour\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n \n # Do the filter below\n # no filter is applied\n if month == 0 and day == 0:\n return df\n # only filter by day\n elif month == 0:\n df = df[df['day_of_week']==day]\n # only filter by month\n elif day == 0:\n df = 
df[df['month']== month]\n else:\n df = df[df['day_of_week']==day]\n df = df[df['month']== month]\n \n return df", "def test_load_file_does_not_exists(self):\n\n self.inactive_db.load()\n expected = {self.file_to_test: {}}\n\n self.assertEqual(expected, self.inactive_db.database)", "def main():\n\n #get the csv file into a data-frame\n universities_df = pd.read_csv('universities_data.csv', encoding = 'utf-8-sig')\n universities_names_list = universities_df['name'].tolist()\n\n #get list of university objects\n url = 'http://universities.hipolabs.com/search?country=Israel'\n api_universities = Get_universities(url)\n list_of_universities = api_universities.get_universities_info()\n\n #to see if we got new entities or not for exporting to csv later..\n is_new_entities = False\n\n for university in list_of_universities:\n if university.name not in universities_names_list:\n is_new_entities = True\n universities_df= universities_df.append(pd.DataFrame({\n 'alpha_two_code': [university.alpha_two_code], \n 'country': [university.country],\n 'web_pages': [str(university.web_pages)],\n 'domains': [str(university.domains)],\n 'name': [university.name],\n 'state_province':[str(university.state_province)]}) , ignore_index = True)\n\n #export back to csv if true\n if is_new_entities: \n print('we got new entities!') \n universities_df.to_csv('universities_data.csv', encoding = 'utf-8-sig', index = False)\n else:print('no new universities for now!')", "def load_data(city, month, day):\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n days = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']\n df = pd.read_csv(CITY_DATA[city])\n df['Start Time'] = pd.to_datetime(df['Start Time']) # convert the Start Time column to datetime\n df['month'] = df['Start Time'].dt.month # extract month from start time to create a new column\n df['day_of_week'] = df['Start Time'].dt.day_name() # extract day from start time to create a new column\n\n if month in months and day == 'all': # filter the df only by month if applicable\n month = convert_to_int(months, month)\n df = df.loc[df['month'] == month]\n \n if month == 'all' and day in days : # filter the df only by day of week if applicable\n df = df.loc[df['day_of_week'] == day.title()]\n \n if month in months and day in days:\n # use the index of the months list to get the corresponding month's int\n month = convert_to_int(months, month)\n\n df = df.loc[df['month'] == month] # first filter the df by month\n df = df.loc[df['day_of_week'] == day.title()] # then filter the df by day of week\n\n return df # no filter applied", "def test_geo_data_created(self):\n # Currently, there are no GeometryStore or PointGeometry objects in the database\n self.assertEqual(GeometryStore.objects.count(), 0)\n self.assertEqual(PointGeometry.objects.count(), 0)\n\n self.call_command(filename='power_plant_import/tests/data/six_rows.csv')\n\n # Get the PowerPlants that were created during the import\n (powerplant_ouessant, powerplant_ilarionas, powerplant_tonstad) = self.get_created_plants()\n # Get the Projects that were created during the import\n (project_ouessant1, project_ouessant2, project_liaoning) = self.get_created_projects()\n\n # GeometryStore objects were created for:\n # - powerplant_ouessant\n # - powerplant_ilarionas\n # - project_liaoning\n # The project_ouessant1 and project_ouessant2 should use\n # powerplant_ouessant's GeometryStore\n self.assertEqual(GeometryStore.objects.count(), 3)\n # PointGeometry objects were created for:\n 
# - powerplant_ouessant\n # - powerplant_ilarionas\n # - project_liaoning\n # The project_ouessant1 and project_ouessant2 should use\n # powerplant_ouessant's PointGeometry\n self.assertEqual(PointGeometry.objects.count(), 3)\n # The powerplant_ouessant point is correct\n powerplant_ouessant_points = powerplant_ouessant.geo.points.all()\n self.assertEqual(powerplant_ouessant_points.count(), 1)\n self.assertEqual(powerplant_ouessant_points.first().geom.x, -5.11121)\n self.assertEqual(powerplant_ouessant_points.first().geom.y, 48.43754)\n # The powerplant_ilarionas point is correct\n powerplant_ilarionas_points = powerplant_ilarionas.geo.points.all()\n self.assertEqual(powerplant_ilarionas_points.count(), 1)\n self.assertEqual(powerplant_ilarionas_points.first().geom.x, 21.8039)\n self.assertEqual(powerplant_ilarionas_points.first().geom.y, 40.0966)\n # The project_liaoning gets its geodata from its latitude and longitude\n # cells\n project_liaoning_points = project_liaoning.geo.points.all()\n self.assertEqual(project_liaoning_points.count(), 1)\n self.assertEqual(project_liaoning_points.first().geom.x, 121.38065)\n self.assertEqual(project_liaoning_points.first().geom.y, 41.16469)\n # For the project_ouessant1 and project_ouessant2, the latitude and\n # longitude cells are blank, so they get their geodata from their\n # parent PowerPlant (powerplant_ouessant).\n self.assertEqual(project_ouessant1.geo, project_ouessant1.power_plant.geo)\n self.assertEqual(project_ouessant2.geo, project_ouessant2.power_plant.geo)\n # The powerplant_tonstad has no geo data\n self.assertIsNone(powerplant_tonstad.geo)", "def populate_cities():\n if City.query.filter_by(name=CITIES[0]).first():\n return\n\n for city in CITIES:\n _add_city(city)", "def load_states():\n\n print \"States and Territories\"\n\n State.query.delete()\n\n for row in open(\"data/states_and_territories.txt\"):\n row = row.rstrip()\n # can't seem to get rid of \"\\r\" character other than doing a .split\n piped_rows = row.split(\"\\r\")\n for i in piped_rows:\n state_info = i.split(\"|\")\n state_name = state_info[0]\n state_code = state_info[1]\n\n state = State(state_name=state_name, state_code=state_code)\n\n db.session.add(state)\n\n db.session.commit()\n print \"States seeded\"", "def expected_city_names_fixture():\n return {'b', 'a', 'c'}", "def test_get_country_by_geo_location(self):\n pass", "def test_city_country(self):\n santiago_chile = get_city_name('santiago', 'chile')\n self.assertEqual(santiago_chile, 'Santiago, Chile')", "def semi_all_static_feature(city):\n poi_frequency = np.load(exp_data_path + os.sep + 'poi_frequency' + os.sep + 'poi_frequency_{}.npy'.format(city),\n allow_pickle=True) # .tolist()\n poi_num = np.load(exp_data_path + os.sep + 'poi' + os.sep + 'poi_{}.npy'.format(city), allow_pickle=True)\n poi_entropy = np.load(exp_data_path + os.sep + 'poi_entropy' + os.sep + 'poi_entropy_{}.npy'.format(city),\n allow_pickle=True)\n road = np.load(exp_data_path + os.sep + 'roadnet' + os.sep + 'roadnet_{}.npy'.format(city), allow_pickle=True)\n transportation = np.load(exp_data_path + os.sep + 'transportation' + os.sep + 'transportation_{}.npy'.format(city),\n allow_pickle=True)\n commerce = np.load(exp_data_path + os.sep + 'commerce' + os.sep + 'commerce_{}.npy'.format(city), allow_pickle=True)\n\n file_name = exp_data_path + os.sep + 'station' + os.sep + 'all_demand_{}.npy'.format(city)\n demand_data = np.load(file_name, allow_pickle=True)\n total_num = demand_data[:, 0, -2, np.newaxis]\n slow_num = demand_data[:, 0, 
0, np.newaxis]\n fast_num = demand_data[:, 0, 2, np.newaxis]\n\n raw_data = np.concatenate((slow_num, fast_num, total_num, poi_frequency, poi_num, poi_entropy, road, transportation, commerce), axis=1)\n csv_data = pd.DataFrame(raw_data, columns=SEMI_GENERAL_HEADER)\n print(csv_data.shape)\n # print(csv_data.iloc[:, 2])\n\n file_path = exp_data_path + os.sep + 'static' + os.sep + 'semi_static_feature_{}.csv'.format(city)\n if os.path.exists(file_path):\n os.remove(file_path)\n csv_data.to_csv(file_path)\n pass", "def load_data(city, month, day):\n \n df = pd.read_csv(CITY_DATA[city])\n \n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n df['End Time'] = pd.to_datetime(df['End Time'])\n\n\n\n # filter by month \n if month != 'all':\n \n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month =months.index(month) + 1\n \n \n df = df[df['month'] == month]\n\n # filter by day of week \n if day != 'all':\n \n df = df[df['day_of_week'] == day.title()]\n\n return df", "def load_data(city, month, day):\n #Used the practice#3 from Project solution here to convert time columns to month and weekday_name\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n #print('this is the month', month)\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n \n return df", "def load_data(city, month, day):\n while month != \"\":\n # load data file into a dataframe\n filename = CITY_DATA[city]\n df = pd.read_csv(filename)\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n # df['day_of_week'] = df['Start Time'].dt.day_name()\n\n\n try: df['day_of_week'] = df['Start Time'].dt.weekday_name\n except: df['day_of_week'] = df['Start Time'].dt.day_name()\n else: df['day_of_week'] = df['Start Time'].dt.weekday\n \n \n \n df['hour'] = df['Start Time'].dt.hour\n\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n # months = ['january', 'february', 'march', 'april', 'may', 'june','july','august','september','october','november','december']\n month = int(months.index(month)) + 1\n \n # filter by month to create the new dataframe\n df = df.loc[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df.loc[df['day_of_week'] == day.title()]\n \n return df", "def test_search_cities(self):\n City.objects.delete()\n for i in range(3):\n city = CityFactory(name='Petropavlovsk')\n city.save()\n\n city = CityFactory(name='Almaty')\n city.save()\n\n u = UserFactory(role=User.MODERATOR)\n 
u.set_password('123')\n u.save()\n\n auth_url = prepare_url('login')\n data = {\n 'username': u.username,\n 'password': '123'\n }\n response = self.client.post(auth_url, data=data, format='json')\n token = response.data['token']\n\n url = prepare_url('admin-cities-list', query={'search': 'petro'})\n self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token))\n response = self.client.get(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['count'], 3)", "def load_data(city, month, day):\n\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city.lower()])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month.lower()) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n return df", "def load_data(city, month, day):\n # load datafile into a DataFrame\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to Date time\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extracting month and day of the week from Start time\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday\n\n #filter by month when applicable\n if month != 'all':\n month = MONTH_DATA.index(month)\n\n #filter by month to create a new DataFrame\n df = df[df['month'] == month]\n\n #filter by day of the week where applicable\n if day != 'all':\n df = df[df['day_of_week'] == day.title()]\n\n return df", "def load_data(client):\n codes = [\"DUB\", \"LHR\", \"ETC\", \"XXX\"]\n q = generateMultiInsertQuery(codes, \"Airport\")\n #print(json.dumps(q.json(), indent=4))\n q.execute(client)", "def test_city_country(self):\n dublin_ireland = city_country('dublin', 'ireland')\n self.assertEqual(dublin_ireland, 'Dublin, Ireland')" ]
[ "0.63873357", "0.63659126", "0.6264148", "0.59355015", "0.5915979", "0.5888972", "0.5843924", "0.5843775", "0.58309996", "0.58118176", "0.5742887", "0.57028115", "0.561459", "0.5560846", "0.5550713", "0.55434555", "0.55350614", "0.5522535", "0.5517603", "0.55035067", "0.5466038", "0.54654384", "0.5464822", "0.54520917", "0.5446016", "0.54459006", "0.54222333", "0.54139197", "0.5391507", "0.53745365", "0.53733003", "0.53727406", "0.5358484", "0.5357724", "0.5342859", "0.5339477", "0.53367555", "0.5326953", "0.53267944", "0.53226024", "0.5313353", "0.53117055", "0.53117055", "0.5310639", "0.5307862", "0.53074574", "0.5298835", "0.5297959", "0.5294521", "0.52888954", "0.52827966", "0.5281102", "0.5275038", "0.52622527", "0.525376", "0.5253701", "0.52535266", "0.52507365", "0.52484703", "0.5242878", "0.5241183", "0.5237875", "0.52368206", "0.52328336", "0.52325284", "0.52284235", "0.5227002", "0.5224651", "0.5224454", "0.5222266", "0.5213354", "0.5209869", "0.5209081", "0.52085584", "0.52051294", "0.5201916", "0.52003604", "0.5198854", "0.5193625", "0.5191095", "0.5187074", "0.5186528", "0.5181633", "0.5178715", "0.51767296", "0.5174456", "0.5172744", "0.5171692", "0.51713336", "0.5170745", "0.5170488", "0.5169219", "0.5169182", "0.516741", "0.51671827", "0.5159425", "0.5156135", "0.51539046", "0.51532555", "0.5152527" ]
0.5511484
19
Checks whether the search functionality works fine
def test_search(self): from importCsv.models import City, Hotel path = reverse("search") user = mixer.blend(User, is_staff=True, is_superuser=True) city = mixer.blend(City, abbrev="tes", name="test") mixer.blend(Hotel, city=city, data="testData", name="test hotel") client = Client() client.force_login(user) r = client.post(path, {"tes": "on"}) assert r.status_code == 200 assert r.content.find(b'test hotel')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_search(self):\n pass", "def test_search(self):\n pass", "def test_search(self):\n pass", "def search():\n pass", "def test_non_existent_term_search(self):\n\n expected_results = []\n results = self.searcher.search(\"asdasdasdas\")\n\n self.assertListEqual(results, expected_results)", "def test_search_3(self):\n\n # search for \"cheese\"\n FrontSearchForm() \\\n .populate_form({'search_box' : 'cheese'}) \\\n .submit_form()\n\n # check that results are shown\n AppBar() \\\n .result_stats.should(be.visible)", "def test_search_1(self):\n\n # import pdb; pdb.set_trace()\n\n # type \"cheese\" into the search field\n s(by.css('[name=\"q\"]')) \\\n .set_value('cheese')\n\n # click the \"Google Search\" button\n s(by.css('[name=\"btnK\"]')) \\\n .click()\n\n # check that results are shown\n s(by.css('#resultStats')) \\\n .should(be.visible)", "def test_search_4(self):\n\n # search for \"cheese\"\n form = FrontSearchForm()\n form.search_box.set_value('cheese')\n form.submit.click()\n\n # check that results are shown\n AppBar() \\\n .result_stats.should(be.visible)", "def test_search_checkname(self):\n self.assertEquals(self.t['Scrubs'].search('my first')[0]['episodename'], 'My First Day')\n self.assertEquals(self.t['My Name Is Earl'].search('Faked His Own Death')[0]['episodename'], 'Faked His Own Death')", "def test_search_show(self):\n self.assertEquals(\n len(self.t['life on mars'].search('the', key='episodename')),\n 10\n )", "def test_search_page(self):\n result = self.client.get(\"/search\")\n self.assertIn(b\"Search\", result.data)", "def test_search_multiresults(self):\n self.assertEquals(len(self.t['Scrubs'].search('my first')) >= 3, True)", "def test_small_search_exists(self):\n\n search_html = 'agency search--box scrollable-dropdown-menu'\n\n response = self.client.get(reverse('learn'))\n self.assertContains(response, search_html)\n\n response = self.client.get(reverse('about'))\n self.assertContains(response, search_html)\n\n response = self.client.get(reverse('agencies'))\n self.assertContains(response, search_html)\n\n response = self.client.get(reverse('developers'))\n self.assertContains(response, search_html)\n\n response = self.client.get(reverse('developer'))\n self.assertContains(response, search_html)\n\n response = self.client.get(\n reverse(\n 'contact_landing',\n args=['department-of-commerce--census-bureau']))\n self.assertContains(response, search_html)", "def test_01_search(self):\r\n res = self.app.get('/search')\r\n err_msg = \"Search page should be accessible\"\r\n assert \"Search\" in res.data, err_msg", "def test_search(self):\n d = self._search()\n self._response([2, 5, 10])\n self.assertEqual(self.successResultOf(d), [2, 5, 10])", "def continue_search( self ):\n return True;", "def test_search_2(self):\n\n # type \"cheese\" into the search field\n s('[name=\"q\"]') \\\n .set_value('cheese')\n\n # click the \"Google Search\" button\n s('[name=\"btnK\"]') \\\n .click()\n\n # check that results are shown\n s('#resultStats') \\\n .should(be.visible)", "def test_act_is_searching(self):\n # setup\n self.strategy._is_searching = True\n\n # operation\n self.search_behaviour.act()\n\n # after\n self.assert_quantity_in_outbox(1)\n has_attributes, error_str = self.message_has_attributes(\n actual_message=self.get_message_from_outbox(),\n message_type=OefSearchMessage,\n performative=OefSearchMessage.Performative.SEARCH_SERVICES,\n to=self.skill.skill_context.search_service_address,\n sender=str(self.skill.public_id),\n 
query=self.skill.skill_context.strategy.get_location_and_service_query(),\n )\n assert has_attributes, error_str", "def test_search_720(self):\n self.driver.get(self.domain)\n self.assertTrue(u'XXXX' in\n self.driver.page_source, 'Title text not found')\n search = self.driver.find_element_by_css_selector(\"#XXXX\")\n wait = ui.WebDriverWait(self.driver, 5)\n search = self.driver.find_element_by_css_selector(\"#XXXX\")\n search.click()\n search_field = self.driver.find_element_by_css_selector(\"#XXXX\")\n search_field.send_keys(\"XXXX\")\n search_field.submit()\n try:\n wait.until(lambda driver: u\"XXXX\" in\n self.driver.find_element_by_css_selector(\"xxxx > a\").text,\n 'Not found!')\n except:\n current_url = self.driver.current_url\n resp = requests.get(current_url)\n if resp.status_code != 200:\n raise Exception(\"Search failed! => [%s] %s\" % (resp.status_code,\n current_url))", "def test_existent_term_search(self):\n results = self.searcher.search(\"coach\")\n expected_results = 3\n\n self.assertEqual(results[0].indexable.docid, expected_results)", "def test_search_1200(self):\n self.driver.get(self.domain)\n self.driver.maximize_window()\n self.assertTrue(u'TITLE' in\n self.driver.page_source, 'Title text not found')\n search = self.driver.find_element_by_css_selector(\"#XXXX\")\n wait = ui.WebDriverWait(self.driver, 5)\n search.clear()\n search.send_keys(\"XXXX\")\n search.submit()\n try:\n wait.until(lambda driver: u\"XXXX\" in\n self.driver.find_element_by_css_selector(\"xxxx > a\").text,\n 'Not found!')\n except:\n current_url = self.driver.current_url\n resp = requests.get(current_url)\n if resp.status_code != 200:\n raise Exception(\"Search failed! => [%s] %s\" % (resp.status_code,\n current_url))", "def test_run_search__found_and_unavailable(self):\n basics = {\n 'API_URL_ROOT': self.api_url_root,\n 'API_KEY': self.api_key,\n 'PARTNERSHIP_ID': self.partnership_id,\n 'UNIVERSITY_CODE': self.university_code,\n 'LOG_PATH': self.LOG_PATH }\n bd = BorrowDirect( basics )\n bd.run_search( self.patron_barcode, 'ISBN', self.isbn_found_and_unavailable )\n self.assertEqual( ['Available', 'RequestLink', 'SearchTerm'], sorted(bd.search_result.keys()) )\n self.assertEqual( False, bd.search_result['Available'] )", "def test02_blog_search_box(self):\n self.info(\"Use Search box in home bage.\")\n self.find_element(\"blogs_home_search\").click()\n search_box = self.find_element(\"blogs_search_box\")\n search_box.send_keys(\"test\")\n search_icon = self.find_element(\"blogs_icon_search\")\n search_icon.click()\n\n self.info(\"Check search box works successfully. 
\")\n search_results = self.find_element(\"blogs_search_result\")\n results = search_results.find_elements_by_tag_name(\"li\")\n results_text = self.find_element(\"blogs_result_text\")\n self.assertIn(str(len(results)), results_text)\n\n self.info(\"Try to get one of results, should works successfully.\")\n if results:\n random_post = random.choice(results)\n tmp = random_post.text\n post_name = tmp[tmp.find(\":\") + 2 :]\n random_post.find_element_by_tag_name(\"a\").click()\n self.assertIn(post_name, self.driver.current_url)", "def test_filter_search_form_is_valid(self):\r\n response = self.client.get(reverse('search_results'), {\r\n 'name': 'nutella',\r\n 'category': '1',\r\n 'nutriscore': 'd'\r\n })\r\n self.assertTrue(response.context['product_list'])", "def search(self, *args, **kwargs):", "def search_suggestion_box_is_present(driver, selector, index, results_page):\n if (index == 1) and (results_page == 1):\n try:\n # This try-except statement allows us to avoid the \n # problems cause by the LinkedIn search suggestion box\n driver.find_element_by_css_selector(\"div.suggested-search.bd\")\n except Exception as e:\n pass\n else:\n return True\n else:\n return False", "def test_search(self):\n\n with self.client as c:\n response = c.get(\"/users?q=al\")\n data = str(response.data)\n\n self.assertIn(\"@alice\", data)\n self.assertIn(\"@alvin\", data)\n\n self.assertNotIn(\"@bob\", data)\n self.assertNotIn(\"@carl\", data)", "def search(self, query):", "def is_searching(self):\n return self.get_search_active_thread_count() > 0", "def test_absorbs_naked_a_search(self):\n invenio_search = \"author:ellis\"\n naked_search = \"a ellis\"\n self._compare_searches(invenio_search, naked_search)", "def test_search(self):\n project = factories.ProjectFactory.create(title=\"Test\")\n self.client.force_login(project.owned_by)\n\n response = self.client.get(\"/search/\")\n self.assertContains(response, \"Search query missing.\")\n\n response = self.client.get(\"/search/?q=Test\")\n self.assertContains(response, project.get_absolute_url())\n\n self.assertContains(response, \"projects\")\n self.assertContains(response, \"organizations\")\n self.assertContains(response, \"people\")\n self.assertContains(response, \"invoices\")\n self.assertContains(response, \"recurring-invoices\")\n self.assertContains(response, \"offers\")\n self.assertContains(response, \"deals\")\n\n with override_settings(FEATURES={\"controlling\": False}):\n response = self.client.get(\"/search/?q=Test\")\n self.assertContains(response, project.get_absolute_url())\n\n self.assertContains(response, \"projects\")\n self.assertContains(response, \"organizations\")\n self.assertContains(response, \"people\")\n self.assertNotContains(response, \"invoices\")\n self.assertNotContains(response, \"recurring-invoices\")\n self.assertNotContains(response, \"offers\")\n self.assertNotContains(response, \"deals\")", "def search(self, find_val):\n return False", "def on_searchin_changed(self):\r\n\r\n self.check_searchin()", "def test_valid_search_query_and_category_return_results(self):\n article = ArticleFactory()\n article.publish()\n request = RequestFactory().get(\"\", {\"q\": article.title[:10]})\n response = Search.as_view()(request, category=\"articles\")\n self.assertIn(\"results\", response.context_data)\n results = response.context_data[\"results\"]\n self.assertEqual(len(results), 1)\n self.assertIn(article, results)", "def check_searchin(self):\r\n\r\n self.limit_panel_toggle()\r\n\r\n pth = self.m_searchin_text.GetValue()\r\n if 
not self.searchin_update:\r\n if isdir(pth):\r\n self.m_searchin_dir_picker.SetPath(pth)\r\n elif isfile(pth):\r\n self.m_searchin_dir_picker.SetPath(dirname(pth))\r\n self.searchin_update = False", "def test_search_form(self):\n set_up_one_user(self, 1, 1)\n login = self.client.login(username='test', password='2HJ1vRV0Z&3iD')\n response = self.client.post(reverse('index'), {'terms_en': 'Test Search', 'websites': [self.website.pk]})\n s = Search.objects.filter(terms_en=\"Test Search\")\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(s), 1)", "def search(request):\n raise NotImplementedError", "def test_search_found_unavailable(self):\n s = Searcher( self.logger )\n ( search_key, search_value ) = ( 'ISBN', self.isbn_found_and_unavailable )\n result_dct = s.search(\n self.patron_barcode, search_key, search_value, self.api_url_root, self.api_key, self.partnership_id, self.university_code )\n self.assertEqual(\n [u'Available', u'RequestLink', u'SearchTerm'], sorted(result_dct.keys()) )\n self.assertEqual(\n False, result_dct['Available'] )", "def test_ajax_search(self):\r\n # first let's add a bookmark we can search on\r\n self._get_good_request()\r\n search_res = self.testapp.get(\r\n '/admin/results/google',\r\n headers={\r\n 'X-Requested-With': 'XMLHttpRequest',\r\n 'Accept': 'application/json'\r\n }\r\n )\r\n\r\n self.assertTrue(\r\n search_res.status == '200 OK',\r\n \"Status is 200: \" + search_res.status)\r\n\r\n self.assertTrue(\r\n 'my google desc' in search_res.body,\r\n \"We should find our description on the page: \" + search_res.body)\r\n\r\n # also check for our specific json bits\r\n self.assertTrue(\r\n 'success' in search_res.body,\r\n \"We should see a success bit in the json: \" + search_res.body)\r\n\r\n self.assertTrue(\r\n 'payload' in search_res.body,\r\n \"We should see a payload bit in the json: \" + search_res.body)\r\n\r\n self.assertTrue(\r\n 'message' in search_res.body,\r\n \"We should see a message bit in the json: \" + search_res.body)", "def test_empty_query_search(self):\n assert UserProfile.search('').count()", "def test_post_foods_search(self):\n pass", "def test_search_form_is_valid(self):\r\n response = self.client.get(reverse('search_results'), {\r\n 'name': 'product'\r\n })\r\n self.assertTemplateUsed(response, 'purbeurre/search_results.html')", "def search(self, *args, **kwargs): # real signature unknown\n pass", "def search(request):\n\n term = \"\"\n organizations = None\n memberships = None\n events = None\n persons = None\n airports = None\n training_requests = None\n comments = None\n only_result = None\n\n if request.method == \"GET\" and \"term\" in request.GET:\n form = SearchForm(request.GET)\n if form.is_valid():\n term = form.cleaned_data.get(\"term\", \"\")\n tokens = re.split(r\"\\s+\", term)\n\n organizations = Organization.objects.filter(\n Q(domain__icontains=term) | Q(fullname__icontains=term)\n ).order_by(\"fullname\")\n if len(organizations) == 1 and not only_result:\n only_result = organizations[0]\n\n memberships = Membership.objects.filter(\n registration_code__icontains=term\n ).order_by(\"-agreement_start\")\n if len(memberships) == 1 and not only_result:\n only_result = memberships[0]\n\n events = Event.objects.filter(\n Q(slug__icontains=term)\n | Q(host__domain__icontains=term)\n | Q(host__fullname__icontains=term)\n | Q(url__icontains=term)\n | Q(contact__icontains=term)\n | Q(venue__icontains=term)\n | Q(address__icontains=term)\n ).order_by(\"-slug\")\n if len(events) == 1 and not only_result:\n 
only_result = events[0]\n\n # if user searches for two words, assume they mean a person\n # name\n if len(tokens) == 2:\n name1, name2 = tokens\n complex_q = (\n (Q(personal__icontains=name1) & Q(family__icontains=name2))\n | (Q(personal__icontains=name2) & Q(family__icontains=name1))\n | Q(email__icontains=term)\n | Q(secondary_email__icontains=term)\n | Q(github__icontains=term)\n )\n persons = Person.objects.filter(complex_q)\n else:\n persons = Person.objects.filter(\n Q(personal__icontains=term)\n | Q(family__icontains=term)\n | Q(email__icontains=term)\n | Q(secondary_email__icontains=term)\n | Q(github__icontains=term)\n ).order_by(\"family\")\n\n if len(persons) == 1 and not only_result:\n only_result = persons[0]\n\n airports = Airport.objects.filter(\n Q(iata__icontains=term) | Q(fullname__icontains=term)\n ).order_by(\"iata\")\n if len(airports) == 1 and not only_result:\n only_result = airports[0]\n\n training_requests = TrainingRequest.objects.filter(\n Q(group_name__icontains=term)\n | Q(family__icontains=term)\n | Q(email__icontains=term)\n | Q(github__icontains=term)\n | Q(affiliation__icontains=term)\n | Q(location__icontains=term)\n | Q(user_notes__icontains=term)\n )\n if len(training_requests) == 1 and not only_result:\n only_result = training_requests[0]\n\n comments = Comment.objects.filter(\n Q(comment__icontains=term)\n | Q(user_name__icontains=term)\n | Q(user_email__icontains=term)\n | Q(user__personal__icontains=term)\n | Q(user__family__icontains=term)\n | Q(user__email__icontains=term)\n | Q(user__github__icontains=term)\n ).prefetch_related(\"content_object\")\n if len(comments) == 1 and not only_result:\n only_result = comments[0]\n\n # only 1 record found? Let's move to it immediately\n if only_result and not form.cleaned_data[\"no_redirect\"]:\n msg = format_html(\n \"You were moved to this page, because your search <i>{}</i> \"\n \"yields only this result.\",\n term,\n )\n if isinstance(only_result, Comment):\n messages.success(request, msg)\n return redirect(\n only_result.content_object.get_absolute_url()\n + \"#c{}\".format(only_result.id)\n )\n elif hasattr(only_result, \"get_absolute_url\"):\n messages.success(request, msg)\n return redirect(only_result.get_absolute_url())\n\n else:\n messages.error(request, \"Fix errors below.\")\n\n # if empty GET, we'll create a blank form\n else:\n form = SearchForm()\n\n context = {\n \"title\": \"Search\",\n \"form\": form,\n \"term\": term,\n \"organisations\": organizations,\n \"memberships\": memberships,\n \"events\": events,\n \"persons\": persons,\n \"airports\": airports,\n \"comments\": comments,\n \"training_requests\": training_requests,\n }\n return render(request, \"dashboard/search.html\", context)", "def search(self, search):\n raise NotImplementedError", "def start_search(self):\n self._raise_not_supported()", "def test_search_results(self):\n\t\tself.driver.get(\"http://www.google.com\")\n\t\tsearchbox = self.driver.find_element_by_name('q')\n\t\tsearchbox.send_keys(self.searchTerm)\n\t\tsearchbox.send_keys(Keys.RETURN)\n\t\traw_input('\\nenter to continue:')\n\t\t# Get a list of all search results\n\t\tsearchResultsList = self.driver.find_elements_by_tag_name('a')\n\t\tprint \"\\nsearch results list data type = \", type(searchResultsList)\n\t\tprint \"--> len of search results list = \", len(searchResultsList)\n\t\tprint \"--> search term = '%s'\" % self.searchTerm\n\n\t\t## Find first 10 results appearing after 1st instance of search term\n\t\t#indexTermAppears = 
self.getIndexOfFirstAppearance(searchResultsList)\n\t\t#print \"index Term Appears data type = \", type(indexTermAppears)\n\n\n\n\n\t\t#firstTen = searchResultsList[indexTermAppears:indexTermAppears+10]\n\t\t\n\t\t# Look for the search term in each href of all 10 search results\n\t\tfirstTen = searchResultsList\n\t\tif len(firstTen) == 0:\n\t\t\tself.fail(\"Test failed b/c no links were found\")\n\t\tfailCount = 0\n\t\tpassCount = 0\n\t\tlisthrefs = []\n\n\t\tfor link in firstTen:\n\t\t\tlinkName = link.get_attribute('href')\n\t\t\tprint \"linkName type: \", type(linkName)\n\t\t\t# type check does NOT require quotes for the answer\n\t\t\tif type(linkName) == unicode: ##########################\n\t\t\t\tlisthrefs.append(linkName)\n\t\t\t\tprint \"linkName: \", linkName, \" -- linkName type = \", type(linkName)\n\t\t\t\tif self.searchTerm not in linkName:\n\t\t\t\t\tfailCount += 1\n\t\t\t\t\tprint \"search term: '%s' does NOT appear in %s \" % (self.searchTerm, linkName)\n\t\t\t\telse:\n\t\t\t\t\tpassCount += 1\n\t\t\t\t\tprint \"\\nsearch term: '%s' DOES appear in %s \" % (self.searchTerm, linkName)\n\t\t\t\t\n\t\tprint \"\\npassCount = %d --------- failCount = %d\" % (passCount, failCount)\n\t\t\n\t\t# creat a list of the top 20\n\t\tsearchTermList = []\n\t\tmaxLinks = 20\n\t\tcurrentLinks = 0\n\t\tisTriggered = False\n\t\tnumLinksMissingSearchTerm = 0\n\t\tnumLinksContainingSearchTerm = 0\n\n\t\tfor href in listhrefs:\n\t\t\tif not isTriggered:\n\t\t\t\tif 'automation' in href:\n\t\t\t\t\tisTriggered = True\n\t\t\t\t\tcurrentLinks += 1 \n\t\t\t\t\tsearchTermList.append(href)\n\t\t\telif currentLinks < maxLinks:\n\t\t\t\tsearchTermList.append(href)\n\t\t\t\tcurrentLinks += 1\n\n\t\tprint \"search term list contains the 20 links after first instance of search term\"\n\t\tprint searchTermList\n\n\t\tfor link in searchTermList:\n\t\t\tif 'automation' in link:\n\t\t\t\tnumLinksContainingSearchTerm += 1\n\t\t\telse:\n\t\t\t\tnumLinksMissingSearchTerm += 1\n\t\t\t\tprint \"search term: \", self.searchTerm, \"does NOT appear in \\n - link: \", link\n\n\t\traw_input('enter to continue')\n\n\t\t# Is Fail Count sitll zero?\n\n\t\tself.assertEqual(numLinksMissingSearchTerm, 0)", "def test_search_page_no_DB_results(self, mock_api, mock_search):\n mock_search.side_effect = [LookupError, LookupError]\n response = self.client.post(\"/search/\", {\"query\": \"test\"})\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, \"search/base.html\")\n self.assertTemplateUsed(response, \"search/search_form.html\")\n self.assertTemplateUsed(response, \"search/results_page.html\")\n self.assertTemplateNotUsed(response, \"search/result.html\")\n self.assertEqual(response.context[\"error\"],\n \"Votre recherche n'a donné aucun résultats\")", "def test_nonvouched_search(self):\n url = reverse('search')\n response = self.mozillian_client.get(url)\n eq_(response.status_code, 200)\n eq_(len(response.context['people']), 2)\n\n response = self.mozillian_client.get(\n url, {'q': 'Am', 'include_non_vouched': 1})\n eq_(response.status_code, 200)\n eq_(len(response.context['people']), 3)", "def test_query_events_by_text_search(self):\n events = list(query_events_by_text_search(Event.objects.all(), 'Film'))\n self.assertTrue(self.event_film in events)\n self.assertFalse(self.event_show1 in events)", "def search(self, term):", "def is_searchable(self):\n return self._get_search_query() != ''", "def search():\n\n # POST\n if request.method == \"POST\":\n\n # validate form submission\n if not 
request.form.get(\"intervention\"):\n return render_template(\"results.html\", results=entries.values())\n ''' \n elif not request.form.get(\"setting\"):\n return apology(\"missing setting\")\n elif not request.form.get(\"emrpref\"):\n return apology(\"missing emr pref\")\n elif not request.form.get(\"budget\"):\n return apology(\"missing budget\")'''\n \n results = []\n for k in entries:\n print('entries', entries[k]['Keywords'])\n print('term', request.form.get(\"intervention\"))\n if request.form.get(\"intervention\") in entries[k]['Keywords']:\n print('ya')\n results.append(entries[k])\n\n\n return render_template(\"results.html\", results=results)\n\n\n # GET\n else:\n return render_template(\"search.html\")", "def test_searchOn(self):\n self.assertFalse(\n self.server.search_ON(self.earlierQuery, self.seq, self.msg))\n self.assertFalse(\n self.server.search_ON(self.sameDateQuery, self.seq, self.msg))\n self.assertFalse(\n self.server.search_ON(self.laterQuery, self.seq, self.msg))", "def test_search_found_available(self):\n s = Searcher( self.logger )\n ( search_key, search_value ) = ( 'ISBN', self.isbn_found_and_available )\n result_dct = s.search(\n self.patron_barcode, search_key, search_value, self.api_url_root, self.api_key, self.partnership_id, self.university_code )\n self.assertEqual(\n ['Available', 'PickupLocation', 'RequestLink', 'SearchTerm'], sorted(result_dct.keys()) )\n self.assertEqual(\n True, result_dct['Available'] )", "def test_analyze_a_recipe_search_query(self):\n pass", "def test_view_bad_search(self):\n response = self.client.get(reverse('details', args=(self.s.id,)))\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.context['details']), 0)", "def test_autocomplete_recipe_search(self):\n pass", "def test_run_search__found_and_available(self):\n basics = {\n 'API_URL_ROOT': self.api_url_root,\n 'API_KEY': self.api_key,\n 'PARTNERSHIP_ID': self.partnership_id,\n 'UNIVERSITY_CODE': self.university_code,\n 'LOG_PATH': self.LOG_PATH }\n bd = BorrowDirect( basics )\n bd.run_search( self.patron_barcode, 'ISBN', self.isbn_found_and_available )\n # print bd.search_result\n self.assertEqual( ['Available', 'PickupLocation', 'RequestLink', 'SearchTerm'], sorted(bd.search_result.keys()) )\n self.assertEqual( True, bd.search_result['Available'] )", "def test_search_systems_post(self):\n pass", "def test_post_chain_search(self):\n pass", "def test_search_with_space(self):\n # Create a group to test searching for groups\n Group.objects.create(name='spam', auto_complete=True)\n Group.objects.create(name='jam', auto_complete=True)\n Group.objects.create(name='bread', auto_complete=True)\n\n url = reverse('search')\n response = self.mozillian_client.get(url, {'q': 'am'})\n\n eq_(response.status_code, 200)\n\n queryset = response.context['people'].object_list\n for up in [self.mozillian.userprofile, self.mozillian2.userprofile]:\n self.assertTrue(up in queryset)\n\n # Assert appropriate group names are found in the document\n self.assertContains(response, 'spam')\n self.assertContains(response, 'jam')\n self.assertNotContains(response, 'bread')", "def test_search_len(self):\n self.assertEquals(len(self.t['My Name Is Earl'].search('Faked His Own Death')), 1)", "def __search(self):\n self.resultList.clear()\n self.infoLabel.clear()\n \n self.buttonBox.button(QDialogButtonBox.Close).setEnabled(False)\n self.buttonBox.button(QDialogButtonBox.Cancel).setEnabled(True)\n self.buttonBox.button(QDialogButtonBox.Cancel).setDefault(True)\n 
self.searchButton.setEnabled(False)\n QApplication.processEvents(QEventLoop.ExcludeUserInputEvents)\n \n QApplication.setOverrideCursor(Qt.WaitCursor)\n QApplication.processEvents(QEventLoop.ExcludeUserInputEvents)\n \n self.__canceled = False\n \n self.__query = [term for term in self.searchEdit.text().strip().split()\n if term not in PipSearchDialog.Stopwords]\n self.__client.call(\n \"search\",\n ({\"name\": self.__query, \"summary\": self.__query}, \"or\"),\n self.__processSearchResult,\n self.__searchError\n )", "def test_get_foods_search(self):\n pass", "def test_search_form_limit(self):\n set_up_one_user(self, 1, 0)\n login = self.client.login(username='test', password='2HJ1vRV0Z&3iD')\n response = self.client.post(reverse('index'), {'terms_en': 'Test Search', 'websites': [0]})\n s = Search.objects.filter(terms_en=\"Test Search\")\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(s), 0)", "def test_search(self):\n tester = app.test_client(self)\n response = tester.post('/search')\n self.assertEqual(response.status_code,200)", "def test_ice_and_fire_external_invalid_search(self):\n response = self.client.get('/api/external-books?name=abc23123', format='json')\n self.assertEqual(200, response.data['status_code'])\n self.assertEqual(0, len(response.data['data']))", "def search():\n query = input('Please enter your search query\\n')\n # For now, we will just print the whole database\n #db_actions.display()\n db_actions.search(query)", "def test_search_test_search_returns_correct_menu(self):\n # create some db records\n dataset = self.create_mixed_test_data()\n test_search_string = 'bravo'\n\n with patch('builtins.input', side_effect=test_search_string):\n result = self.menu.search_text_search()\n\n expected_result = self.menu.present_next_result\n\n self.assertEqual(expected_result, result)", "def abort_search(self):\n self._raise_not_supported()", "def search(request):\n\n # get form data \n searchItem = request.GET.get(\"q\")\n # if searchItem is an exact match redirect to that page\n if (util.get_entry(searchItem) is not None):\n return HttpResponseRedirect(reverse(\"entry\", kwargs={\n \"title\": searchItem\n }))\n # add any pages with the string in it to results list \n else: \n results = []\n substring = False\n for title in util.list_entries():\n if searchItem.upper() in title.upper():\n results.append(title)\n if results:\n substring = True\n # return results\n return render(request, \"encyclopedia/search.html\", {\n \"searchItem\": searchItem,\n \"substring\": substring,\n \"results\": results\n })", "def test_search_no_params_error(self):\n self.assertRaises(\n TypeError,\n lambda: self.t['Scrubs'].search()\n )", "def do_search(self):\n if self.patternEditor.value is not None:\n self.nbResult = self.nbMaxResult = 0\n self.resultPanel.clear_content()\n pattern = self.patternEditor.value\n if self._is_pattern_all(pattern):\n self.parent.uifacade.inform(\"application.exportblock\", None)\n else:\n self.parent.uifacade.inform(\"application.searchblock\", pattern)", "def test_search(self):\n resp = self.client.get(\n reverse('profiles:search'),\n # search with a lowercase search term\n data={'search_term': 'Test Project'})\n\n # projects that matches the search term \"Test Project\"\n self.assertContains(resp, 'Test Project')\n self.assertContains(resp, 'Django developer')\n self.assertContains(resp, str(self.project))\n # various page information\n self.assertContains(resp, 'Projects')\n self.assertContains(resp, 'All Needs')\n self.assertContains(resp, 
'Projects')\n\n self.assertTemplateUsed('homepage.html')", "def search_again(self):\n\n response = input(\n \"\\nWould you like to search for something else? (Yes or No): \")\n\n while response.lower().strip() != 'yes' or response.lower().strip() != 'no':\n\n if response.lower().strip() == 'yes':\n search_method_choice = self.search_method_menu()\n self.search_tasks(search_method_choice)\n elif response.lower().strip() == \"no\":\n self.main_menu()\n else:\n response = input(\"\\nInvalid choice, please try again: \")", "def test_if_app_can_search_for_existing_list_without_products(self):\n add_list=self.client.post('/shoppinglists/', \n data=self.shopllist,\n headers={\n 'Content-Type':'application/json',\n 'x-access-token':self.tok})\n searchforlists=self.client.get('/search/?q=shoes',\n headers={\n 'Content-Type':'application/json',\n 'x-access-token':self.tok})\n self.assertEqual(searchforlists.status_code,200) \n self.assertIn(\"No list found\",str(searchforlists.data))", "def test_search_not_found(self):\n s = Searcher( self.logger )\n ( search_key, search_value ) = ( 'ISBN', self.isbn_not_found )\n result_dct = s.search(\n self.patron_barcode, search_key, search_value, self.api_url_root, self.api_key, self.partnership_id, self.university_code )\n self.assertEqual(\n {\"Problem\":{\"ErrorCode\":\"PUBFI002\",\"ErrorMessage\":\"No result\"}}, result_dct )", "def test_small_search_dne(self):\n\n search_html = 'agency search--box scrollable-dropdown-menu'\n\n response = self.client.get(reverse('home'))\n self.assertNotContains(response, search_html)", "def supports_book_search(self):\n return False", "def test_search_page_item_not_in_database(self, mock_api, mock_search):\n mock_search.side_effect = [LookupError, {\"result\": \"result\",\n \"products\": \"products\"}]\n response = self.client.post(\"/search/\", {\"query\": \"test\"})\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, \"search/base.html\")\n self.assertTemplateUsed(response, \"search/search_form.html\")\n self.assertTemplateUsed(response, \"search/results_page.html\")\n self.assertTemplateUsed(response, \"search/result.html\")\n self.assertEqual(response.context[\"result\"], \"result\")\n self.assertEqual(response.context[\"products\"], \"products\")", "def test_search_page_no_API_results(self, mock_api, mock_search):\n mock_search.side_effect = [LookupError, ValueError]\n response = self.client.post(\"/search/\", {\"query\": \"test\"})\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, \"search/base.html\")\n self.assertTemplateUsed(response, \"search/search_form.html\")\n self.assertTemplateUsed(response, \"search/results_page.html\")\n self.assertTemplateNotUsed(response, \"search/result.html\")\n self.assertEqual(response.context[\"error\"],\n \"Votre recherche n'a donné aucun résultats\")", "def test_search_users(self):\n users = Profile.search_user(\"hey\")\n self.assertTrue(len(users) == 1)", "async def search(self, *args, **kwargs):\n pass", "def test_restlike_search(self):\r\n # first let's add a bookmark we can search on\r\n self._get_good_request()\r\n\r\n search_res = self.testapp.get(\r\n '/api/v1/admin/bmarks/search/search?search_content=True')\r\n\r\n self.assertTrue(\r\n search_res.status == '200 OK',\r\n \"Status is 200: \" + search_res.status)\r\n self.assertTrue(\r\n 'python' in search_res.body,\r\n \"We should find the python tag in the results: \" + search_res.body)", "def searchText(self):\n tabId = self.tab.currentIndex()\n if tabId == -1: 
return False\n currentDoc = self.tab.widget(tabId)\n \n if isinstance(currentDoc, WelcomePage):\n return\n \n if currentDoc.extension in [ TestUnit.TYPE, TestSuite.TYPE, TestAdapter.TYPE,\n TestData.TYPE, TestLibrary.TYPE, TestTxt.TYPE ]:\n selectedText = ''\n if currentDoc.editor().hasSelectedText():\n selectedText = currentDoc.editor().selectedText()\n # self.hideFindReplaceAction.setChecked(True)\n self.findWidget.showEnhanced(textSelected=selectedText)", "def test_search(self):\n response = Tmdb.search('ozark')\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(isinstance(data['results'], list))\n # TODO check if all the shows are in the good format (can be from_dict/to_dict)", "def test_wrong_search_criteria(self):\n # Name Decreasing sorting criteria is selected\n sorting_criteria = \"surname_decrease\"\n # Search string that returns all of the users\n search_string = \"''\"\n # Search For all users\n resp = SearchTest.client.get('/api/search/',{\"token\":SearchTest.valid_token,\"search_string\":search_string,\"sorting_criteria\":sorting_criteria})\n search_result = json.loads(resp.content)\n self.assertEqual(json.loads(resp.content),\"You give your input in wrong format. Please check the API documentation for the appropriate input format!!\",\"Sorting Critera Input Control Doesn't Work\")", "def test_search_project(self):\n title = Project.search_project(\"dee\")\n self.assertTrue(len(title) > 0)", "def on_searchButton_clicked(self):\n self.__search()", "def test_context_data_with_valid_search_and_no_results(self):\n response = self.client.get(self.get_url(self.study.pk), {'description': 'test'})\n context = response.context\n self.assertIn('form', context)\n self.assertTrue(context['has_results'])\n self.assertIsInstance(context['results_table'], tables.SourceTraitTableFull)", "def test_search_title_and_content(self):\r\n response = self.client.post(reverse('questions:search'),\r\n {'search': 'find this'})\r\n self.assertEqual(response.status_code, 200)\r\n results = response.context['question_list']\r\n self.assertEqual(results.count(), 2)", "def test_act_not_is_searching(self):\n # setup\n self.strategy._is_searching = False\n\n # operation\n self.search_behaviour.act()\n\n # after\n self.assert_quantity_in_outbox(0)", "def valid_searchpanel(arch, **kwargs):\n return len(arch.xpath('/search/searchpanel')) <= 1", "def _arguments_valid(self) -> bool:\n return self.find and self.near and self.max_results >= 1", "def test_search_data(self):\n tester = app.test_client(self)\n response = tester.post('/search')\n data = response.get_json()\n self.assertTrue('data' in data)", "def isSearchRequest(self):\n return re.search('Search.+Request', self.name) is not None", "def validate_lookup_search_term_format(search_query):\n if len(search_query) != 0:\n clear()\n return True\n\n else:\n clear()\n return False", "def test_no_search_string(self):\n resp = SearchTest.client.get('/api/search/',{'token':SearchTest.valid_token})\n self.assertEqual(json.loads(resp.content),\"You give your input in wrong format. Please check the API documentation for the appropriate input format!!\",\"No Search String Test Error\")", "def test_no_value_for_search_query_returns_empty_queryset(self):\n article = ArticleFactory()\n article.publish()\n request = RequestFactory().get(\"\", {})\n response = Search.as_view()(request)\n self.assertIn(\"results\", response.context_data)\n results = response.context_data[\"results\"]\n self.assertEqual(len(results), 0)" ]
[ "0.73777276", "0.73777276", "0.73777276", "0.7130437", "0.70597166", "0.7040227", "0.7010459", "0.6978306", "0.6912243", "0.69074774", "0.68966824", "0.68724257", "0.68662024", "0.68647575", "0.678795", "0.67700964", "0.6743339", "0.67405117", "0.6720262", "0.6709872", "0.66889524", "0.6665729", "0.6652973", "0.66523904", "0.6651433", "0.664961", "0.6634137", "0.66329354", "0.66305304", "0.6613175", "0.66019887", "0.65977395", "0.6595409", "0.65738297", "0.6538312", "0.65366644", "0.6529268", "0.6523937", "0.6521691", "0.651314", "0.65096265", "0.65012383", "0.6480618", "0.64798224", "0.6474702", "0.6459653", "0.64536464", "0.64491785", "0.64469", "0.64464736", "0.6419498", "0.6417537", "0.638247", "0.6379097", "0.63774395", "0.6371114", "0.63689935", "0.63428575", "0.6335172", "0.63350135", "0.63340247", "0.63277996", "0.63261133", "0.62988657", "0.62851787", "0.62833464", "0.6266355", "0.6265001", "0.62587404", "0.6234981", "0.6230228", "0.62285364", "0.6220744", "0.62127614", "0.62013876", "0.61999685", "0.6189991", "0.6183072", "0.618216", "0.61761254", "0.6172622", "0.61667275", "0.6152551", "0.61515546", "0.61485076", "0.61471987", "0.6145704", "0.61373836", "0.61318344", "0.613144", "0.6126089", "0.61215144", "0.6119843", "0.6118264", "0.611198", "0.61106527", "0.6110426", "0.6108664", "0.6104712", "0.60986" ]
0.63336676
61
Validates the functionality of logout
def test_logout(self):
    from django.contrib.messages import get_messages
    path = reverse("logout")
    user = mixer.blend(User, is_staff=True, is_superuser=True)
    client = Client()
    client.force_login(user)
    client.post('/admin/')
    r = client.post(path)
    messages = list(get_messages(r.wsgi_request))
    assert str(messages[0]) == "Successfully logged out"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def logout():", "def logout(self):", "def logout(self):\n pass", "def logout():\n login()", "def logout(self):\r\n # should redirect\r\n check_for_get_code(self, 302, reverse('logout'))", "def logout_user():\n pass", "def logout(request):\n if request.user.is_authenticated():\n auth_logout(request)\n return HttpResponseRedirect(reverse(\"voter_validation:index\"))", "def logout(self):\n self.getLink('Logout').click()\n self.html_redirect()\n assert 'You have been logged out successfully.' in self.message, \\\n 'Not successfully logged out: message={0.message!r}'.format(self)", "def logout(self, request):\n pass", "def test_logout_redirect(self):\n rc = self.logout()\n assert b'You logged out.' in rc.data", "def test_logout(self):\r\n self.logout()", "def logout(self):\n data = {'action': 'logout'}\n self.call(data)\n self._high_limits = None\n return True", "def test_logout_route_requires_login(self):\n response = self.client.get('/users/logout', follow_redirects=True)\n self.assertIn(b'Please log in to access this page', response.data)", "def logout():\n return logout_user()", "def logout(self):\n with self.client.post(\"/logout\", catch_response=True) as response:\n for r_hist in response.history:\n if r_hist.status_code > 200 and r_hist.status_code < 400:\n response.success()\n self.user.username = None\n # go to UnauthenticatedTasks\n self.interrupt()", "def test_logout_un_logged_in_user_false(self):\n resp = self.client().post('/api/v1/auth/logout/1')\n self.assertEqual(resp.status_code, 400)\n resp = resp.get_json()\n self.assertEqual(resp['error'],\n 'That user is not logged in')", "def logout():\n try:\n if session[\"user\"]:\n flash(\"You have logged out successfully\", category=\"success\")\n session.pop(\"user\")\n except KeyError:\n flash(\"You are not logged in\", category=\"error\")\n try:\n if session[\"admin\"]:\n session.pop(\"admin\")\n except KeyError:\n # user is not an admin\n pass\n finally:\n return redirect(url_for(\"get_terms\"))", "def test_user_can_logout(self):\n response = self.client.post(\n CONSTS.USER_LOGOUT_URL,\n format='json'\n )\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertNotEqual(User.objects.get().last_login, datetime.datetime.now())", "def test_logout(self):\n response = self.client.get('/logout/')\n self.assertEqual(response.status_code, 302)", "def test_logout(self):\n # Logging in voluntarily the quick way:\n resp = self.app.get('/login_handler?login=manager&password=managepass',\n status=302)\n resp = resp.follow(status=302)\n ok_('authtkt' in resp.request.cookies,\n 'Session cookie was not defined: %s' % resp.request.cookies)\n # Logging out:\n resp = self.app.get('/logout_handler', status=302)\n ok_(resp.location.startswith('http://localhost/post_logout'))\n # Finally, redirected to the home page:\n home_page = resp.follow(status=302)\n authtkt = home_page.request.cookies.get('authtkt')\n ok_(not authtkt or authtkt == 'INVALID',\n 'Session cookie was not deleted: %s' % home_page.request.cookies)\n eq_(home_page.location, 'http://localhost/')", "def auth_logout(request):\n logout(request)\n return HttpResponseRedirect( reverse('startpage') )", "def log_out(self):\n self.__is_logged_in = False", "def logout(self, **kwargs):\n\tself.call('logout')", "def logout():\n do_logout()\n return redirect('/login')", "def logout_action(request):\n if request.user.is_authenticated:\n logout(request)\n return HttpResponseRedirect(reverse('metro:simulation_manager'))", "def logout():\n flash(_('You were logged out'))\n 
session.pop('user_id', None)\n return redirect(url_for('index'))\n #return redirect(url_for('public_timeline'))", "def _handle_logout(self):\n self.food_service.log_out()\n self._handle_after_logout()", "def test_logout(self):\n\n self.client.post(reverse('login'), self.user)\n response = self.client.post(reverse('logout'))\n self.assertEqual(response.status_code, 302)\n response = self.client.get(reverse('polls:index'))\n self.assertFalse(response.context['user'].is_authenticated)", "def test_logout(self):\n\n result = self.client.get(\"/logout_process\",follow_redirects=True)\n self.assertIn(b'Login:',result.data)", "def test_logout_without_token(self):\n self.create_user()\n\n url = reverse_lazy('authenticate:logout')\n response = self.client.get(url)\n\n detail = str(response.data['detail'])\n status_code = int(response.data['status_code'])\n\n self.assertEqual(len(response.data), 2)\n self.assertEqual(detail, 'Authentication credentials were not provided.')\n self.assertEqual(status_code, 401)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_valid_logout(self):\n with self.client:\n # user registration\n user_response = register_user(self)\n response_data = json.loads(user_response.data.decode())\n self.assertTrue(response_data[\"Authorization\"])\n self.assertEqual(user_response.status_code, 201)\n\n # registered user login\n login_response = login_user(self)\n data = json.loads(login_response.data.decode())\n self.assertTrue(data[\"Authorization\"])\n self.assertEqual(login_response.status_code, 200)\n\n # valid token logout\n response = self.client.post(\n \"/auth/destroy_token\",\n headers=dict(\n Authorization=\"Token \"\n + json.loads(login_response.data.decode())[\"Authorization\"]\n ),\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data[\"status\"] == \"success\")\n self.assertEqual(response.status_code, 200)", "def process_logout():\n\n print \" LOGGED OUT USER \"\n\n del session[\"user_id\"]\n\n flash(\"You have Successfully Logged Out!\")\n\n return redirect(\"/\")", "def process_logout():\n\n print \" LOGGED OUT USER \"\n\n del session[\"user_id\"]\n\n flash(\"You have Successfully Logged Out!\")\n\n return redirect(\"/\")", "def logout():\n logout_user()\n flash(\"Successfully signed out\", category='info')\n return redirect(url_for('url.index'))", "def logout():\n\n do_logout()\n flash(f\"You are now logged out!\", \"success\")\n return redirect('/')", "def logout(self):\n driver = self.selenium_test.driver\n assert url_for('index') in driver.current_url\n driver.find_element_by_link_text('Sair').click()\n self.selenium_test.wait_to_be_logged_out()", "def logout():\n if 'access_token' in login_session:\n del login_session['access_token']\n del login_session['email']\n flash(\"you are now logout\")\n return redirect(url_for('catelog'))", "def logout():\n\n if do_logout():\n flash('Logout Successful.', 'success')\n\n return redirect('/login')", "def logout():\n\n do_logout()\n flash(\"Successfully logged out\", \"success\")\n return redirect('/')", "def logout(request):\n alogout(request)\n \n return HttpResponseRedirect(\"/\")", "def logout():\n logout_user()\n return redirect(url_for(\".login\"))", "def logout():\n logout_user()\n return redirect(url_for('main.index'))", "def logout():\n\n do_logout()\n flash('successfully logged out')\n return redirect(\"/\")", "def test_logout(self):\n self.client.login(**self.user_credentials)\n\n self.assertEqual(self.client.session['_auth_user_id'], self.user.pk)\n\n 
self.client.get('/logout')\n self.assertTrue('_auth_user_id' not in self.client.session)", "def logout(request):\n if request.method == 'POST':\n auth.logout(request)\n return redirect('home')\n else:\n return render(request, 'accounts/signup.html')", "def test_logout(self):\n\n result = self.client.get(\"/logout\", follow_redirects=True)\n self.assertIn(b\"Logout successful\", result.data)", "def logout(self, request, **kwargs):\n\t\tself.method_check(request, allowed=['get'])\n\t\tself.is_authenticated(request)\n\t\tif request.user and request.user.is_authenticated():\n\t\t\tlogout(request)\n\t\t\treturn self.create_response(request, { 'success': True })\n\t\telse:\n\t\t\treturn self.create_response(request, { 'success': False, 'error_message': 'You are not authenticated, %s' % request.user.is_authenticated() })", "def logout():\n logout_user()\n return redirect(url_for('auth.index'))", "def logout():\n session['logged_in'] = False\n return '', 204", "def logout_view(request):\n logout(request) # Logout request user\n messages.info(request, \"You have logged out successfully.\", fail_silently=False)\n return redirect(\"showLoginPage\")", "def logout():\n flash(\"You have been logged out\")\n session.pop(\"user\")\n return redirect(url_for(\"tips\"))", "def test_happy_login_logout(self):\n\n rv = self.login(\"john@simplylift.co\")\n assert rv.status_code in [200]\n assert b\"Logout\" in rv.data\n\n rv = self.logout()\n assert rv.status_code in [200]\n assert b\"Please enter your secretary email to continue\" in rv.data", "def logout():\n if session.get('authed', False):\n for i in ['phone', 'authed', 'confirmation_code']:\n if session.has_key(i):\n del session[i]\n return redirect(my_url('index'))", "def logout_view(request):\n auth_logout(request)\n return redirect('home')", "def logout_page(request):\n logout(request)\n return HttpResponseRedirect('/')", "def test_logout(self):\n with self.client:\n self.client.post(\n '/users/login',\n data=dict(username=\"eschoppik\", password=\"secret\"),\n follow_redirects=True\n )\n response = self.client.get('/users/logout', follow_redirects=True)\n self.assertIn(b'You are now logged out', response.data)\n self.assertFalse(current_user.is_authenticated)", "def signout(self):\n username = cherrypy.session.get('username')\n if username is None:\n pass\n else:\n cherrypy.lib.sessions.expire()\n raise cherrypy.HTTPRedirect('/')", "def test_anonymous_logout(self):\n resp = self.client.post(reverse('logout'))\n assert resp.status_code == 200, resp.content.decode('utf-8')\n assert not self.is_authenticated(self.user)\n assert not self.is_authenticated(self.other_user)", "def logout():\n logout_user()\n return redirect(url_for('home'))", "def logout():\n session.pop('username', None)\n session.pop('user_id', None)\n session.pop('logged_in', None)\n session.pop('is_admin', None)\n\n flash('Successfully logged out', 'alert-info')\n\n return redirect(url_for('index'))", "def logout(request):\n auth_logout(request)\n messages.success(request, 'You are now logged out')\n return redirect('/')", "def ttest_login_logout(self):\n rv = self.login(\n app.config['USERNAME'],\n app.config['PASSWORD']\n )\n assert b'You were logged in' in rv.data\n rv = self.logout()\n assert b'You were logged out' in rv.data\n rv = self.login(\n app.config['USERNAME'] + 'x',\n app.config['PASSWORD']\n )\n assert b'Invalid username' in rv.data\n rv = self.login(\n app.config['USERNAME'],\n app.config['PASSWORD'] + 'x'\n )\n assert b'Invalid password' in rv.data", "def 
user_logout(request):\r\n logout(request)\r\n return redirect('accounts:login')", "def logout_post():\n\treturn \"LOGOUT\"", "def logout(request):\n return internal_logout(request, next_page = reverse('wainz.views.composite'), redirect_field_name = 'next')", "def logout():\n logout_user()\n flash('You have successfully been logged out')\n\n # redirect to login page\n return redirect(url_for('auth.login'))", "def GET_logout(self):\r\n self.logout()\r\n return self.redirect('/')", "def log_out(request):\n logout(request)\n return redirect('user_login')", "def logout():\n session.pop('logged_in', None)\n flash('You were logged out', 'success')\n return redirect(url_for('show_entries'))", "def logout():\n rino.login.logout()", "def logout():\n session.pop('logged_in', None)\n session.pop('fname', None)\n session.pop('patron', None)\n flash('You were logged out')\n return redirect('/')", "def user_logout():\n\n session.pop('logged_in', None)\n flash('You are now logged out')\n\n return redirect('/')", "def logout():\n\n logout_user()\n return redirect(url_for('login'))", "def logout():\n logout_user()\n flash('You have successfully been logged out.')\n # redirect to the login page\n return redirect(url_for('view.login'))", "def test_logout(self):\n assert not self.is_authenticated(self.user)\n resp = self.client.post(\n reverse('login'),\n json.dumps({\n \"username\": self.USERNAME,\n \"password\": self.PASSWORD,\n }),\n content_type=\"application/json\"\n )\n assert resp.status_code == 200, resp.content.decode('utf-8')\n assert self.is_authenticated(self.user)\n assert not self.is_authenticated(self.other_user)\n\n # Now logout the logged in user.\n resp = self.client.post(reverse('logout'))\n assert resp.status_code == 200, resp.content.decode('utf-8')\n assert not self.is_authenticated(self.user)\n assert not self.is_authenticated(self.other_user)", "def logout():\n logout_user()\n flash('You have successfully been logged out.')\n\n # redirect to the login page\n return redirect(url_for('auth.login'))", "def logout():\n logout_user()\n flash('You have successfully been logged out.')\n\n # redirect to the login page\n return redirect(url_for('auth.login'))", "def logout(self):\n self.auth = None", "def s_logout(request):\n logout(request) # use django.contrib.auth.logout , clear all session , redirect to logout\n return redirect('/')", "def logOut(self):\n self.client.logout()", "def logout():\n logout_user()\n return redirect(url_for('index'))", "def logout():\n logout_user()\n return redirect(url_for('index'))", "def logout():\n\n logout_user()\n return redirect('/')", "def test_logout(self):\n\n result = self.client.get(\"logout\", follow_redirects=True)\n self.assertIn(\"<h1 class=\\\"title\\\">Bark Park!</h1>\", result.data)", "def test_user_logout(self):\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.get_token())\n response = self.client.delete(reverse('accounts:user-logout'))\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)", "def logout():\n try:\n user_pic = driver.find_element_by_class_name('m-topbar__userpic')\n user_pic.click()\n wait()\n click_on('Logout')\n wait()\n except Exception as e:\n return \"Error: \" + str(e)\n return \"Success\"", "def test_login_logout(self):\n self.register(\n app.config['TEST_USER'],\n app.config['TEST_PW'])\n\n rc = self.login(\n app.config['TEST_USER'],\n app.config['TEST_PW'])\n assert b'Logout' in rc.data\n\n rc = self.logout()\n assert b'You logged out.' 
in rc.data\n\n rc = self.login(\n app.config['TEST_USER'],\n 'wrong password')\n assert b'Invalid username or password.' in rc.data\n\n rc = self.login(\n 'user_doesnt_exist',\n 'wrong password')\n assert b'Invalid username or password.' in rc.data", "def logout(self):\n try:\n log.info(\"Logging out of the netscaler\")\n self.post(\"/logout\", {\"logout\": {}})\n except BadNetScaler as error:\n log.error(\"Failed to logout of the netscaler: %s\", error)\n self.sessionid = \"\"", "def test__logged_out_authenticated_user(self):\n self.client.login(\n username='volunteer1@example.com',\n password='volunteer1',\n )\n response = self.client.get('/o/logout')\n\n self.assertRedirects(\n response,\n settings.ANGULAR_ROOT,\n 302,\n fetch_redirect_response=False,\n )", "def logout_view(request):\n logout(request)\n return redirect('login')", "def logout_view(request):\n logout(request)\n return redirect('login')", "def logout_page(request):\n logout(request)\n return redirect('/')", "def logout_view(request):\n logout(request)\n return HttpResponseRedirect('/')", "def logout_view(request):\n logout(request)\n return HttpResponseRedirect('/')", "def logoutPage(request):\n\n logout(request)\n return redirect('index')", "def logoff(request):\n return render(request,\"index.html\",{\"text\":\"Sorry. You cannot logout.\"})", "def logout():\n if \"username\" in session.keys():\n del session[\"username\"]\n if not app.config[\"DISABLE_AUTH\"]:\n return redirect(url_for(\"login\") + \"?slo\")\n else:\n return redirect(url_for(\"index\"))", "def log_out(self):\n DB.log_out()\n self.customer.log_out()\n self.displayUi = LoginScreen()\n self.hide()\n self.displayUi.show()", "def logout():\n session_logout()\n return redirect(url_for(\"home\"))", "def logout():\n logout_user()\n return redirect(url_for('default.home'))" ]
[ "0.76756245", "0.75859284", "0.73818344", "0.73260444", "0.72436315", "0.72046804", "0.7117295", "0.7113737", "0.7077796", "0.69774604", "0.6975256", "0.69690436", "0.69545597", "0.69395155", "0.6934152", "0.69264627", "0.6844665", "0.68182045", "0.6816941", "0.6815389", "0.67708033", "0.6749815", "0.67123514", "0.6686903", "0.6674756", "0.66528636", "0.6649394", "0.6646848", "0.6645748", "0.66417825", "0.6620647", "0.6607984", "0.6607984", "0.6592491", "0.65905255", "0.6571922", "0.65697545", "0.65690655", "0.6565586", "0.6564561", "0.65632457", "0.6560979", "0.6560241", "0.65594774", "0.6554676", "0.6552264", "0.65516835", "0.6541552", "0.65357155", "0.6534575", "0.65328175", "0.6526146", "0.6522917", "0.65191495", "0.65187186", "0.651216", "0.65119207", "0.651171", "0.65103006", "0.65087694", "0.6507024", "0.65015787", "0.6500244", "0.64964545", "0.6495776", "0.6494746", "0.64925706", "0.6492117", "0.6488791", "0.648062", "0.6480013", "0.6476416", "0.64734507", "0.6470475", "0.646625", "0.64652437", "0.64652437", "0.6458047", "0.64574176", "0.64570326", "0.645458", "0.645458", "0.64545566", "0.64536583", "0.64507073", "0.6449846", "0.64479274", "0.6447721", "0.64417446", "0.64412916", "0.64412916", "0.6439364", "0.64377373", "0.64377373", "0.64331365", "0.64297944", "0.64274555", "0.642347", "0.6419039", "0.64171875" ]
0.64820796
69
Checks the main page is Ok
def test_main(self):
    path = reverse("main")
    request = RequestFactory().get(path)
    response = index(request)
    assert response.status_code == 200
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_page(self, html_content):\n if \"Sign in for the best experience\" in html_content:\n valid_page = False\n elif \"The request could not be satisfied.\" in html_content:\n valid_page = False\n else:\n valid_page = True\n return valid_page", "def _verify_page(self):", "def test_main_page(self):\n response = self.client.get(reverse('home'))\n self.assertEqual(response.status_code, 200)\n\n content = response.content.decode('utf-8')\n self.assertTrue('Improving the FOIA request experience' in content)", "def test_1_validate_landing_page(self):\n self.log.debug(\"Started test_valid_city_weather Test \")\n self.page.validate_correct_landing_page()\n\n self.log.debug(\"Checking all elements of home page\")\n self.page.validate_all_labels()", "def test_status_is_ok(self):\n why_page = self.client.get(\"/why/\")\n self.assertEqual(why_page.status_code, 200)", "def test_pages_are_valid(self):\n\n url = 'http://www.example.com'\n\n r = LiveRedirect(url=url,duration=HALF_DAY)\n r.save()\n\n TEST_URLS = [\n '%s/' % self.live_server_url,\n '%s/%s' % (self.live_server_url,r.slug),\n '%s/%s/' % (self.live_server_url,r.slug),\n ]\n\n for url in TEST_URLS:\n self.browser.get(url)\n\n body = self.browser.find_element_by_tag_name('body')\n title = self.browser.find_element_by_tag_name('title')\n\n # Check that it is not a 404 or 500\n self.assertNotIn('404',body.text,\"%s returns 404!\" % url)\n self.assertNotIn('500',body.text,\"%s returns 500!\" % url)\n\n # Check that title is valid\n\n self.assertNotIn('NO-TITLE',title.text,\"%s is using default base title!\" % url)\n self.assertIsNotNone(title.text, \"%s has no title!\" % url)\n self.assertNotEquals('',title.text, \"%s has no title!\" % url)", "def test_status_is_ok(self):\n home_page = self.client.get(\"/\")\n self.assertEqual(home_page.status_code, 200)", "def test_main_page_load(self):\n response = self.client.get(reverse(\"index\"))\n self.assertEqual(response.status_code, 200)", "def user_should_get_an_ok_response():\n assert web_app.validate_reponse()", "def test_homepage(self):\n\n response = self.client.get(\"/\")\n self.assertIn(\"Books</title>\", response.data)\n self.assertIn(\"Goodreads ID\", response.data)", "def test_home_view_is_status_ok(self):\n from imager_profile.views import HomeView\n req = self.request.get(\"/\")\n view = HomeView.as_view()\n response = view(req)\n self.assertTrue(response.status_code == 200)", "def IsOk(self):\r\n \r\n return True", "def test_page_existence(self):\r\n # Log in\r\n self.auth_page.visit()\r\n\r\n # Verify that each page is available\r\n for page in self.pages:\r\n page.visit()", "def test_legal_page(self):\n response = self.client.get(\"/legal/\")\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, \"search/base.html\")\n self.assertTemplateUsed(response, \"search/search_form.html\")\n self.assertTemplateUsed(response, \"search/legal.html\")", "def test_landing_page(self):\n response = self.app.get(\"/\", follow_redirects=True)\n self.assertEqual(response.status_code, 200)\n\n res_txt = response.get_data(as_text=True)\n\n self.assertIn(\"input\", res_txt)\n self.assertIn(\"button\", res_txt)\n self.assertIn(\"Welcome to\", res_txt)", "def test_homepage(self):\n \n result = self.client.get(\"/\")\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"What type of user are you?\", result.data)", "def test_status_home(self):\n self.assertEqual(200, self.response.status_code)", "def check_function():\r\n About(ROOT)\r\n # quit_or_not(root)\r", "def 
test_landing_page(self):\n # Create a test client\n client = server.app.test_client()\n\n # Use the test client to make requests\n result = client.get('/', follow_redirects=True)\n\n # Compare result.data with assert method\n self.assertIn(b'<p class=\"navbar-text\">Already have an account?</p>', \n result.data)", "def IsOk(self):\r\n \r\n return self.window != None", "def test_homepage(self):\n rc = self.app.get('/')\n assert b'Welcome to Code TA' in rc.data\n assert b'Logout' not in rc.data", "def check(self):\n if not self.logged_in:\n self.threaded_login()\n #QTimer().singleShot(2000,lambda: self.check())\n #return\n #print \"Getting weapon page\"\n self.threaded_get_page(\"weapon\", self.process_weapon_stats)\n if self.check_challenge:\n self.threaded_get_page(\"challenges\", self.process_challenge)", "def test_public_unit_page_html(self):\r\n html = self.get_page_html(self.vertical)\r\n self.validate_html_for_add_buttons(html)", "def test_main_page(remote_browser, base_url, logger_fixture):\n logger_fixture.info(\"===== test_main_page =====\")\n main_page = MainPage(remote_browser, base_url)\n main_page\\\n .open(logger_fixture)\\\n .check_featured_block_is_not_empty()", "def is_good_response(self, resp):\r\n\t\tcontent_type = resp.headers['Content-Type'].lower()\r\n\t\treturn (resp.status_code == 200 and content_type is not None and content_type.find('html') > -1)", "def test_home_page_returns_correct_html(self):\n request = HttpRequest()\n response = home_page(request)\n self.assertIn(\n b'<h1>42 Coffee Cups Test Assignment</h1>',\n response.content)", "def test_homepage(self):\n rv = self.app.get('/')\n assert 'Enter your url here' in rv.data", "def test_given_home_page_behavior(self):\n res = self.client().get('/')\n self.assertEqual(res.status_code, 200)\n json_res = json.loads(res.get_data(as_text=True))\n self.assertEqual('Home page', json_res['message'])", "def _check_ready(self, _widget, __event=None, __page=0):\r\n\r\n if self.cmbHardware.get_active() > 0:\r\n self.assistant.set_page_complete(self.fxdPageGeneral, True)\r\n else:\r\n self.assistant.set_page_complete(self.fxdPageGeneral, False)\r\n\r\n return False", "def test_public_pages_load(self):\r\n pages = (\r\n reverse('login'),\r\n reverse('signup'),\r\n )\r\n for page in pages:\r\n print(\"Checking '{0}'\".format(page))\r\n self.check_page_get(page, 200)", "def test_public_status_page_post_public_status_page(self):\n pass", "def is_good_response(self, resp):\r\n content_type = resp.headers['Content-Type'].lower()\r\n return (resp.status_code == 200 and content_type is not None and content_type.find('html') > -1)", "def test_page_existence(self):\r\n for page in self.pages:\r\n page.visit()", "def checkStatus(url):\n def checkForIndexPage(r):\n \"\"\"Checks whether it a given url is actually an Index Of page. 
Takes in a Request object\"\"\"\n soup = BeautifulSoup(r.text, 'lxml')\n head = soup.find('h1')\n if head != None and head.string != None and (\"Index of \" in head.string):\n return \"Shows 'Index Of' page ✘\" \n else:\n return \"Displays properly ✓\"\n\n returnString = \"\"\n try:\n r = requests.get(url)\n returnString += str(r.status_code) \n if r.status_code == 200: # if the page is accessible, then check whether it displays properly\n returnString += \"\\n\\t\" + checkForIndexPage(r)\n return returnString\n except Exception as e:\n return(e)", "def test_homepage(self):\n\n with self.client as client:\n response = client.get('/')\n html = response.get_data(as_text=True)\n self.assertEqual(response.status_code, 200)\n self.assertIn('<table class=\"board\">', html)\n self.assertIn('<table', html)\n self.assertIn('boggle homepage. used in testing', html)\n # test that you're getting a template", "def eighth_page_execution(self):\n self.errors_and_correct_input_values_helper()\n self.ninth_page.wait_for_page()", "def test_main_app(self):\n resp = self.app.get('/')\n # ensure relevant pieces of UI are returned\n assert 'Foggy Fork' in resp.data\n assert 'A San Francisco Food Truck Map' in resp.data\n assert 'Where in the fog are you looking for food?' in resp.data\n assert '<div id=\"map-canvas\"></div>' in resp.data", "def test_01_index(self):\r\n res = self.app.get(\"/\", follow_redirects=True)\r\n assert self.html_title() in res.data, res\r\n assert \"Create an App\" in res.data, res", "def test_create_page_with_main_box(self):\n\n main_block = PageMainBlock.objects.create(**_main_block_data)\n Page.objects.create(main_block=main_block, **_page_data)\n response = self.client.get(reverse('home'))\n self.assertEqual(response.status_code, 200)\n\n self.assertIn('text', response.context)\n self.assertIn('title', response.context)\n self.assertIn('image', response.context)", "def test_verify_main_screen_elements(self):\n\n test_name = sys._getframe().f_code.co_name\n\n log.info(\"###### TEST EXECUTION STARTED :: \" + test_name + \" ######\")\n\n with allure.step(\"Verify Main Screen Elements\"):\n result = self.main_page.verify_main_screen_elements()\n self.exe_status.mark_final(test_step=test_name, result=result)", "def test_parse_message_success_load_page(self):\n for test_case in self.success_test_params_load_page:\n expected = test_case[KEY_EXPECTED]\n try:\n app.index()\n bring_message = \"finishedLoading\"\n except:\n bring_message = \"finishedLoading\"\n\n self.assertEqual(expected, bring_message)", "def test_page(self, url):\n if self.get_url_components(url):\n components = self.get_url_components(url)\n else:\n return False\n for extension in self.file_extensions_only_first:\n self.check_in_front(components, extension)\n for extension in self.file_extensions_only_last:\n self.check_in_back(components, extension)\n for extension in self.file_extensions_all_possibilities:\n self.check_in_front(components, extension + '.')\n self.check_in_middle(components, '.' + extension)\n self.check_in_back(components, '.' 
+ extension)", "def check_mitm_status_page(self, check_url):\n response = requests.get(check_url)\n if response.status_code == 200:\n return response\n else:\n sys.exit(2)", "def _is_current_page(self):\n self.selenium.wait_until_location_contains(\"/list\",timeout=60, message=\"Records list view did not load in 1 min\")\n self.selenium.location_should_contain(\"General_Accounting_Unit__c\",message=\"Current page is not a DataImport List view\")", "def test_home_page(self):\n\n self.browser.get('http://localhost:8000/index.html')\n\n # there is a page title defined by <title></title> on the home page\n # check it\n\n self.assertIn('Stability within Movement',self.browser.title)\n\n # You will have an image for your home page I am assuming.\n # Put the name of your image here in place of homebrew.png\n # In general this is how we check for images on a page.\n\n # The user sees an image of sun hitting the Washington Monument\n\n m=self.browser.find_element_by_tag_name('img')\n self.assertIn('help.jpg',m.get_attribute('src'))\n\n a=self.browser.find_element_by_id('sun')\n a.click()\n\n self.assertIn('sun',self.browser.title)\n\n h=self.browser.find_element_by_tag_name('h1')\n\n m=self.browser.find_element_by_tag_name('img')\n\n # the user goes back to the home page\n # self.browser.back()\n self.browser.get('http://localhost:8000/index.html')\n\n # the user sees at the bottom of the page a link to credits\n l=self.browser.find_element_by_link_text('Credits')\n\n # the user clicks on the credits link\n l.click()\n # and sees the credits.html page\n a=self.browser.current_url\n self.assertIn(\"credits.html\",a)", "def test_questions_page(self):\n # import pdb\n # pdb.set_trace()\n\n result = self.client.get('/questions')\n self.assertIn('<h2>Submit A Question</h2>', result.data)\n\n print \"DONE WITH QUESTIONS PAGE CHECK\"", "def test_homepage(self):\n\n result = self.client.get(\"/\")\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"Homepage\", result.data)", "def test_list_views_check_main_title_descriptin(self):\n url = reverse('blogs:list')\n response = self.client.get(url)\n # TODO you need to check that the tiles are present in the list Dilshad. 
You are only looking for the http200\n self.assertEqual(response.status_code, 200)\n self.assertIn(self.main_title, str(response.content))\n self.assertIn(self.description1, str(response.content))\n self.assertIn(self.description2, str(response.content))", "def test_dashboard_page_status(self):\n response = self.client.get('/')\n self.assertEqual(response.status_code, 200)", "def test_parse_message_failure_load_page(self):\n for test_case in self.failure_test_params_load_page:\n expected = test_case[KEY_EXPECTED]\n try:\n app.index()\n bring_message = \"finishedLoading\"\n except:\n bring_message = \"notfinishedLoading\"\n self.assertNotEqual(expected, bring_message)", "def check_status(self):", "def _checkErrors(self, landPage):\n noLicenseTags = ['Purchase a Subscription',\n 'Purchase This Content',\n 'to gain access to this content',\n 'purchaseItem',\n 'Purchase Full Text',\n 'Purchase access',\n 'Purchase PDF',\n 'Pay Per Article',\n 'Purchase this article.',\n 'Online access to the content you have requested requires one of the following',\n 'To view this item, select one of the options below',\n 'PAY PER VIEW',\n 'This article requires a subscription.',\n 'leaf-pricing-buy-now',\n 'To access this article, please choose from the options below',\n 'Buy this article',\n 'Your current credentials do not allow retrieval of the full text.',\n 'Access to the content you have requested requires one of the following:',\n 'Online access to the content you have requested requires one of the following']\n if pageContains(landPage, noLicenseTags):\n logging.info(\"generic crawler found 'No license' on \" + landPage['url'])\n raise pubGetError('No License', 'noLicense', landPage['url'])\n errTags = ['This may be the result of a broken link',\n 'please verify that the link is correct',\n 'Sorry, we could not find the page you were looking for',\n 'We are now performing maintenance',\n 'DOI cannot be found in the DOI System']\n if pageContains(landPage, errTags):\n raise pubGetError('Error Message', 'errorMessage', landPage['url'])", "def main_page(self):\n choice = \"\"\n while choice != \"x\":\n header, main_menu, choices, underline = self.__get_format.main_menu_format()\n choice = self.__main_menu.main_page(header,main_menu,choices,underline)\n if choice == \"1\":\n self.__rent_controller.Rent_page()\n elif choice == \"2\":\n try_again = \"\"\n while try_again != \"n\":\n try_again, valid = self.__salesman_controller.sign_in_page()\n if valid == True:\n self.__salesman_controller.salesman_menu()\n elif choice == \"3\":\n self.__order_controller.find_order_process(page=2)\n elif choice == \"i\":\n self.__information_controller.information_page()", "def test_home_route_is_status_ok(self):\n response = self.client.get(\"/\")\n self.assertTrue(response.status_code == 200)", "def test_login_page_loads(self):\n response = self.client.get('/users/login')\n self.assertIn(b'Please login', response.data)", "def test_correct_main_page_template(self):\n response = self.client.get('/')\n self.assertTemplateUsed(response, 'main.html')", "def test_homepage(self):\r\n\r\n result = self.client.get(\"/\")\r\n self.assertIn(b\"Welcome!\", result.data)", "def test_index(self):\n\n result = self.client.get(\"/\")\n self.assertEqual(result.status_code, 200)\n self.assertIn(\"<h2>Please Write your Text</h2>\", result.data)", "def test_homepage_view(self):\n response = self.client.get(url_for('home'))\n self.assertEqual(response.status_code, 200)", "def test_homepage_view(self):\n response = 
self.client.get(url_for('home'))\n self.assertEqual(response.status_code, 200)", "def _check(self):\n if self.browser is None:\n self._init_browser()\n\n print(\"Checking if the browser can make login\")\n\n # Test 1: Should sign in using the form, signed should be True\n signed = self._sign_in()\n print(\"Finished, [signed = {}]\".format(signed))\n\n return signed", "def test_home_status_code(self):\n result = self.app.get('/')\n self.assertEqual(result.status_code, 200)", "def test_get_indexhtml(self):\n url = self.baseurl + \"/main\"\n req = urllib2.urlopen(url, None, 3)\n self.assertTrue( req.getcode() == 200 , \"200 OK Not FOUND!\")", "def checkForIndexPage(r):\n soup = BeautifulSoup(r.text, 'lxml')\n head = soup.find('h1')\n if head != None and head.string != None and (\"Index of \" in head.string):\n return \"Shows 'Index Of' page ✘\" \n else:\n return \"Displays properly ✓\"", "def test_home(self):\n\t\tresponse = self.client.get('/')\n\t\tself.assertContains(response, 'Home Page', 1, 200)", "def test_homepage_anon(self):\r\n\r\n with self.client:\r\n response = self.client.get('/')\r\n self.assertEqual(response.status_code, 200)\r\n self.assertIn(b'United States News', response.data)", "def sanity_check(self):\n return True", "def test_home(self):\n response = self.client.get('/')\n self.assertContains(response, 'Home Page', 1, 200)", "def preliminary_check_controls(self):\n\n # is the program still in a binding state?\n if self.is_binding:\n self.error_msg['text'] = 'You are still binding'\n self.display_object_on_canvas(\n self.error_msg,\n 50,\n self.controller.GAME_HEIGHT - self.error_msg.winfo_reqheight() - 15\n )\n\n # are the controls set all unique?\n elif len({\n self.controller.slide_up_control,\n self.controller.slide_down_control,\n self.controller.slide_left_control,\n self.controller.slide_right_control\n }) != 4:\n self.error_msg['text'] = 'All controls must be unique'\n self.display_object_on_canvas(\n self.error_msg,\n 50,\n self.controller.GAME_HEIGHT - self.error_msg.winfo_reqheight() - 15\n )\n\n # all tests passed?\n else:\n # save to file - do this\n\n # move to main menu frame\n self.controller.show_frame(MainMenu)", "def test_get_main_page_without_logged_in_user(self):\n response = self.testapp.get('/')\n self.assertEqual(response.status_int, 200)", "def test_home_page_not_unified(self, user_type, expected_message):\n self.create_user_for_course(self.course, user_type)\n\n # Render the course home page\n url = course_home_url(self.course)\n response = self.client.get(url)\n\n # Verify that welcome messages are never shown\n self.assertNotContains(response, TEST_WELCOME_MESSAGE)\n\n # Verify that the outline, start button, course sock, course tools, and welcome message\n # are only shown to enrolled users or unenrolled staff.\n is_enrolled = user_type is CourseUserType.ENROLLED\n is_unenrolled_staff = user_type is CourseUserType.UNENROLLED_STAFF\n expected_count = 1 if (is_enrolled or is_unenrolled_staff) else 0\n self.assertContains(response, TEST_CHAPTER_NAME, count=expected_count)\n self.assertContains(response, 'Start Course', count=expected_count)\n self.assertContains(response, TEST_COURSE_TOOLS, count=expected_count)\n self.assertContains(response, 'Learn About Verified Certificate', count=(1 if is_enrolled else 0))\n\n # Verify that the expected message is shown to the user\n self.assertContains(response, '<div class=\"user-messages\"', count=1 if expected_message else 0)\n if expected_message:\n self.assertContains(response, expected_message)", "def 
test_content_is_correct(self):\n home_page = self.client.get(\"/\")\n self.assertTemplateUsed(home_page, \"home/home.html\")\n home_page_template_output = render_to_response(\"home/home.html\", {\"active\": \"home\"}).content\n self.assertEqual(home_page.content, home_page_template_output)", "def test_home_page_returns_correct_html(self):\n\n request = HttpRequest()\n response = home_view(request)\n html = response.content.decode('utf8')\n self.assertTrue(html.startswith('<!doctype html>'))\n self.assertIn('<title>home</title>', html)\n self.assertTrue(html.endswith('</html>'))", "def test_first_page_passes(self):\n\n self.page.open_site(PageLocators.PREVIOUS_LINK)\n self.page.fill_all_fields()\n self.page.send_the_data()", "def show_main_page(request, error_msg=None):\n request.method='GET'\n return MainPage(request, error_msg)", "def test_helpful_page_view(self):\n target_url = url_for('dashboard.helpful_pages')\n redirect_url = url_for('users.login', next=target_url)\n response = self.client.get(target_url)\n self.assertEqual(response.status_code, 302)\n self.assertRedirects(response, redirect_url)", "def test_important_page(self):\n\n result = self.client.get(\"/\", follow_redirects=True)\n self.assertIn(\"Email\", result.data)", "def test_01_front_page(self):\r\n url = '/'\r\n # As Anonymou user\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Top users should not be shown to anonymous users\"\r\n assert dom.find(id='top_users') is None, err_msg\r\n # As Authenticated user but NOT ADMIN\r\n self.signin()\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Top users should not be shown to authenticated users\"\r\n assert dom.find(id='top_users') is None, err_msg\r\n self.signout\r\n # As Authenticated user but ADMIN\r\n res = self.signin(email=self.root_addr, password=self.root_password)\r\n print res.data\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Top users should be shown to admin\"\r\n assert dom.find(id='top_users') is not None, err_msg\r\n self.signout()", "def testindex(self):\n rv = self.app.get('/')\n self.assertEqual(rv.status_code, 302, \"homepage didnot load\")", "def test_can_create_redirect(self):\n\n #Homepage\n self.browser.get(self.live_server_url)\n\n #Look for the submit button and the two inputs\n url_field = self.browser.find_element_by_id('id_url')\n\n #duration is a slider now, so find that\n duration_field = self.browser.find_element_by_id('slider')\n\n # Enter something into the inputs\n url_field.send_keys('www.example.com')\n duration_field.send_keys(Keys.RIGHT)\n\n submit_button = self.browser.find_element_by_tag_name('input')\n submit_button.submit()\n\n # Ensure that the submit doesn't redirect the user somewhere stupid\n body = self.browser.find_element_by_tag_name('body')\n\n # Check that it is not a 404 or 500\n self.assertNotIn('404',body.text)\n self.assertNotIn('500',body.text)", "def test_postflight_page_status(self):\n response = self.client.get('/postflight/')\n self.assertEqual(response.status_code, 200)", "def test_home_view_without_data(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)\n self.assertIn('42 Coffee Cups Test Assignment', response.content)\n self.assertIn('No data', response.content)\n self.assertNotIn('Andrei', response.content)\n self.assertNotIn('Herasko', response.content)", "def test_index_page(self):\n response = 
self.client.get(\"\")\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, \"search/base.html\")\n self.assertTemplateUsed(response, \"search/search_form.html\")\n self.assertTemplateUsed(response, \"search/index.html\")", "def test_content_is_correct(self):\n why_page = self.client.get(\"/why/\")\n self.assertTemplateUsed(why_page, \"home/why.html\")\n why_page_template_output = render_to_response(\"home/why.html\", {\"active\": \"why\"}).content\n self.assertEqual(why_page.content, why_page_template_output)", "def test_splash_page(self):\n response = self.testapp.get('/')\n self.assertEqual(response.status_int, 200)\n response.mustcontain(\n 'Bite-sized learning journeys',\n 'Browse the explorations gallery', '100% free!',\n 'Learn', 'About', 'Contact',\n # No navbar tabs should be highlighted.\n no=['class=\"active\"'])", "def test_homepage_render(self):\n\n result = self.client.get(\"/\")\n self.assertIn(\"<h1 class=\\\"title\\\">Bark Park!</h1>\", result.data)", "def validate(self, response):\n return response[\"status_code\"] == 1", "def test_get_ok(test_case, page):\n with test_case.app.test_client() as c:\n test_case.assertEqual(200, c.get('dashboard/{}'.format(page)).status_code)", "def second_page_execution(self):\n self.errors_and_correct_input_values_helper()\n self.third_page.wait_for_page()", "def test_01_front_page(self):\r\n url = '/'\r\n # As Anonymou user\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Top users should be shown to anonymous users\"\r\n assert dom.find(id='top_users') is not None, err_msg\r\n # As Authenticated user but NOT ADMIN\r\n self.signin()\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Top users should be shown to authenticated users\"\r\n assert dom.find(id='top_users') is not None, err_msg\r\n self.signout\r\n # As Authenticated user but ADMIN\r\n self.signin(email=self.root_addr, password=self.root_password)\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Top users should be shown to admin\"\r\n assert dom.find(id='top_users') is not None, err_msg\r\n self.signout()", "def test_view(self):\n self.assertEqual(status.HTTP_200_OK, self.response.status_code)", "def test_application_running(self):\n response = self.client.get('/login', content_type='html/text')\n self.assertEqual(response.status_code, 200)\n response = self.client.get('/signup', content_type='html/text')\n self.assertEqual(response.status_code, 200)", "def init_home_page(self):\n rps = self.session.get(home_url, headers = BROWSER_HEADERS)\n # with open('first_get.html', 'w') as f: f.write(rps.text)\n if CAPTCHA_ELEMENT_ID in rps.text:\n # print(\"CAPTCHA ELEMENT DETECTED!\")\n return self.bypass_captcha(rps.text)\n else:\n print(\"NO CAPTCHA\")\n return True", "def is_good_response(resp):\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200 and content_type is not None\n and content_type.find('html') > -1)", "def test_fac_admin_page(self):\n self.login(self.fac_admin.user.username)\n self._got_to_fac_admin_page()\n self.check_page_title(self.admin_config.get('FAC_ADMIN').get('PAGE_TITLE'))\n self.check_page_contains_ids(self.admin_config.get('FAC_ADMIN').get('ADMIN_LINKS'))", "def test_root(self):\n rv = self.root()\n self.assertEquals(rv.status_code, 200)\n self.assertIn('Welcome to Word Play', rv.get_data(as_text=True))", "def test_view_index(self):\r\n\r\n resp 
= self.client.get_html(self.url)\r\n self.assertEqual(resp.status_code, 200)\r\n self.assertIn('course-nav-list', resp.content)", "def is_good_response(resp):\n\tcontent_type = resp.headers['Content-Type'].lower()\n\treturn (resp.status_code == 200 \n\t\tand content_type is not None \n\t\tand content_type.find('html') > -1)", "def test_indexPage(self):\n LOGGER.debug(\"XXX: test_indexPage entered\")\n LOGGER.debug(\"XXX: %s\", settings.TEMPLATES)\n self.post.status = 'publish'\n self.post.save()\n response = self.client.get('/blog/')\n self.assertContains(response, self.post.title)\n # how to debug the loading stuff\n # except TemplateDoesNotExist, e:\n # for tmpl, msg in e.tried:\n # LOGGER.debug(\"XXX Tried '%s'\", tmpl.name)", "def third_page_execution(self):\n self.errors_and_correct_input_values_helper()\n self.fourth_page.wait_for_page()", "def check(self):\n return True" ]
[ "0.73718345", "0.7190955", "0.6915523", "0.65855736", "0.65250415", "0.6493413", "0.6427644", "0.64157593", "0.6401849", "0.6310914", "0.6305969", "0.630592", "0.6293213", "0.62864685", "0.6239085", "0.62373424", "0.61935997", "0.61764014", "0.6162644", "0.61491907", "0.613365", "0.6130083", "0.6119502", "0.608225", "0.60818833", "0.6075651", "0.60724694", "0.60672814", "0.60591996", "0.6041601", "0.6036187", "0.60167325", "0.59985906", "0.5975354", "0.5974946", "0.5958473", "0.594822", "0.59479254", "0.5944176", "0.5941803", "0.5935101", "0.5931914", "0.59194714", "0.5918523", "0.59179807", "0.5904237", "0.5903461", "0.5902082", "0.58975434", "0.5893832", "0.5889658", "0.58847904", "0.58781606", "0.58771986", "0.58712304", "0.58666503", "0.58286417", "0.5808185", "0.5804738", "0.5804738", "0.57960296", "0.5792219", "0.57868606", "0.57852596", "0.57785136", "0.5767263", "0.5758468", "0.57578397", "0.5755629", "0.575067", "0.574825", "0.574681", "0.57386297", "0.5738594", "0.5732826", "0.5729305", "0.5727506", "0.57251596", "0.57184845", "0.57119733", "0.571077", "0.57106507", "0.57098454", "0.5707463", "0.57044584", "0.56966805", "0.5694316", "0.5691917", "0.5691364", "0.5691309", "0.56900173", "0.5683571", "0.56819344", "0.5677087", "0.56763756", "0.567494", "0.56720954", "0.5670854", "0.566868", "0.5667056", "0.5665596" ]
0.0
-1
Checks getting file from remote url works fine
def test_get_file_fail(self): from django.contrib.messages import get_messages path = reverse("setting-csv") user = mixer.blend(User, is_staff=True, is_superuser=True) client = Client() client.force_login(user) r = client.post(path, {"title": "hotel", "url": "http://rachel.wrongurltofetchdata.nl/djangocase/hotel.csv", "username": "py", "password": "30_bumps", "save": "on"}) messages = list(get_messages(r.wsgi_request)) assert r.status_code == 200 assert len(messages) == 1 assert "Received an error" in str(messages[0])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getfile(url):\n try:\n return urlreq.urlopen(url)\n except urlreq.HTTPError as e:\n safeprint(\"Sever returned with response code \" + str(e.getcode()) + \", download failed.\")", "def check_remote_file_exists(url, login=None, password=None):\r\n credentials = None\r\n if login and password:\r\n credentials = login, password\r\n\r\n response = requests.get(url,\r\n stream=True,\r\n verify=False,\r\n auth=credentials)\r\n if response.status_code >= 400 or response.status_code < 200:\r\n raise Exception('Returned wrong status code: {}'.format(response.status_code))\r\n\r\n response.close()", "def file_exist(file_url):\n try:\n response = requests.head(file_url)\n if 200 <= response.status_code < 300:\n return True\n return False\n except ConnectionError:\n return False", "def get_remote_file(url, success=200, timeout=10):\n try:\n app.logger.info(\"GET: %s\" % url)\n auth = None\n res = requests.get(url, stream=True, timeout=timeout, auth=auth)\n if res.status_code == success:\n return res.headers.get('Content-Type', 'application/octet-stream'), res.raw.data\n except:\n pass\n return None, None", "def testRemote(self):\n try:\n remoteLocator = self.__httpsFileUrl\n ok = self.__fileU.isLocal(remoteLocator)\n self.assertFalse(ok)\n #\n ok = self.__fileU.exists(remoteLocator)\n self.assertTrue(ok)\n size = self.__fileU.size(remoteLocator)\n self.assertGreaterEqual(size, 1000)\n\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()", "def ReadRemoteFile(url) -> bytes:\n local_url = download_util.DownloadResource(url)\n return file_util.OpenFile(local_url).read()", "def _get_file(cls, url: str, ende: str) -> bool:\n resposta = requests.get(url)\n if resposta.status_code == requests.codes.OK:\n with open(ende, 'wb') as novo_arquivo:\n novo_arquivo.write(resposta.content)\n return True\n else:\n resposta.raise_for_status()\n return False", "def get_rmt_file(uri, creds, sFile):\n\n import urllib\n try:\n urllib.urlretrieve(uri, sFile)\n return True\n\n except:\n return False", "def get_remote_file(url):\n # Disable the proxies by not trusting the env\n session = requests.Session()\n session.trust_env = False\n\n # Make the request\n requests.packages.urllib3.disable_warnings()\n try:\n r = session.get(url, verify=False)\n except requests.exceptions.RequestException as e:\n # catastrophic error. 
bail.\n print(e)\n sys.exit(1)\n\n r = session.get(url, verify=False)\n remote_file = r.text\n return remote_file", "def web_get_file(self, url):\n try:\n print(url)\n response = requests.get(url, verify=False)\n file_buffer = BytesIO(response.content)\n file_buffer.seek(0)\n return file_buffer\n except:\n print(traceback.print_exc())\n return None", "def getfilehttps(self, url):\n ctx = ssl.create_default_context()\n ctx.check_hostname = False\n ctx.verify_mode = ssl.CERT_NONE\n response = urllib.request.urlopen(url, context=ctx)\n result = response.read()\n return result", "def test_raw_file_url_error(self):\n repository = self.remote_repository\n\n self.spy_on(repository._get_file_uncached,\n op=kgb.SpyOpReturn(b'first'))\n\n self.assertEqual(repository.get_file('PATH', 'd7e96b3'),\n b'first')\n\n # Ensure output of fake result matches.\n repository._get_file_uncached.unspy()\n self.spy_on(repository._get_file_uncached,\n op=kgb.SpyOpReturn(b'second'))\n\n # Grab from cache when no changes and change fake result to confirm\n # it is not called.\n self.assertEqual(repository.get_file('PATH', 'd7e96b3'),\n b'first')\n\n # When raw_file_url changed, do not grab from cache and ensure output\n # equals second fake value.\n repository.raw_file_url = \\\n 'http://github.com/api/v2/yaml/blob/show/reviewboard/<revision>'\n\n self.assertEqual(repository.get_file('PATH', 'd7e96b3'),\n b'second')", "def test_unfetchable_url(self):\r\n url = u'file://test.html'\r\n read = readable.ReadUrl.parse(url)\r\n self.assertEqual(read.status, 901)", "def get(self, url, path):\n rpath = urllib.parse.urlparse(url).path\n try:\n self.sftp.get(rpath, path)\n except Exception as e:\n osaka.utils.LOGGER.warning(\n \"Encountered exception: {}\\n{}\".format(e, traceback.format_exc())\n )\n raise osaka.utils.OsakaFileNotFound(\"File {} doesn't exist.\".format(url))", "def test_file_managed_http_source(file, tmp_path, remote_grail_scene33):\n name = str(tmp_path / \"testfile\")\n ret = file.managed(\n name=name,\n source=remote_grail_scene33.url,\n source_hash=remote_grail_scene33.hash,\n skip_verify=False,\n )\n assert ret.result is True", "def getOriginalFile(url):\n # does url exist?\n if url is None or url is \"\":\n return", "def _check_source (fileurl, path_unzip, outfile) :\n if outfile is not None and os.path.splitext (outfile)[1].lower () == os.path.splitext (fileurl)[1].lower () :\n file = _check_url_file (fileurl, path_download = path_unzip, outfile = outfile)\n return file\n else :\n file = _check_url_file (fileurl, path_download = path_unzip, outfile = None)\n txt = _check_zip_file (file, path_unzip = path_unzip, outfile = outfile)\n if not os.path.exists (txt):\n message = \"hal_core._check_source: unable to find file \" + txt + \" source (\" + fileurl + \")\"\n raise PQHException (message)\n return txt", "def get_file(cls, url, working_dir):\n if url.lower().startswith(\"s3://\"):\n return cls._s3_get_file(url)\n elif url.lower().startswith(\"http\"):\n return cls._http_get_file(url)\n else:\n return cls._fs_get_file(url, working_dir)", "def get_remote_content(self, path):\n if path.startswith(\"http\"):\n page_path = path\n elif path.startswith(\"www\"):\n page_path = \"https://\" + path\n else:\n page_path = self.source + path\n \n print(\"Getting \" + page_path)\n \n try:\n resp = requests.get(page_path)\n except:\n print(\"Unable to get \" + page_path)\n return None\n \n if resp.status_code == 200:\n return resp.content\n else:\n print(\"Unable to get \" + page_path + \" Response = \" + 
str(resp.status_code))\n return None", "def get_check_url(self,url):\n r = requests.get(url).status_code\n if r==requests.codes.ok:\n return(True)\n else:\n print \"something wrong! status_code: \" + r\n return(False)", "def getfile(link):\r\n\r\n global args\r\n\r\n file_data = None\r\n trying_count = 1\r\n print('')\r\n while file_data == None and trying_count <= args.try_limit:\r\n try:\r\n print('\\rTrying to get {}... ({}/{})'\r\n .format(link, trying_count, args.try_limit), end='')\r\n back_data = requests.get(link)\r\n if back_data.status_code == 200:\r\n file_data = back_data.content\r\n except:\r\n pass\r\n trying_count += 1\r\n if file_data == None:\r\n print(' FAILED!')\r\n else:\r\n print(' SUCCESS.')\r\n return file_data", "def _get(self, remote_filename, local_path):\n\n with local_path.open('wb') as local_file:\n file_id = self.get_file_id(remote_filename)\n if file_id is None:\n raise BackendException(\n 'File \"%s\" cannot be downloaded: it does not exist' %\n remote_filename)\n\n response = self.http_client.get(\n self.content_url + '/nodes/' + file_id + '/content', stream=True)\n response.raise_for_status()\n for chunk in response.iter_content(chunk_size=DEFAULT_BUFFER_SIZE):\n if chunk:\n local_file.write(chunk)\n local_file.flush()", "def get_file(self):\n while not (self.is_connection_working()):\n print('Connection is not working. Reason should be printed above. Sleeping 5 minutes and retrying.')\n time.sleep(300)\n i = 0\n while True:\n if i >= 3:\n print('Looks like file {} is really not on FTP. Skipping.'.format(self.url))\n return\n if self.file_exists_on_ftp():\n with closing(request.urlopen(self.url, )) as r:\n with open(self.save_filepath, 'wb') as f:\n shutil.copyfileobj(r, f)\n if i > 0:\n print('Download succeeded on attempt {}'.format(i+1))\n return\n else:\n print(\n 'requests.urlopen error. This sometimes means that file {} \"not exists\" on FTP '\n 'but sometimes it is just \"erruption on the Sun\" and file is downloaded on second attempt. '\n 'Sleeping 1 minute and retrying download. Retry will be done {} more times'.format(self.url,\n 3 - (i + 1)))\n time.sleep(60)\n i += 1\n continue\n # print('WARNING: Connection is OK, but system was not able to get file. 
Skipping.')", "def download_file (url):\n\n '''\n Try and download the file given in the url,\n throw up an error if not possible.\n '''\n try:\n ret = urllib2.urlopen (url)\n except urllib2.HTTPError:\n return None\n except urllib2.URLError:\n return None\n\n print \"Downloaded \" + url\n\n return ret", "def _fs_get_file(url, working_dir):\n if not os.path.isabs(url) and working_dir:\n url = os.path.join(working_dir, url)\n\n try:\n with codecs.open(url, 'r', encoding='utf-8') as f:\n return f.read()\n except Exception as e:\n raise ScrBaseException(\"Could not load file from {0}: {1}\".format(url, e))", "def test_existing_file(self):\n # Send the request\n request = webhttp.message.Request()\n request.method = \"GET\"\n request.uri = \"/test/index.html\"\n request.set_header(\"Host\", \"localhost:{}\".format(portnr))\n request.set_header(\"Connection\", \"close\")\n self.client_socket.send(str(request).encode())\n\n #Get the resource to compare\n wantedres = webhttp.resource.Resource(\"/test/index.html\")\n \n # Test response\n message = self.client_socket.recv(1024)\n response = self.parser.parse_response(message)\n self.assertEqual(response.code, 200)\n self.assertEqual(response.body, wantedres.get_content())", "def req_CHECKURL(self, url):\n # TODO: what about those MULTI and list to be returned?\n # should we return all filenames or keys within archive?\n # might be way too many?\n # only if just archive portion of url is given or the one pointing\n # to specific file?\n lgr.debug(\"Current directory: %s, url: %s\" % (os.getcwd(), url))\n akey, afile, attrs = self._parse_url(url)\n size = attrs.get('size', None)\n\n # But reply that present only if archive is present\n # TODO: this would throw exception if not present, so this statement is kinda bogus\n akey_fpath = self.get_contentlocation(akey) #, relative_to_top=True))\n if akey_fpath:\n akey_path = opj(self.path, akey_fpath)\n\n # if for testing we want to force getting the archive extracted\n # _ = self.cache.assure_extracted(self._get_key_path(akey)) # TEMP\n efile = self.cache[akey_path].get_extracted_filename(afile)\n\n if size is None and exists(efile):\n size = os.stat(efile).st_size\n\n if size is None:\n size = 'UNKNOWN'\n\n # FIXME: providing filename causes annex to not even talk to ask\n # upon drop :-/\n self.send(\"CHECKURL-CONTENTS\", size) #, basename(afile))\n\n # so it was a good successful one -- record\n self._last_url = url\n else:\n # TODO: theoretically we should first check if key is available from\n # any remote to know if file is available\n self.send(\"CHECKURL-FAILURE\")", "def _get(conn, remote_file, bucket_name=BUCKET_NAME):\n contents = None\n try:\n reply = conn.get(bucket_name, remote_file)\n contents = reply.body\n if reply.http_response.status != 200:\n print 'Failed to fetch current_remote metadata'\n contents = None\n except:\n contents = None\n return contents", "def test_result_file_path_get(self):\n headers = { \n 'Accept': 'application/zip',\n }\n response = self.client.open(\n '/v1/result/{file_path}'.format(file_path='file_path_example'),\n method='GET',\n headers=headers)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def get_file(url):\n helpers.make_workdir() # create temp working directory\n file_url = url + constant.MALICIOUS_LOCATION\n print(file_url)\n filename = wget.download(file_url, out=constant.WORKDIR)\n return filename", "def test_get_file_exists_caching_with_raw_url(self):\n repository = self.remote_repository\n\n 
self.spy_on(repository._get_file_exists_uncached,\n op=kgb.SpyOpReturn(True))\n\n # Use spy to put key into cache\n self.assertTrue(repository.get_file_exists('PATH', 'd7e96b3'))\n\n # Remove spy to ensure key is still in cache without needing spy\n repository._get_file_exists_uncached.unspy()\n self.assertTrue(repository.get_file_exists('PATH', 'd7e96b3'))\n\n # Does not exist when raw_file_url changed because it is not cached.\n repository.raw_file_url = \\\n 'http://github.com/api/v2/yaml/blob/show/reviewboard/<revision>'\n\n self.assertFalse(repository.get_file_exists('PATH', 'd7e96b3'))", "def get_remote_bytes(file_url) -> io.BytesIO:\n result = urlfetch.fetch(file_url)\n return io.BytesIO(result.content)", "def remote(self, requests, file, remoteHost):\n # Set the source and dest paths\n remote_url = self.base_url + '/remote?file=' + file + \"&host=\" + remoteHost\n\n print(\"Making remote request: \" + remote_url)\n\n r = requests.get(remote_url, max_price=10)\n\n print(\"Remote request completed.\")\n\n return r.json()", "def download (httpfile, path_unzip = None, outfile = None) :\n if path_unzip is None : path_unzip = GetPath ()\n file = _check_source (httpfile, path_unzip = path_unzip, outfile = outfile)\n return file", "def test_get_blob_response_return_blob_from_local_if_url_is_local(\n self, mock_send_get_request\n ):\n # Arrange / Act\n mock_send_get_request.return_value = \"local\"\n return_value = BlobDownloader(\n f\"{settings.SERVER_URI}/987653456789\"\n ).get_blob_response()\n # Assert\n self.assertEqual(return_value, \"local\")", "def test_nonexistant_file(self):\n # Send the request\n request = webhttp.message.Request()\n request.method = \"GET\"\n request.uri = \"/test/nofilewiththisnameright.html\"\n request.set_header(\"Host\", \"localhost:{}\".format(portnr))\n request.set_header(\"Connection\", \"close\")\n self.client_socket.send(str(request).encode())\n\n # Test response\n message = self.client_socket.recv(1024)\n response = self.parser.parse_response(message)\n self.assertEqual(response.code, 404)\n self.assertEqual(response.body, \"404 \" + webhttp.consts.REASON_DICT[404])", "def _download_epw_file(url):\n r = requests.get(url)\n if r.ok:\n # py2 and 3 compatible: binary write, encode text first\n log.debug(\" ... 
OK!\")\n return io.StringIO(r.text)\n else:\n log.error(\" connection error status code: %s\" % r.status_code)\n r.raise_for_status()", "def testFtpUrl(self):\n try:\n remoteLocator = self.__ftpFileUrl\n # fn = self.__fileU.getFileName(remoteLocator)\n ok = self.__fileU.isLocal(remoteLocator)\n self.assertFalse(ok)\n #\n dirPath = os.path.join(self.__workPath, \"chem_comp_models\")\n lPath = os.path.join(dirPath, self.__fileU.getFileName(self.__ftpFileUrl))\n ok = self.__fileU.get(remoteLocator, lPath)\n self.assertTrue(ok)\n ok = self.__fileU.exists(lPath)\n self.assertTrue(ok)\n ok = self.__fileU.isLocal(lPath)\n self.assertTrue(ok)\n tPath = self.__fileU.getFilePath(lPath)\n self.assertEqual(lPath, tPath)\n fp = self.__fileU.uncompress(lPath, outputDir=dirPath)\n ok = fp.endswith(\"chem_comp_model.cif\")\n self.assertTrue(ok)\n\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()", "def test_image_url(self):\r\n img_url = 'http://www.ndftz.com/nickelanddime.png'\r\n read = ReadUrl.parse(img_url)\r\n\r\n self.assertTrue(\r\n read.status == 200, \"The status is 200: \" + str(read.status))\r\n self.assertTrue(\r\n read.content is None, \"Content should be none: \")", "def test_non_net_url(self):\r\n test_url = \"http://r2\"\r\n read = ReadUrl.parse(test_url)\r\n\r\n self.assertTrue(\r\n read.status == 901,\r\n \"The status is 901: \" + str(read.status))\r\n self.assertTrue(not read.is_image(), \"The content is not an image\")\r\n self.assertTrue(\r\n read.content is None,\r\n \"Content should be none: \" + str(read.content))", "def test_file_managed_http_source_skip_verify(file, tmp_path, remote_grail_scene33):\n name = str(tmp_path / \"testfile\")\n ret = file.managed(name=name, source=remote_grail_scene33.url, skip_verify=True)\n assert ret.result is True", "def maybe_download(filename, url, expected_bytes):\n if not os.path.exists(filename):\n filename, _ = urllib.request.urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filename)\n else:\n print(statinfo.st_size)\n raise Exception(\n 'Failed to verify ' + filename + '. Can you get to it with a browser?')\n return filename", "def maybe_download(filename, url, expected_bytes):\n if not os.path.exists(filename):\n filename, _ = urllib.request.urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filename)\n else:\n print(statinfo.st_size)\n raise Exception(\n 'Failed to verify ' + filename + '. Can you get to it with a browser?')\n return filename", "def maybe_download(filename, url, expected_bytes):\n if not os.path.exists(filename):\n filename, _ = urllib.request.urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filename)\n else:\n print(statinfo.st_size)\n raise Exception(\n 'Failed to verify ' + filename + '. 
Can you get to it with a browser?')\n return filename", "def request(self, protocol, root, directory, filename):\n check_file_location = urllib.parse.quote(directory + filename)\n\n print(\"GET: \" + protocol + root + check_file_location)\n try:\n if protocol == 'http://':\n conn = http.client.HTTPConnection(root)\n elif protocol == 'https://':\n conn = http.client.HTTPSConnection(root)\n else:\n raise ValueError('Invalid protocol!')\n if self.cookie:\n headers = {'Cookie': self.cookie}\n conn.request(\"GET\", check_file_location, headers)\n else:\n conn.request(\"GET\", check_file_location)\n response = conn.getresponse()\n print(response.status, response.reason)\n except ssl.SSLError as err:\n print(\"Request failed, looks like it doesn't support HTTPS?\")\n print(err)\n return False\n except Exception as err:\n print('Request failed.')\n print(err)", "def fetch_maybe(cls, url, path, save=False):\n if os.path.isfile(path):\n # print(\"Found %s\" % os.path.basename(path))\n with open(path, \"rb\") as file:\n return file.read(), True\n if save:\n return cls.fetch_and_save(url, path), False\n return cls.fetch_with_retry(url), False", "def test_file_managed_http_source_no_hash(file, tmp_path, remote_grail_scene33):\n name = str(tmp_path / \"testfile\")\n ret = file.managed(name=name, source=remote_grail_scene33.url, skip_verify=False)\n # This should fail because no hash was provided\n assert ret.result is False", "def __getFile_httplib(self, _src, _dst):\n\n #-------------------- \n # Pre-download callbacks\n #-------------------- \n self.runEventCallbacks('downloadStarted', _src, -1)\n self.runEventCallbacks('downloading', _src, 0)\n\n\n\n #-------------------- \n # Download\n #-------------------- \n response = self.__httpsRequest('GET', _src)\n data = response.read() \n with open(_dst, 'wb') as f:\n f.write(data) \n\n\n\n #-------------------- \n # Post-download callbacks\n #-------------------- \n self.removeFromDownloadQueue(_src)\n self.runEventCallbacks('downloadFinished', _src)", "def download_allowed(self, url, scheme, netloc):\n robot = urllib.robotparser.RobotFileParser('%s://%s/%s' % (scheme, netloc, config.ROBOTS))\n try:\n robot.read()\n except ValueError:\n raise urllib.error.URLError('<urlopen error no protocol given>')\n\n return robot.can_fetch(config.USER_AGENT, url)", "def fetch_file(url, filename):\n from clinica.utils.exceptions import ClinicaException\n from urllib.request import Request, urlopen\n from urllib.error import URLError\n import shutil\n import ssl\n import os.path\n from clinica.utils.stream import cprint\n\n head_tail = os.path.split(filename)\n if not os.path.exists(head_tail[0]):\n cprint('Path to the file does not exist')\n cprint('Stop Clinica and handle this error')\n\n # Download the file from `url` and save it locally under `file_name`:\n cert = ssl.get_server_certificate((\"aramislab.paris.inria.fr\", 443))\n gcontext = ssl.SSLContext()\n req = Request(url)\n try:\n response = urlopen(req, context=gcontext)\n except URLError as e:\n if hasattr(e, 'reason'):\n cprint('We failed to reach a server.')\n cprint(['Reason: ' + e.reason])\n elif hasattr(e, 'code'):\n cprint('The server could not fulfill the request.')\n cprint(['Error code: ' + e.code])\n else:\n try:\n with open(filename, 'wb') as out_file:\n shutil.copyfileobj(response, out_file)\n except OSError as err:\n cprint(\"OS error: {0}\".format(err))", "def file_url(self, url):\n return self.is_regex_url(url, self.is_file_regex)", "def is_downloadable(self,imgurl):\n h = requests.head(imgurl, 
allow_redirects=True)\n header = h.headers\n content_type = header.get('content-type')\n if 'text' in content_type.lower():\n return None\n if 'html' in content_type.lower():\n return None\n return True", "def check_link(url):\n try:\n\n r = requests.get(url)\n r.raise_for_status()\n r.encoding = r.apparent_encoding\n return r.text\n except:\n print('Connection Failed!!!')", "def testGetFileTimeout(self):\n try:\n # Test to make sure get request times out\n fU = FileUtil(timeout=0.00001)\n remoteLocator = self.__largeHttpsFileUrl\n fn = fU.getFileName(remoteLocator)\n lPath = os.path.join(self.__workPath, fn)\n ok = fU.get(remoteLocator, lPath)\n self.assertFalse(ok)\n\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()", "def download_if_stale(filepath, fileurl):\n if not os.path.exists(filepath) or needs_refreshing(filepath):\n try:\n urllib.request.urlretrieve(fileurl, filepath)\n except urllib.error.HTTPError:\n print('The {0} is not reachable'.format(fileurl))", "def _check_url_file (url, path_download, outfile) :\n if \"http://\" in url.lower () :\n dest = outfile if outfile != None else _get_file_url (url, path_download)\n down = False\n nyet = dest + \".notyet\"\n \n if os.path.exists (dest) and not os.path.exists (nyet) :\n try :\n fLOG(\"trying to connect\", url)\n f1 = urllib.urlopen (url)\n down = _first_more_recent (f1, dest)\n newdate = down\n f1.close ()\n except IOError :\n fLOG(\"unable to connect Internet, working offline for url\", url)\n down = False\n else : \n down = True\n newdate = False\n \n if down :\n if newdate : fLOG (\" downloading (updated) \", url)\n else : fLOG (\" downloading \", url)\n \n if len (url) > 4 and url [-4].lower () in [\".txt\", \".csv\", \".tsv\", \".log\"] :\n fLOG (\"creating text file \", dest)\n format = \"w\"\n else : \n fLOG (\"creating binary file \", dest)\n format = \"wb\"\n \n if os.path.exists (nyet) :\n size = os.stat (dest).st_size\n fLOG (\"resume downloading (stop at\", size, \") from \", url)\n request = urllib.request.Request(url) \n request.add_header(\"Range\", \"bytes=%d-\" % size)\n fu = urllib.request.urlopen (request) \n f = open (dest, format.replace (\"w\", \"a\"))\n else :\n fLOG (\"downloading \", url)\n request = urllib.request.Request(url) \n fu = urllib.request.urlopen (url)\n f = open (dest, format)\n \n open (nyet, \"w\").close ()\n c = fu.read (2**21)\n size = 0\n while len (c) > 0 :\n size += len (c)\n fLOG(\" size\", size)\n f.write (c)\n f.flush ()\n c = fu.read (2**21)\n fLOG (\"end downloading\")\n f.close ()\n fu.close ()\n os.remove (nyet)\n \n url = dest\n return url", "def main(url, localfile):\n ph.download_file(url, localfile)", "def send_get_request(url, file_name=None):\r\n request = urllib.request.Request(url, headers={'User-Agent': AGENT})\r\n with urllib.request.urlopen(request) as response:\r\n response_context = response.read()\r\n if file_name is None:\r\n return response_context\r\n with open(file_name, 'bw+') as f:\r\n f.write(response_context)\r\n return response_context", "def maybe_download(filename, expected_bytes):\n filepath = datapath + filename\n if not os.path.exists(filepath):\n # urlretrieve returns a tuple of saved filepath and info() of the downloaded file\n filepath, _ = urllib.request.urlretrieve(url+filename, filepath)\n statinfo = os.stat(filepath)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filepath)\n else:\n print(statinfo.st_size)\n raise Exception(\n 'Failed to verify ' + filepath + '. 
Can you get to it with a browser?')\n return filepath", "def t_getfile(self, link, filename, session):\n\n self.sema.acquire()\n\n filepath = os.path.join(os.getcwd() + '/Downloads/' + str(filename))\n os.makedirs(os.path.dirname(filepath), exist_ok=True)\n\n if not os.path.isfile(filepath):\n self.download_new_file(link, filepath, session)\n else:\n\n current_bytes = os.stat(filepath).st_size\n\n headers = requests.head(link).headers\n\n print(headers)\n if 'content-length' not in headers:\n print(f\"server doesn't support content-length for {link}\")\n self.sema.release()\n return\n\n total_bytes = int(requests.head(link).headers['content-length'])\n\n print(total_bytes)\n\n if current_bytes < total_bytes:\n #\n self.continue_file_download(link, filepath, session, current_bytes, total_bytes)\n print(f\"Current byte < total - remaining {total_bytes - current_bytes}\")\n else:\n print(f\"already done: {filename}\")\n\n self.sema.release()", "def fetch(file_url):\n\n tmp_file_handle = NamedTemporaryFile(delete=True)\n headers = {'User-Agent': 'User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.61 Safari/537.36'}\n\n # download file and save to temp object\n with requests.get(file_url, headers=headers, stream=True) as r:\n tmp_file_handle.write(r.content)\n\n tmp_file_handle.flush()\n\n return tmp_file_handle", "def grab_file(url, filename):\n with RemoteZip(url) as zip:\n filenames = zip.namelist()\n for fname in filenames:\n zinfo = zip.getinfo(fname)\n if filename in zinfo.filename and not \".plist\" in zinfo.filename:\n filename = zinfo.filename.split(\"/\")[-1]\n print(\"[i] downloading %s\" % filename)\n extract_and_clean(zip, zinfo.filename, filename)\n return filename\n return filename", "def test_retrieve_files_single(self):\n os.makedirs('/tmp/remote_pacha/localhost/another_dir')\n os.makedirs('/tmp/remote_pacha/localhost/single_dir')\n remote_file = open('/tmp/remote_pacha/localhost/single_dir/remote.txt', 'w')\n remote_file.write(\"remote file\")\n remote_file.close()\n self.assertTrue(os.path.isfile('/tmp/remote_pacha/localhost/single_dir/remote.txt'))\n server = \"%s@%s\" % (self.username, host.hostname()) \n run = rebuild.Rebuild(server=server,\n hostname='localhost', \n source='/tmp/remote_pacha',\n directory='single_dir')\n run.retrieve_files()\n result = os.path.isfile('/tmp/localhost/single_dir/remote.txt')\n line = open('/tmp/localhost/single_dir/remote.txt')\n remote_line = line.readline()\n self.assertEqual(remote_line, \"remote file\")\n self.assertTrue(result)", "def browse(self, web_resource):\n url = web_resource.url\n\n # We don't need destination anchors\n current_full_url = url.split(\"#\")[0]\n # Url without query string\n current = current_full_url.split(\"?\")[0]\n # Get the dirname of the file\n currentdir = \"/\".join(current.split(\"/\")[:-1]) + \"/\"\n\n # Timeout must not be too long to block big documents\n # (for exemple a download script)\n # and not too short to give good results\n socket.setdefaulttimeout(self.timeout)\n\n headers = {}\n headers[\"user-agent\"] = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'\n try:\n resp = self.h.send(web_resource, headers=headers)\n except socket.timeout:\n self.excluded.append(url)\n return False\n except requests.exceptions.Timeout:\n self.excluded.append(url)\n return False\n except socket.error, msg:\n if msg.errno == 111:\n print(_(\"Connection refused!\"))\n self.excluded.append(url)\n return False\n except Exception, e:\n print(_(\"Exception 
in lswww.browse: {0}\").format(e))\n self.excluded.append(url)\n return False\n\n info = resp.getHeaders()\n code = resp.getCode()\n info[\"status_code\"] = code\n\n if not url in self.link_encoding:\n self.link_encoding[url] = \"\"\n\n proto = url.split(\"://\")[0]\n if proto == \"http\" or proto == \"https\":\n if not isinstance(proto, unicode):\n proto = unicode(proto)\n # Check the content-type first\n # if not info.has_key(\"content-type\"):\n # Sometimes there's no content-type...\n #so we rely on the document extension\n # if (current.split(\".\")[-1] not in self.allowed)\n # and current[-1] != \"/\":\n # return info\n # elif info[\"content-type\"].find(\"text\") == -1:\n # return info\n\n # No files more than 2MB\n if \"content-length\" in info:\n if int(info[\"content-length\"]) > 2097152:\n return False\n\n page_encoding = None\n resp_encoding = resp.getEncoding()\n content_type = resp.getHeaders().get('content-type', '')\n mime_type = content_type.split(';')[0].strip()\n swf_links = []\n js_links = []\n\n # Requests says it found an encoding... the content must be some HTML\n if resp_encoding and any(mime_type.startswith(t) for t in self.allowed_types):\n # But Requests doesn't take a deep look at the webpage,\n # so check it with BeautifulSoup\n page_encoding = BeautifulSoup.BeautifulSoup(resp.getRawPage()).originalEncoding\n if page_encoding and page_encoding.upper() != resp_encoding:\n # Mismatch ! Convert the response text to the encoding detected by BeautifulSoup\n resp.setEncoding(page_encoding)\n data = resp.getPage()\n else:\n # Can't find an encoding... beware of non-html content\n data = resp.getRawPage()\n if \"application/x-shockwave-flash\" in mime_type or web_resource.file_ext == \"swf\":\n try:\n flash_parser = swf_parser.swf_parser(data)\n swf_links = flash_parser.getLinks()\n except Exception, err_data:\n swf_links = err_data[1]\n data = \"\"\n elif \"/x-javascript\" in mime_type or \"/x-js\" in mime_type or \"/javascript\" in mime_type:\n js_links = lamejs.lamejs(data).getLinks()\n data = \"\"\n\n # Manage redirections\n if \"location\" in info:\n redir = self.correctlink(info[\"location\"], current, current_full_url, currentdir, proto, None)\n if redir is not None:\n if self.__inzone(redir) == 0:\n self.link_encoding[redir] = self.link_encoding[url]\n redir = HTTP.HTTPResource(redir)\n # Is the document already visited of forbidden ?\n if (redir in self.browsed) or (redir in self.tobrowse) or \\\n self.isExcluded(redir):\n pass\n else:\n # No -> Will browse it soon\n self.tobrowse.append(redir)\n\n htmlSource = data\n if page_encoding:\n bs = BeautifulSoup.BeautifulSoup(htmlSource)\n # Look for a base tag with an href attribute\n if bs.head:\n baseTags = bs.head.findAll(\"base\")\n for base in baseTags:\n # BeautifulSoup doesn't work as excepted with the \"in\" statement, keep this:\n if base.has_key(\"href\"):\n # Found a base url, now set it as the current url\n current = base[\"href\"].split(\"#\")[0]\n # We don't need destination anchors\n current = current.split(\"?\")[0]\n # Get the dirname of the file\n currentdir = \"/\".join(current.split(\"/\")[:-1]) + \"/\"\n break\n\n #if page_encoding != None:\n # htmlSource = unicode(data, page_encoding, \"ignore\")\n #else:\n # htmlSource = data\n\n p = linkParser(url)\n try:\n p.feed(htmlSource)\n except HTMLParser.HTMLParseError:\n htmlSource = BeautifulSoup.BeautifulSoup(htmlSource).prettify()\n if not isinstance(htmlSource, unicode) and page_encoding is not None:\n htmlSource = unicode(htmlSource, 
page_encoding, \"ignore\")\n try:\n p.reset()\n p.feed(htmlSource)\n except HTMLParser.HTMLParseError:\n p = linkParser2(url, self.verbose)\n p.feed(htmlSource)\n\n # Sometimes the page is badcoded but the parser doesn't see the error\n # So if we got no links we can force a correction of the page\n if len(p.liens) == 0:\n if page_encoding is not None:\n try:\n htmlSource = BeautifulSoup.BeautifulSoup(htmlSource).prettify(page_encoding)\n p.reset()\n p.feed(htmlSource)\n except UnicodeEncodeError:\n # The resource is not a valid webpage (for example an image)\n htmlSource = \"\"\n except HTMLParser.HTMLParseError:\n p = linkParser2(url, self.verbose)\n p.feed(htmlSource)\n\n found_links = p.liens + swf_links + js_links\n for lien in found_links:\n if (lien is not None) and (page_encoding is not None) and isinstance(lien, unicode):\n lien = lien.encode(page_encoding, \"ignore\")\n lien = self.correctlink(lien, current, current_full_url, currentdir, proto, page_encoding)\n if lien is not None:\n if self.__inzone(lien) == 0:\n # Is the document already visited of forbidden ?\n lien = HTTP.HTTPResource(lien, encoding=page_encoding, referer=url)\n if ((lien in self.browsed) or\n (lien in self.tobrowse) or\n self.isExcluded(lien) or\n self.__inzone(lien.url) != 0):\n pass\n elif self.nice > 0:\n if self.__countMatches(lien) >= self.nice:\n # don't waste time next time we found it\n self.excluded.append(lien.url)\n return False\n else:\n self.tobrowse.append(lien)\n else:\n # No -> Will browse it soon\n self.tobrowse.append(lien)\n # Keep the encoding of the current webpage for the future requests to the link\n # so we can encode the query string parameters just as a browser would do.\n # Of course websites encoding may be broken :(\n self.link_encoding[lien] = page_encoding\n\n for form in p.forms:\n action = self.correctlink(form[0], current, current_full_url, currentdir, proto, page_encoding)\n if action is None:\n action = current\n if self.__inzone(action) != 0:\n continue\n\n # urlencode the POST parameters here\n params = form[1]\n post_params = []\n files = []\n for kv in params:\n if isinstance(kv[0], unicode):\n kv[0] = kv[0].encode(page_encoding, \"ignore\")\n\n if isinstance(kv[1], list):\n fname = kv[1][0]\n if isinstance(fname, unicode):\n fname = fname.encode(page_encoding, \"ignore\")\n files.append([kv[0], [fname, kv[1][1]]])\n else:\n if isinstance(kv[1], unicode):\n kv[1] = kv[1].encode(page_encoding, \"ignore\")\n post_params.append([kv[0], kv[1]])\n\n form_rsrc = HTTP.HTTPResource(action,\n method=\"POST\",\n post_params=post_params,\n file_params=files,\n encoding=page_encoding,\n referer=url)\n if form_rsrc not in self.forms:\n self.forms.append(form_rsrc)\n if not (form_rsrc in self.browsed or form_rsrc in self.tobrowse):\n self.tobrowse.append(form_rsrc)\n if files:\n if form_rsrc not in self.uploads:\n self.uploads.append(form_rsrc)\n # We automaticaly exclude 404 urls\n if code == \"404\":\n self.excluded.append(url)\n #return {} # exclude from scan but can be useful for some modules maybe\n\n return True", "def fetch_file(self, path, content_type, response=settings.HTTP_OK):\n try:\n with open(path) as fp:\n self.fetch_content(fp.read(), content_type, response)\n except IOError:\n self.send_error(settings.HTTP_INTERNAL_SERVER_ERROR)", "def test_get_url(self):\n package = make_package(version=\"1.1+g12345\")\n response = self.storage.download_response(package)\n\n parts = urlparse(response.location)\n self.assertEqual(parts.scheme, 'https')\n 
self.assertEqual(parts.netloc, 'abcdef.cloudfront.net')\n self.assertEqual(parts.path, '/bcc4/mypkg/mypkg-1.1%2Bg12345.tar.gz')\n query = parse_qs(parts.query)\n self.assertItemsEqual(query.keys(), ['Key-Pair-Id', 'Expires',\n 'Signature'])\n self.assertTrue(int(query['Expires'][0]) > time.time())\n self.assertEqual(query['Key-Pair-Id'][0],\n self.settings['storage.cloud_front_key_id'])", "def file_downloaded(filename):\n fc = pathlib.Path(filename)\n if fc.is_file():\n return True\n else:\n return False", "def maybe_download(filename, expected_bytes):\n if not os.path.exists(filename):\n filename, _ = urllib.request.urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print ('Found and verified', filename)\n else:\n raise Exception(\n 'Failed to verify' + filename + '. Can you get to it with a browser?')\n return filename", "def get_remote_file(sid, path):\n with slycat.web.server.remote.get_session(sid) as session:\n return session.get_file(path)", "def download_if_not_exists(filename, url):\n if not os.path.exists(filename):\n download_file(filename, url)\n return True\n return False", "def download_if_not_exists(filename, url):\n if not os.path.exists(filename):\n download_file(filename, url)\n return True\n return False", "def maybe_download(filename, expected_bytes):\n if not os.path.exists(filename):\n filename, _ = urllib.request.urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filename)\n else:\n print(statinfo.st_size)\n raise Exception(\n 'Failed to verify ' + filename + '. Can you get to it with a browser?')\n return filename", "def maybe_download(filename, expected_bytes):\n if not os.path.exists(filename):\n filename, _ = urllib.request.urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filename)\n else:\n print(statinfo.st_size)\n raise Exception(\n 'Failed to verify ' + filename + '. 
Can you get to it with a browser?')\n return filename", "def download_http(self, url):\n\n # Set things up.\n # ==============\n\n out = None\n headers = {}\n if (url.username is not None) and (url.password is not None):\n tmp = base64.b64encode(':'.join([url.username, url.password]))\n headers['Authorization'] = \"Basic %s\" % tmp\n\n\n # Toe the waters.\n # ===============\n # We start with an HTTP HEAD request to check the status.\n\n conn = httplib.HTTPConnection(url.netloc)\n conn.request(\"HEAD\", url.path, '', headers)\n r = conn.getresponse()\n conn.close()\n if self.verbose:\n print >> sys.stderr, url, r.status, ''\n\n\n # Bail.\n # =====\n # Short-cut when we just care whether it's a package.\n\n if url.path.endswith('/'):\n out = r.status == 200\n\n\n elif r.status == 200:\n\n # Wade in.\n # ========\n # If the status is positive we check to see if we've already\n # downloaded the latest copy.\n\n etag = r.getheader('etag', '')\n lm = r.getheader('last-modified', '')\n key = sha.new(str(url) + etag + lm).hexdigest()\n\n if not self.cachedir:\n raise ValueError(\"netimp.importer.cachedir not set\")\n if not os.path.isdir(self.cachedir):\n raise IOError( \"netimp.importer.cachedir not found \"\n + \"(%s)\" % self.cachedir\n )\n\n path = join(self.cachedir, key)\n if os.path.isfile(path):\n out = open(path, 'rb')\n else:\n\n # Dive in!\n # ========\n # We don't have this module locally yet: download it for real.\n\n conn = httplib.HTTPConnection(url.netloc)\n conn.request(\"GET\", url.path, '', headers)\n r = conn.getresponse()\n if r.status == 200: # just in case!\n fp = open(path, 'w+b')\n fp.write(r.read())\n fp.flush()\n fp.close()\n out = open(path, 'rb')\n conn.close()\n\n return out", "def getFile( self, path, localPath = False ):\n res = self.__checkArgumentFormat( path )\n if not res['OK']:\n return res\n urls = res['Value']\n successful = {}\n failed = {}\n for src_url in urls:\n fileName = os.path.basename( src_url )\n if localPath:\n dest_file = \"%s/%s\" % ( localPath, fileName )\n else:\n dest_file = \"%s/%s\" % ( os.getcwd(), fileName )\n gLogger.debug( \"DIPStorage.getFile: Executing transfer of %s to %s\" % ( src_url, dest_file ) )\n res = self.__getFile( src_url, dest_file )\n if res['OK']:\n successful[src_url] = res['Value']\n else:\n failed[src_url] = res['Message']\n resDict = {'Failed':failed, 'Successful':successful}\n return S_OK( resDict )", "def anon_download(url: str):\n if verify(url):\n location = download(url)\n return location\n return 6", "def test_url():\r\n global provided_url\r\n global verbose_flag\r\n # extracting url\r\n provided_url = urlparse(provided_url).scheme+\"://\"+urlparse(provided_url).netloc\r\n print provided_url \r\n if verbose_flag: print \"\\t[.] Checking if connection can be established...\",# + provided_url\r\n try:\r\n response = urllib2.urlopen(provided_url)\r\n \r\n except HTTPError, e:\r\n if verbose_flag: print \"[!] Failed\"\r\n return 0\r\n except URLError, e:\r\n if verbose_flag: print \"[!] 
Failed\"\r\n return 0\r\n else:\r\n valid_target = 1\r\n if verbose_flag: print \"Success\"\r\n return 1", "def fetch_file_from_web(server_url, path, transform_func=json.loads):\n artifact_url = \"{0}/{1}\".format(server_url, path)\n r = requests.get(artifact_url)\n r.raise_for_status()\n if transform_func:\n return transform_func(r.text)\n else:\n return r.text", "def maybe_download(filename, expected_bytes):\n if not os.path.exists(filename):\n filename, _ = urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified %s' % filename)\n else:\n print(statinfo.st_size)\n raise Exception(\n 'Failed to verify ' + filename + '. Can you get to it with a browser?')\n return filename", "def maybe_download(filename, expected_bytes):\n if not os.path.exists(filename):\n filename, _ = urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified %s' % filename)\n else:\n print(statinfo.st_size)\n raise Exception(\n 'Failed to verify ' + filename + '. Can you get to it with a browser?')\n return filename", "def fetch(self, url, timeout=None):\n\n # ISO-8859-1 is the default encoding for text files per the specs for\n # HTTP 1.0 (RFC 1945 sec 3.6.1) and HTTP 1.1 (RFC 2616 sec 3.7.1).\n # ref: http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.7.1\n encoding = \"iso-8859-1\"\n content = \"\"\n expires_header = None\n content_type_header = None\n self._response_code = 0\n self._source_url = url\n\n if self.user_agent:\n req = urllib_request.Request(url, None, \n { 'User-Agent' : self.user_agent })\n else:\n req = urllib_request.Request(url)\n\n try:\n if timeout:\n f = urllib_request.urlopen(req, timeout=timeout)\n else:\n f = urllib_request.urlopen(req)\n\n content = f.read(MAX_FILESIZE)\n if VERBOSE:\n print 'Response Headers:'\n print f.info()\n\n # As of Python 2.5, f.info() looks like it returns the HTTPMessage\n # object created during the connection. \n expires_header = f.info().get(\"expires\")\n content_type_header = f.info().get(\"Content-Type\")\n # As of Python 2.4, this file-like object reports the response \n # code, too. \n if hasattr(f, \"code\"):\n self._response_code = f.code\n else:\n self._response_code = 200\n f.close()\n except urllib_error.URLError:\n # This is a slightly convoluted way to get the error instance,\n # but it works under Python 2 & 3. \n error_instance = sys.exc_info()\n if len(error_instance) > 1:\n error_instance = error_instance[1]\n if hasattr(error_instance, \"code\"):\n self._response_code = error_instance.code\n if VERBOSE:\n print 'Code:%d\\nConnect to %s timeout.'%(self._response_code, url)\n \n # MK1996 section 3.4 says, \"...robots should take note of Expires \n # header set by the origin server. If no cache-control directives \n # are present robots should default to an expiry of 7 days\".\n \n # This code is lazy and looks at the Expires header but not \n # Cache-Control directives.\n self.expiration_date = None\n if self._response_code >= 200 and self._response_code < 300:\n # All's well.\n if expires_header:\n self.expiration_date = email_utils.parsedate_tz(expires_header)\n \n if self.expiration_date:\n # About time zones -- the call to parsedate_tz() returns a\n # 10-tuple with the time zone offset in the 10th element. \n # There are 3 valid formats for HTTP dates, and one of \n # them doesn't contain time zone information. (UTC is \n # implied since all HTTP header dates are UTC.) 
When given\n # a date that lacks time zone information, parsedate_tz() \n # returns None in the 10th element. mktime_tz() interprets\n # None in the 10th (time zone) element to mean that the \n # date is *local* time, not UTC. \n # Therefore, if the HTTP timestamp lacks time zone info \n # and I run that timestamp through parsedate_tz() and pass\n # it directly to mktime_tz(), I'll get back a local \n # timestamp which isn't what I want. To fix this, I simply\n # convert a time zone of None to zero. It's much more \n # difficult to explain than to fix. =)\n # ref: http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.3.1\n if self.expiration_date[9] == None: \n self.expiration_date = self.expiration_date[:9] + (0,)\n \n self.expiration_date = email_utils.mktime_tz(self.expiration_date)\n if self.use_local_time: \n # I have to do a little more converting to get this \n # UTC timestamp into localtime.\n self.expiration_date = time.mktime(time.gmtime(self.expiration_date)) \n #else:\n # The expires header was garbage.\n\n if not self.expiration_date: self.expiration_date = self._now() + SEVEN_DAYS\n\n if (self._response_code >= 200) and (self._response_code < 300):\n # All's well.\n media_type, encoding = _parse_content_type_header(content_type_header)\n # RFC 2616 sec 3.7.1 -- \n # When no explicit charset parameter is provided by the sender, \n # media subtypes of the \"text\" type are defined to have a default\n # charset value of \"ISO-8859-1\" when received via HTTP.\n # http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.7.1\n if not encoding: \n encoding = \"iso-8859-1\"\n elif self._response_code in (401, 403):\n # 401 or 403 ==> Go away or I will taunt you a second time! \n # (according to MK1996)\n content = \"User-agent: *\\nDisallow: /\\n\"\n elif self._response_code == 404:\n # No robots.txt ==> everyone's welcome\n content = \"\"\n else: \n # Uh-oh. I punt this up to the caller. \n _raise_error(urllib_error.URLError, self._response_code)\n\n if ((PY_MAJOR_VERSION == 2) and isinstance(content, str)) or \\\n ((PY_MAJOR_VERSION > 2) and (not isinstance(content, str))):\n # This ain't Unicode yet! It needs to be.\n \n # Unicode decoding errors are another point of failure that I punt \n # up to the caller.\n try:\n content = content.decode(encoding)\n except UnicodeError:\n _raise_error(UnicodeError,\n \"Robots.txt contents are not in the encoding expected (%s).\" % encoding)\n except (LookupError, ValueError):\n # LookupError ==> Python doesn't have a decoder for that encoding.\n # One can also get a ValueError here if the encoding starts with \n # a dot (ASCII 0x2e). See Python bug 1446043 for details. 
This \n # bug was supposedly fixed in Python 2.5.\n _raise_error(UnicodeError,\n \"I don't understand the encoding \\\"%s\\\".\" % encoding)\n if VERBOSE:\n print 'Response:'\n print content\n\n\n if not content:\n # 响应为空,清空自身数据集,跳过解析步骤\n self._sitemaps = [ ]\n self.__rulesets = [ ]\n return False\n else:\n # Now that I've fetched the content and turned it into Unicode, I \n # can parse it.\n self.parse(content)\n return True", "def url_exist(url:str) -> bool:\r\n with closing(requests.head(url, allow_redirects=True)) as r:\r\n return r.ok", "def is_downloadable(url):\n h = requests.head(url, allow_redirects=True)\n header = h.headers\n content_type = header.get('content-type')\n if 'text' in content_type.lower():\n return False\n if 'html' in content_type.lower():\n return False\n return True", "def _download_file(self, url, local_filepath, timeout=None, auth=None,\n continuation=True, cache=False, method=\"GET\",\n head_safe=False, **kwargs):\n\n if head_safe:\n response = self._session.request(\"HEAD\", url,\n timeout=timeout, stream=True,\n auth=auth, **kwargs)\n else:\n response = self._session.request(method, url,\n timeout=timeout, stream=True,\n auth=auth, **kwargs)\n\n response.raise_for_status()\n if 'content-length' in response.headers:\n length = int(response.headers['content-length'])\n if length == 0:\n log.warn('URL {0} has length=0'.format(url))\n else:\n length = None\n\n if ((os.path.exists(local_filepath)\n and ('Accept-Ranges' in response.headers)\n and continuation)):\n open_mode = 'ab'\n\n existing_file_length = os.stat(local_filepath).st_size\n if length is not None and existing_file_length >= length:\n # all done!\n log.info(\"Found cached file {0} with expected size {1}.\"\n .format(local_filepath, existing_file_length))\n return\n elif existing_file_length == 0:\n open_mode = 'wb'\n else:\n log.info(\"Continuing download of file {0}, with {1} bytes to \"\n \"go ({2}%)\".format(local_filepath,\n length - existing_file_length,\n (length-existing_file_length)/length*100))\n\n # bytes are indexed from 0:\n # https://en.wikipedia.org/wiki/List_of_HTTP_header_fields#range-request-header\n end = \"{0}\".format(length-1) if length is not None else \"\"\n self._session.headers['Range'] = \"bytes={0}-{1}\".format(existing_file_length,\n end)\n\n response = self._session.request(method, url,\n timeout=timeout, stream=True,\n auth=auth, **kwargs)\n response.raise_for_status()\n del self._session.headers['Range']\n\n elif cache and os.path.exists(local_filepath):\n if length is not None:\n statinfo = os.stat(local_filepath)\n if statinfo.st_size != length:\n log.warning(f\"Found cached file {local_filepath} with size {statinfo.st_size} \"\n f\"that is different from expected size {length}\")\n open_mode = 'wb'\n else:\n log.info(\"Found cached file {0} with expected size {1}.\"\n .format(local_filepath, statinfo.st_size))\n response.close()\n return\n else:\n log.info(\"Found cached file {0}.\".format(local_filepath))\n response.close()\n return\n else:\n open_mode = 'wb'\n if head_safe:\n response = self._session.request(method, url,\n timeout=timeout, stream=True,\n auth=auth, **kwargs)\n response.raise_for_status()\n\n blocksize = astropy.utils.data.conf.download_block_size\n\n log.debug(f\"Downloading URL {url} to {local_filepath} with size {length} \"\n f\"by blocks of {blocksize}\")\n\n bytes_read = 0\n\n # Only show progress bar if logging level is INFO or lower.\n if log.getEffectiveLevel() <= 20:\n progress_stream = None # Astropy default\n else:\n progress_stream = 
io.StringIO()\n\n with ProgressBarOrSpinner(length, f'Downloading URL {url} to {local_filepath} ...',\n file=progress_stream) as pb:\n with open(local_filepath, open_mode) as f:\n for block in response.iter_content(blocksize):\n f.write(block)\n bytes_read += len(block)\n if length is not None:\n pb.update(bytes_read if bytes_read <= length else length)\n else:\n pb.update(bytes_read)\n response.close()\n return response", "def _local_fopen(self,url):\n\n m = re.search(\"(black|malware)\", url)\n\n if m:\n if m.group(1) == \"black\":\n return open(\"gsb_phishing.html\")\n elif m.group(1) == \"malware\":\n return open(\"gsb_malware2.html\")", "def _get(self, url):\n response = requests.get(url)\n if response.status_code == requests.codes.ok:\n return response\n else:\n raise HTTPError", "def _getFile(url, cachedFile=True, return_filename=False):\n assert url, \"WHY are you trying to load an empty string url?!?! Nothing good will come of this! In fact, I will assure that! %s\" % (url)\n md5 = hashlib.md5(url).hexdigest()\n filename = os.path.join(config.WEB_CACHE_DIR, md5)\n if os.path.exists(filename) and cachedFile:\n ret = open(filename, 'r').read()\n else:\n opener = urllib.FancyURLopener()\n ret = opener.open(url).read()\n o = open(filename, 'wb') # had to open in binary mode so PIL's Image.Open() function would work\n o.write(ret)\n o.close()\n if return_filename:\n return filename\n else:\n return ret", "def maybe_download(filename, expected_bytes):\n if not os.path.exists(filename):\n filename, _ = urllib.request.urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filename)\n else:\n print(statinfo.st_size)\n raise Exception(\n 'Failed to verify ' + filename + '. 
Can you get to it with a browser?')\n return filename", "def test_404_url(self):\r\n url = 'http://lococast.net/archives/001'\r\n read = ReadUrl.parse(url)\r\n\r\n self.assertTrue(\r\n read.status == 404, \"The status is 404: \" + str(read.status))\r\n self.assertTrue(\r\n not read.is_image(), \"The content is not an image\")\r\n self.assertTrue(\r\n read.content is None, \"Content should be none\")", "def test_get_url(self):\n package = make_package()\n response = self.storage.download_response(package)\n\n parts = urlparse(response.location)\n self.assertEqual(parts.scheme, 'https')\n self.assertEqual(parts.netloc, 'mybucket.s3.amazonaws.com')\n self.assertEqual(parts.path, '/' + self.storage.get_path(package))\n query = parse_qs(parts.query)\n self.assertItemsEqual(query.keys(), ['Expires', 'Signature',\n 'AWSAccessKeyId'])\n self.assertTrue(int(query['Expires'][0]) > time.time())\n self.assertEqual(query['AWSAccessKeyId'][0],\n self.settings['storage.access_key'])", "def _url_exists(url):\n h = httplib2.Http()\n try:\n resp = h.request(url, 'HEAD')\n if resp[0].status == 200:\n return True\n except (httplib2.RelativeURIError, httplib2.ServerNotFoundError):\n return False", "def testZipUrl(self):\n try:\n remoteLocator = self.__zipFileUrl\n # fn = self.__fileU.getFileName(remoteLocator)\n ok = self.__fileU.isLocal(remoteLocator)\n self.assertFalse(ok)\n #\n lPath = os.path.join(self.__workPath, self.__fileU.getFileName(self.__zipFileUrl))\n ok = self.__fileU.get(remoteLocator, lPath)\n self.assertTrue(ok)\n ok = self.__fileU.exists(lPath)\n self.assertTrue(ok)\n ok = self.__fileU.isLocal(lPath)\n self.assertTrue(ok)\n tPath = self.__fileU.getFilePath(lPath)\n self.assertEqual(lPath, tPath)\n fp = self.__fileU.uncompress(lPath, outputDir=self.__workPath)\n ok = fp.endswith(\"Food_Display_Table.xlsx\")\n self.assertTrue(ok)\n\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()", "def retrieve(url_and_path):\n try:\n urlretrieve(url_and_path[0], url_and_path[1])\n except HTTPError:\n pass", "def test_no_io_on_url():\n file = get_image_cache_file()\n file.url\n assert not file.storage.exists.called\n assert not file.storage.open.called", "def __getFile_urllib(self, _src, _dst):\n\n #-------------------- \n # Open the local destination file \n # so that it can start reading in the buffers.\n #-------------------- \n try:\n dstDir = os.path.dirname(_dst) \n if not os.path.exists(dstDir):\n os.makedirs(dstDir)\n dstFile = open(_dst, \"wb\")\n except Exception as e:\n self.__downloadFailed(_src, _dst, dstFile, str(e))\n return\n\n\n\n #-------------------- \n # Construct the request and authentication handler\n #-------------------- \n xnatUrl = Xnat.path.makeXnatUrl(self.host, _src)\n request = urllib.request.Request(xnatUrl)\n request.add_header(\"Authorization\", \n self.authHeader['Authorization'])\n\n\n\n #-------------------- \n # Get the response from the XNAT host.\n #-------------------- \n try:\n response = urllib.request.urlopen(request)\n\n\n\n\n #-------------------- \n # If the urllib.request version fails then use http.client.\n # See get_http.client for more details.\n #-------------------- \n #except urllib.request.HTTPError, e:\n except Exception as e:\n #print(str(e))\n #print(f\"{_src} {_dst}\")\n #print(d)\n self.__downloadFailed(_src, _dst, dstFile, str(e))\n return\n\n\n #-------------------- \n # Get the content size, first by checking log, then by reading \n # header\n #-------------------- \n 
self.downloadTracker['downloadedSize']['bytes'] = 0 \n self.downloadTracker['totalDownloadSize'] = \\\n self.getFileSize(xnatUrl)\n if not self.downloadTracker['totalDownloadSize']['bytes']:\n # If not in log, read the header\n if response.headers and \"Content-Length\" in response.headers:\n self.downloadTracker['totalDownloadSize']['bytes'] = \\\n int(response.headers[\"Content-Length\"]) \n self.downloadTracker['totalDownloadSize']['MB'] = \\\n Xnat.utils.bytesToMB(\\\n self.downloadTracker['totalDownloadSize']['bytes'])\n\n\n #-------------------- \n # Start the buffer reading cycle by\n # calling on the buffer_read function above.\n #-------------------- \n bytesRead = self.__bufferRead(xnatUrl, dstFile, response)\n dstFile.close()", "def check_playable(url):\r\n import urllib\r\n import requests\r\n try:\r\n headers = url.rsplit('|', 1)[1]\r\n except:\r\n headers = ''\r\n headers = urllib.quote_plus(headers).replace('%3D', '=') if ' ' in headers else headers\r\n headers = dict(urlparse.parse_qsl(headers))\r\n\r\n result = None\r\n try:\r\n if url.startswith('http') and '.m3u8' in url:\r\n result = requests.head(url.split('|')[0], headers=headers, timeout=5)\r\n if result is None:\r\n return None\r\n\r\n elif url.startswith('http'):\r\n result = requests.head(url.split('|')[0], headers=headers, timeout=5)\r\n if result is None:\r\n return None\r\n except:\r\n pass\r\n\r\n return result", "def get_web_file(\n url: str,\n download_fn: str,\n days_old: int = settings.UPDATE_CYCLE_DAYS,\n force_download: bool = False,\n) -> Tuple[bool, str]:\n\n need_download = False\n rmod_date = None\n lmod_date = None\n\n if (\n not os.path.exists(download_fn) or force_download\n ): # local file doesn't exist or force is set - download needed\n need_download = True\n else: # local file exists AND not forced, so check the remote counterpart for their last modified time and compare\n try:\n r = requests.get(url)\n last_modified = r.headers.get(\"Last-Modified\", False)\n if last_modified:\n rmod_date_parsed = parser.parse(last_modified)\n rmod_date_local = rmod_date_parsed.replace(tzinfo=datetime.timezone.utc).astimezone(\n tz=None\n )\n rmod_date = rmod_date_local.strftime(\"%Y%m%d\")\n except requests.ConnectionError:\n log.warning(\"Cannot connect to the given URL.\")\n finally:\n local_file_mtime_ts = os.path.getmtime(download_fn)\n lmod_date = timestamp_to_date(local_file_mtime_ts)\n\n if (\n not need_download\n ): # still not sure whether to download or not - need to check/compare rmod date and lmod date\n if (\n rmod_date is None\n ): # if the remote file modified date cannot be found, compare with the days_old variable\n check_date = (datetime.datetime.now() - datetime.timedelta(days=days_old)).strftime(\n \"%Y%m%d\"\n )\n if lmod_date > check_date:\n msg = f\"{download_fn} < {days_old} days old; will not re-download (remote file mtime unavailable).\"\n return False, msg\n else:\n need_download = True\n elif rmod_date > lmod_date:\n need_download = True\n\n if need_download:\n\n if not re.search(\"\\.gz$\", url):\n file_open_fn = gzip.open\n else:\n file_open_fn = open\n\n with urllib.request.urlopen(url) as response, file_open_fn(download_fn, \"wb\") as out_file:\n shutil.copyfileobj(response, out_file)\n\n msg = f\"Remote file downloaded as {download_fn}.\"\n return True, msg\n else:\n msg = f\"No download needed; remote file is not newer than local file {download_fn}.\"\n return False, msg", "def get_file(url, file_name=None):\n cache_dir = os.path.join(os.path.expanduser(\"~\"), 
\".jhML\")\n\n if file_name is None:\n file_name = url[url.rfind('/') + 1:]\n file_path = os.path.join(cache_dir, file_name)\n\n if not os.path.exists(cache_dir):\n os.mkdir(cache_dir)\n\n if os.path.exists(file_path):\n return file_path\n\n print(\"Downloading: \" + file_name)\n try:\n urllib.request.urlretrieve(url, file_path, show_progress)\n except (Exception, KeyboardInterrupt) as e:\n if os.path.exists(file_path):\n os.remove(file_path)\n raise\n print(\" Done\")\n\n return file_path", "def load_file_from_url(self, url: str) -> bytes:\n cached_content = self.cache_get(url)\n if cached_content is not None:\n return cached_content\n try:\n req = requests.get(url, timeout=self.requests_timeout)\n req.raise_for_status()\n content = req.content\n self.cache_set(url, content)\n except requests.RequestException as err:\n self.log_error(err)\n repl_content = self.get_replacement_file(url)\n if repl_content is None:\n raise ImageNotFound(err)\n content = repl_content\n return content", "def get_remote_file_size(url: str = '', httpresponse: object = False) -> int:\n need_to_close = False\n if not httpresponse:\n httpresponse = url_is_alive(url)\n if not httpresponse:\n error_open_mess(url)\n return 0\n need_to_close = True\n\n content_length = httpresponse.getheader('Content-Length')\n if need_to_close:\n httpresponse.close()\n\n return int(content_length) if content_length else 0", "def load_from_remote(self, url: Optional[str] = None) -> None:\n raise NotImplementedError" ]
[ "0.7467245", "0.7420734", "0.72722775", "0.71854055", "0.7185126", "0.71281976", "0.7114665", "0.70842046", "0.70659125", "0.706313", "0.6903022", "0.68697876", "0.6751283", "0.671117", "0.6564248", "0.654372", "0.65414447", "0.6529368", "0.6481161", "0.64615464", "0.64566016", "0.64140755", "0.64056796", "0.6402659", "0.63944215", "0.639321", "0.6387876", "0.6386081", "0.63829803", "0.63477814", "0.6339109", "0.6339075", "0.63362974", "0.6319559", "0.6319423", "0.6315855", "0.6313685", "0.62878287", "0.6287106", "0.62734365", "0.62646043", "0.62598354", "0.62598354", "0.62598354", "0.62590975", "0.6255068", "0.62524337", "0.6245925", "0.62408304", "0.623219", "0.6229401", "0.6227472", "0.62036735", "0.6192532", "0.6188502", "0.6187232", "0.61690474", "0.6167598", "0.61655635", "0.61651516", "0.6162778", "0.61358494", "0.6130169", "0.6128561", "0.61252284", "0.611024", "0.61089873", "0.610803", "0.61052686", "0.60959685", "0.60959685", "0.60906935", "0.60906935", "0.60873854", "0.60774267", "0.6072785", "0.60704106", "0.6043198", "0.6042458", "0.60363746", "0.6027657", "0.60253906", "0.60233194", "0.6018445", "0.6017692", "0.6013888", "0.6011921", "0.60062706", "0.60038173", "0.60014516", "0.5997297", "0.599537", "0.59909344", "0.598803", "0.598608", "0.5979843", "0.59793925", "0.5975183", "0.5974943", "0.59719795", "0.5970868" ]
0.0
-1
Kludge in running testsuites as a sub process. Testsuite objects have already been made, and used to allow test selection. Having done this, the objects are dumped, and new ones are started up in sub shells.
def runTestSuites(self):
    self.testsuitesToXML()
    tss = []
    jobStatus = {}
    for t in self.testsuites:
        d = t.testsuitedir
        runner = os.path.join(self.basepath, 'testSuiteRunner.py')
        tdir = os.path.join(d, 'testsuite.out')
        cmd = 'python %s %s>& %s' % (runner, d, tdir)
        #print 'about to popen the cmd: %s' % cmd
        tss.append((t.name, popen2.Popen3(cmd)))
        jobStatus[t.name] = ('running', nowSecs())
    ntests = len(tss)
    printJobStatus(jobStatus)
    while tss:
        toRemove = [p for p in tss if p[1].poll() != -1]
        if toRemove:
            [tss.remove(p) for p in toRemove]
            for p in toRemove:
                jobStatus[p[0]] = ('completed', nowSecs())
            printJobStatus(jobStatus)
        time.sleep(10)
    print 'all %d tests have completed' % ntests
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def RunTest(self):\n self.TestLs()\n self.TestTerminate()\n self.TestMultipleProcesses()", "def __main() :\n launchTests()", "def runtest(self):", "def startTestRun(self):", "def runTest(self):\n unittest.main()\n ChoreTest.clean_up()", "def runTest(self):\n self.testsActivated = True\n self.session = subprocess.Popen([\"sudo\", \"python3\", TEST_RUNNER_PATH], preexec_fn=os.setsid)", "def runTests(self):\n \n pass", "def RunTestAll(ss):\n ss.StopNow = False\n ss.TestAll()\n ss.Stopped()", "def main():\n # add all new test suites per test module here\n suite_date = test_date.suite()\n suite_ng = test_ng.suite()\n suite_page = test_page.suite()\n suite_container = test_container.suite()\n\n # add the suite to be tested here\n alltests = unittest.TestSuite((suite_date,\n suite_ng,\n suite_page,\n suite_container))\n\n # run the suite\n runner = unittest.TextTestRunner()\n runner.run(alltests)", "def test_after_jam_step_two(self):\n for test_suite_class in self.jam_step_2_test_suite_list:\n test_suite = test_suite_class(self)\n results = test_suite.run()\n self.test_results += results", "def setUpSuite():\n global _output_dir\n global _suite_configured\n\n if _suite_configured:\n return\n\n def remove_output_dir():\n global _output_dir\n if _output_dir != '':\n try:\n shutil.rmtree(_output_dir)\n except FileNotFoundError:\n pass\n\n atexit.register(remove_output_dir)\n _output_dir = tempfile.mkdtemp(dir=TESTS_DIR)\n\n os.environ['VOC_BUILD_DIR'] = os.path.join(_output_dir, 'build')\n os.environ['VOC_DIST_DIR'] = os.path.join(_output_dir, 'dist')\n\n # If the code has been precompiled, we don't have to\n # compile it as part of the test suite setup.\n precompile = os.environ.get('PRECOMPILE', 'true').lower() == 'true'\n if not precompile:\n _suite_configured = True\n return\n\n proc = subprocess.Popen(\n \"ant java\",\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n shell=True,\n )\n\n try:\n out, err = proc.communicate(timeout=30)\n except subprocess.TimeoutExpired:\n proc.kill()\n out, err = proc.communicate()\n raise\n\n if proc.returncode != 0:\n raise Exception(\"Error compiling java sources: \" + out.decode('ascii'))\n\n _suite_configured = True", "def test_run_started(self):", "def do_TestSuite(suite):\n cl = suite.__class__\n name = mangle_test_name(suite.test_name)\n dbsuite = get_or_create_TestSuite(name=name, valid=True, \n suiteimplementation=\"%s.%s\" % (cl.__module__, cl.__name__))\n dbsuite.subsuites = []\n dbsuite.testcases = []\n\n memo = set()\n for testentry in suite:\n if testentry.inst.__class__ in memo:\n continue\n memo.add(testentry.inst.__class__)\n if isinstance(testentry, core.SuiteEntry):\n newsuite = do_TestSuite(testentry.inst)\n dbsuite.subsuites.append(newsuite)\n else: # a TestEntry or TestSeriesEntry\n dbcase = do_TestEntry(testentry)\n dbsuite.testcases.append(dbcase)\n _dbsession.commit()\n return dbsuite", "def do_test(self):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tif not self.build['dotest']:\n\t\t\tself.log('Tests configured off, not running',level=logging.DEBUG)\n\t\t\treturn\n\t\t# Test in reverse order\n\t\tself.log('PHASE: test', level=logging.DEBUG)\n\t\tself.stop_all()\n\t\tself.start_all()\n\t\tfor module_id in self.module_ids(rev=True):\n\t\t\t# Only test if it's installed.\n\t\t\tif self.is_installed(self.shutit_map[module_id]):\n\t\t\t\tself.log('RUNNING TEST ON: ' + module_id, 
level=logging.DEBUG)\n\t\t\t\tself.login(prompt_prefix=module_id,command=shutit_global.shutit_global_object.bash_startup_command,echo=False)\n\t\t\t\tif not self.shutit_map[module_id].test(self):\n\t\t\t\t\tself.fail(module_id + ' failed on test', shutit_pexpect_child=self.get_shutit_pexpect_session_from_id('target_child').pexpect_child) # pragma: no cover\n\t\t\t\tself.logout(echo=False)", "def test_suite():\n testSuite = unittest.TestSuite()\n\n testSuite.addTest(test_classfactory(\"test_inheritance\"))\n return testSuite", "def runtests():\r\n\r\n app_abspath = os.path.dirname(os.path.dirname(__file__))\r\n models_abspath = os.path.join(app_abspath, 'models.py')\r\n models_exists = os.path.isfile(models_abspath)\r\n urls_abspath = os.path.join(app_abspath, 'urls.py')\r\n urls_exists = os.path.isfile(urls_abspath)\r\n views_abspath = os.path.join(app_abspath, 'views')\r\n views_exists = os.path.isdir(views_abspath)\r\n tpls_abspath = os.path.join(app_abspath, 'templates')\r\n tpls_exists = os.path.isdir(tpls_abspath)\r\n\r\n for f in [models_abspath, urls_abspath]:\r\n if os.path.isfile(f):\r\n subprocess.call('cp {} {}.orig'.format(f, f), shell=True)\r\n\r\n if views_exists:\r\n subprocess.call('cp -r {} {}.orig'.format(views_abspath, views_abspath), shell=True)\r\n\r\n if tpls_exists:\r\n subprocess.call('cp -r {} {}.orig'.format(tpls_abspath, tpls_abspath), shell=True)\r\n\r\n overwrite_project_language('ja')\r\n subprocess.call('python manage.py generatescaffold test_app I18nModel title:string', shell=True)\r\n time.sleep(1)\r\n overwrite_project_language('en-us')\r\n time.sleep(1)\r\n\r\n subprocess.call('python manage.py generatescaffold test_app GeneratedNoTimestampModel title:string description:text --no-timestamps', shell=True)\r\n time.sleep(2) # Give time for Django's AppCache to clear\r\n\r\n subprocess.call('python manage.py generatescaffold test_app GeneratedModel title:string description:text', shell=True)\r\n\r\n test_status = subprocess.call('python manage.py test --with-selenium --with-selenium-fixtures --with-cherrypyliveserver --noinput', shell=True)\r\n\r\n if models_exists:\r\n subprocess.call('mv {}.orig {}'.format(models_abspath, models_abspath), shell=True)\r\n else:\r\n subprocess.call('rm {}'.format(models_abspath), shell=True)\r\n\r\n if urls_exists:\r\n subprocess.call('mv {}.orig {}'.format(urls_abspath, urls_abspath), shell=True)\r\n else:\r\n subprocess.call('rm {}'.format(urls_abspath), shell=True)\r\n\r\n if views_exists:\r\n subprocess.call('rm -rf {}'.format(views_abspath), shell=True)\r\n subprocess.call('mv {}.orig {}'.format(views_abspath, views_abspath), shell=True)\r\n else:\r\n subprocess.call('rm -rf {}'.format(views_abspath), shell=True)\r\n\r\n if tpls_exists:\r\n subprocess.call('rm -rf {}'.format(tpls_abspath), shell=True)\r\n subprocess.call('mv {}.orig {}'.format(tpls_abspath, tpls_abspath), shell=True)\r\n else:\r\n subprocess.call('rm -rf {}'.format(tpls_abspath), shell=True)\r\n\r\n subprocess.call('rm {}/*.pyc'.format(app_abspath), shell=True)\r\n\r\n sys.exit(test_status)", "def test_functionality(self):\n self.browserObject = globalVars.browserObject\n\n self.preRunSetup()\n \n self.runTestCase()\n \n self.postRunCleanup()", "def main():\n run_test_all()", "def test_functionality(self):\n \n self.browserObject = globalVars.browserObject\n\n self.preRunSetup()\n \n self.runTestCase()\n \n self.postRunCleanup()\n \n self.logout()", "def run_test_suite():\n local('. 
fabric_factory/ve/bin/activate; fabric_factory/src/project/manage.py test')", "def run():\n\tsubsuite_list = []\n\tfor _, modname, _ in pkgutil.iter_modules(test.__path__):\n\t\tif modname.startswith(\"test_\"):\n\t\t\tmodule = importlib.import_module('test.' + modname)\n\t\t\tsubsuite = unittest.TestLoader().loadTestsFromModule(module)\n\t\t\tsubsuite_list.append(subsuite)\n\tsuite = unittest.TestSuite(subsuite_list)\n\n\tprint(\"Testing:\\n\")\n\tunittest.TextTestRunner(verbosity=2).run(suite)", "def test_subsystems(self):\n pass", "def test():\n\t\treturn [\"vice.core.objects.tests\",\n\t\t\t[\n\t\t\t\tagb.test_agb_grid_constructor(),\n\t\t\t\tagb.test_agb_grid_destructor(),\n\t\t\t\tcallback_1arg.test_callback_1arg_constructor(),\n\t\t\t\tcallback_1arg.test_callback_1arg_destructor(),\n\t\t\t\tcallback_2arg.test_callback_2arg_constructor(),\n\t\t\t\tcallback_2arg.test_callback_2arg_destructor(),\n\t\t\t\tccsne.test_ccsne_yield_specs_constructor(),\n\t\t\t\tccsne.test_ccsne_yield_specs_destructor(),\n\t\t\t\tchannel.test_channel_constructor(),\n\t\t\t\tchannel.test_channel_destructor(),\n\t\t\t\telement.test_element_constructor(),\n\t\t\t\telement.test_element_destructor(),\n\t\t\t\tfromfile.test_fromfile_constructor(),\n\t\t\t\tfromfile.test_fromfile_destructor(),\n\t\t\t\thydrodiskstars.test_hydrodiskstars_constructor(),\n\t\t\t\thydrodiskstars.test_hydrodiskstars_destructor(),\n\t\t\t\timf.test_imf_constructor(),\n\t\t\t\timf.test_imf_destructor(),\n\t\t\t\tintegral.test_integral_constructor(),\n\t\t\t\tintegral.test_integral_destructor(),\n\t\t\t\tinterp_scheme_1d.test_interp_scheme_1d_constructor(),\n\t\t\t\tinterp_scheme_1d.test_interp_scheme_1d_destructor(),\n\t\t\t\tinterp_scheme_2d.test_interp_scheme_2d_constructor(),\n\t\t\t\tinterp_scheme_2d.test_interp_scheme_2d_destructor(),\n\t\t\t\tism.test_ism_constructor(),\n\t\t\t\tism.test_ism_destructor(),\n\t\t\t\tmdf.test_mdf_constructor(),\n\t\t\t\tmdf.test_mdf_destructor(),\n\t\t\t\tmigration.test_migration_constructor(),\n\t\t\t\tmigration.test_migration_destructor(),\n\t\t\t\tmultizone.test_multizone_constructor(),\n\t\t\t\tmultizone.test_multizone_destructor(),\n\t\t\t\tsinglezone.test_singlezone_constructor(),\n\t\t\t\tsinglezone.test_singlezone_destructor(),\n\t\t\t\tsneia.test_sneia_yield_specs_constructor(),\n\t\t\t\tsneia.test_sneia_yield_specs_destructor(),\n\t\t\t\tssp.test_ssp_constructor(),\n\t\t\t\tssp.test_ssp_destructor(),\n\t\t\t\ttracer.test_tracer_constructor(),\n\t\t\t\ttracer.test_tracer_destructor()\n\t\t\t]\n\t\t]", "def tearDown(self):\n # check clean test run flag\n print time.ctime(), '----> enter %s tearDown' % (self._testMethodName)\n #pdb.set_trace()\n\n testHaDR_AdvancedParameters_MultiTier.testTearDownSuccess = False\n\n if self._testMethodName == 'test201ErrorProvokeDVEKeyT2':\n (c, o) = self.site2.runProgramInGuest(self.site2.getHost(\"WORKER1\"), \"chmod uog+w /usr/sap/$SAPSYSTEMNAME/SYS/global/hdb/security/ssfs/SSFS_$SAPSYSTEMNAME.DAT\", siduser = True, returnOutput = True)\n print time.ctime(), \"rc=%s\" % c, o\n elif self._testMethodName == 'test202ErrorProvokeDVEKeyT3':\n (c, o) = self.site3.runProgramInGuest(self.site3.getHost(\"WORKER1\"), \"chmod uog+w /usr/sap/$SAPSYSTEMNAME/SYS/global/hdb/security/ssfs/SSFS_$SAPSYSTEMNAME.DAT\", siduser = True, returnOutput = True)\n print time.ctime(), \"rc=%s\" % c, o\n elif self._testMethodName == 'test220INIParaReplication':\n self.site1.setConfigParameterPerLayer(self.site1.getHost(\"WORKER1\"), \"global.ini\", \"CUSTOMER\", \"inifile_checker\", 
\"enable_tier_3\", None)\n self.site2.setConfigParameterPerLayer(self.site2.getHost(\"WORKER1\"), \"global.ini\", \"CUSTOMER\", \"inifile_checker\", \"enable_tier_3\", None)\n self.site3.setConfigParameterPerLayer(self.site3.getHost(\"WORKER1\"), \"global.ini\", \"CUSTOMER\", \"inifile_checker\", \"enable_tier_3\", None)\n self.site1.setConfigParameterPerLayer(self.site1.getHost(\"WORKER1\"), \"global.ini\", \"CUSTOMER\", \"inifile_checker\", \"replicate_tier_3\", None)\n self.site2.setConfigParameterPerLayer(self.site2.getHost(\"WORKER1\"), \"global.ini\", \"CUSTOMER\", \"inifile_checker\", \"replicate_tier_3\", None)\n self.site3.setConfigParameterPerLayer(self.site3.getHost(\"WORKER1\"), \"global.ini\", \"CUSTOMER\", \"inifile_checker\", \"replicate_tier_3\", None)\n self.site1.setConfigParameterPerLayer(self.site1.getHost(\"WORKER1\"), \"global.ini\", \"CUSTOMER\", \"inifile_checker\", \"interval\", None)\n self.site2.setConfigParameterPerLayer(self.site2.getHost(\"WORKER1\"), \"global.ini\", \"CUSTOMER\", \"inifile_checker\", \"interval\", None)\n self.site3.setConfigParameterPerLayer(self.site3.getHost(\"WORKER1\"), \"global.ini\", \"CUSTOMER\", \"inifile_checker\", \"interval\", None)\n self.site1.setConfigParameterPerLayer(self.site1.getHost(\"WORKER1\"), \"global.ini\", \"CUSTOMER\", \"inifile_checker\", \"replicate\", None)\n self.site2.setConfigParameterPerLayer(self.site2.getHost(\"WORKER1\"), \"global.ini\", \"CUSTOMER\", \"inifile_checker\", \"replicate\", None)\n self.site3.setConfigParameterPerLayer(self.site3.getHost(\"WORKER1\"), \"global.ini\", \"CUSTOMER\", \"inifile_checker\", \"replicate\", None)\n self.site1.setConfigParameterPerLayer(self.site1.getHost(\"WORKER1\"), \"indexserver.ini\", \"CUSTOMER\", \"communication\", \"maxchannels\", None)\n self.site2.setConfigParameterPerLayer(self.site2.getHost(\"WORKER1\"), \"indexserver.ini\", \"CUSTOMER\", \"communication\", \"maxchannels\", None)\n self.site3.setConfigParameterPerLayer(self.site3.getHost(\"WORKER1\"), \"indexserver.ini\", \"CUSTOMER\", \"communication\", \"maxchannels\", None)\n self.site1.setConfigParameterPerLayer(self.site1.getHost(\"WORKER1\"), \"global.ini\", \"HOST\", \"expensive_statement\", \"maxfilesize\", None)\n self.site2.setConfigParameterPerLayer(self.site2.getHost(\"WORKER1\"), \"global.ini\", \"HOST\", \"expensive_statement\", \"maxfilesize\", None)\n self.site3.setConfigParameterPerLayer(self.site3.getHost(\"WORKER1\"), \"global.ini\", \"HOST\", \"expensive_statement\", \"maxfilesize\", None)\n self.site1.setConfigParameterPerLayer(self.site1.getHost(\"WORKER1\"), \"preprocessor.ini\", \"CUSTOMER\", \"lexicon\", \"abort_time\", None)\n self.site2.setConfigParameterPerLayer(self.site2.getHost(\"WORKER1\"), \"preprocessor.ini\", \"CUSTOMER\", \"lexicon\", \"abort_time\", None)\n self.site3.setConfigParameterPerLayer(self.site3.getHost(\"WORKER1\"), \"preprocessor.ini\", \"CUSTOMER\", \"lexicon\", \"abort_time\", None)\n self.site1.setConfigParameterPerLayer(self.site1.getHost(\"WORKER1\"), \"preprocessor.ini\", \"HOST\", \"lexicon\", \"abort_time\", None)\n self.site2.setConfigParameterPerLayer(self.site2.getHost(\"WORKER1\"), \"preprocessor.ini\", \"HOST\", \"lexicon\", \"abort_time\", None)\n self.site3.setConfigParameterPerLayer(self.site3.getHost(\"WORKER1\"), \"preprocessor.ini\", \"HOST\", \"lexicon\", \"abort_time\", None)\n\n\n\n # clean up if the case failed\n if not testHaDR_AdvancedParameters_MultiTier.testRunSuccess:\n self.site1.databaseLandscapeInfo()\n 
self.site2.databaseLandscapeInfo()\n self.site3.databaseLandscapeInfo()\n self.site1.systemReplicationStatus()\n self.site2.systemReplicationStatus()\n self.site3.systemReplicationStatus()\n print time.ctime(), \" #####################################\"\n print time.ctime(), \" ### clean up after test execution ###\"\n print time.ctime(), \" #####################################\"\n # in case the last case failed, and the primary is happened to be host2,\n # there's no chance to disable full_sync anymore, maybe lead to the next profile failed\n if self.globalCfg['sync_mode'] == 'sync' and self.site1.fullSync:\n self.site1.srDisableFullSync(self.site1.getHost(\"WORKER1\"))\n self.site1.fullSync = False\n t1 = threading.Thread(target = self.site1.cleanUp)\n t2 = threading.Thread(target = self.site2.cleanUp)\n t3 = threading.Thread(target = self.site3.cleanUp)\n t1.start()\n t2.start()\n t3.start()\n t1.join()\n t2.join()\n t3.join()\n self.site1.startDatabaseLandscapeAsWhole()\n self.site2.startDatabaseLandscapeAsWhole()\n self.site3.startDatabaseLandscapeAsWhole()\n self.waitForDatabaseLandscapeStartedByPY(self.site1)\n self.waitForDatabaseLandscapeStartedByPY(self.site2)\n self.waitForDatabaseLandscapeStartedByPY(self.site3)\n print time.ctime(), \" #####################################\"\n print time.ctime(), \" ############### done ################\"\n print time.ctime(), \" #####################################\"\n return\n\n # restore the sys-rep\n if self._testMethodName == 'test070Disaster1Tier2Takeover' or self._testMethodName == 'test075OfflineTakeover260' or self._testMethodName == 'test180Recovery' or self._testMethodName == 'test203RootKeyVersionConsistencyAfterTakeOver' or self._testMethodName == 'test230DisasterTakeover' or self._testMethodName == 'test450OfflineTakeover190':\n # restore to s1 -- s2 -- s3\n #pdb.set_trace()\n if self.globalCfg['full_sync']:\n self.site1.srDisableFullSync(self.site1.getHost(\"WORKER1\"))\n self.site1.fullSync = False\n\n if self._testMethodName == 'test180Recovery':\n self.site3.stopDatabaseLandscapeAsWhole()\n self.site3.waitForDatabaseLandscapeStopped()\n self.site3.srUnregister(self.site3.getHost(\"WORKER1\"))\n self.site3.startDatabaseLandscapeAsWhole()\n self.site3.waitForDatabaseLandscapeStartedByPY()\n\n self.site2.stopDatabaseLandscapeAsWhole()\n self.site3.stopDatabaseLandscapeAsWhole()\n self.site1.srCleanUp(self.site1.getHost(\"WORKER1\"), \"--force\")\n\n self.site1.startDatabaseLandscapeAsWhole()\n self.waitForDatabaseLandscapeStartedByPY(self.site1)\n self.site1.srEnable(self.site1.getHost(\"WORKER1\"), self.site1.getSiteName())\n self.site2.waitForDatabaseLandscapeStopped()\n self.site2.srRegister(self.site2.getHost(\"WORKER1\"), self.site2.getSiteName(), self.site2.remoteInstance, self.site2.remoteHost, self.globalCfg['sync_mode'], self.globalCfg['op_mode'])\n self.site2.startDatabaseLandscapeAsWhole()\n self.waitForDatabaseLandscapeStartedByPY(self.site2)\n\n #Temporary fix\n if self._testMethodName == 'test230DisasterTakeover' or self._testMethodName == 'test450OfflineTakeover190':\n self.checkSecondaryActive(self.site1)\n self.site2.srEnable(self.site2.getHost(\"WORKER1\"), self.site2.getSiteName())\n self.site3.waitForDatabaseLandscapeStopped()\n self.site3.srRegister(self.site3.getHost(\"WORKER1\"), self.site3.getSiteName(), self.site3.remoteInstance, self.site3.remoteHost, self.globalCfg['sync_mode_1'], self.globalCfg['op_mode_1'])\n self.site3.startDatabaseLandscapeAsWhole()\n 
self.waitForDatabaseLandscapeStartedByPY(self.site3)\n\n if self.globalCfg['full_sync']:\n self.site1.srEnableFullSync(self.site1.getHost(\"WORKER1\"))\n self.site1.fullSync = True\n elif self._testMethodName == 'test080Disaster2Tier3Takeover' or self._testMethodName == 'test130DisasterTakeover':\n self.site3.stopDatabaseLandscapeAsWhole()\n self.site1.startDatabaseLandscapeAsWhole()\n self.waitForDatabaseLandscapeStartedByPY(self.site1)\n self.site2.startDatabaseLandscapeAsWhole()\n self.waitForDatabaseLandscapeStartedByPY(self.site2)\n self.site3.waitForDatabaseLandscapeStopped()\n self.site3.srRegister(self.site3.getHost(\"WORKER1\"), self.site3.getSiteName(), self.site3.remoteInstance, self.site3.remoteHost, self.globalCfg['sync_mode_1'], self.globalCfg['op_mode_1'])\n self.site3.startDatabaseLandscapeAsWhole()\n self.waitForDatabaseLandscapeStartedByPY(self.site3)\n elif self._testMethodName == 'test090Failback1':\n # must restore to the original site and role relationship, since there will be recovery test followed,\n # in which the data/log copying will only can be done through pre-configured ssh access without passwd\n #pdb.set_trace()\n if self.globalCfg['full_sync']:\n self.site1.srDisableFullSync(self.site1.getHost(\"WORKER1\"))\n self.site1.fullSync = False\n\n self.site3.srTakeover(self.site3.getHost(\"WORKER1\"))\n self.site3.stopDatabaseLandscapeAsWhole()\n self.site3.waitForDatabaseLandscapeStopped()\n self.site3.srCleanUp(self.site3.getHost(\"WORKER1\"), \"--force\")\n\n\n\n self.site1.stopDatabaseLandscapeAsWhole()\n self.site2.stopDatabaseLandscapeAsWhole()\n self.site3.startDatabaseLandscapeAsWhole()\n self.waitForDatabaseLandscapeStartedByPY(self.site3)\n self.site3.srEnable(self.site3.getHost(\"WORKER1\"), self.site3.getSiteName())\n\n self.site1.waitForDatabaseLandscapeStopped()\n self.site1.srRegister(self.site1.getHost(\"WORKER1\"), self.site1.getSiteName(), self.site1.remoteInstance, self.site1.remoteHost, self.globalCfg['sync_mode'], self.globalCfg['op_mode'])\n self.site1.startDatabaseLandscapeAsWhole()\n self.waitForDatabaseLandscapeStartedByPY(self.site1)\n\n self.site1.srEnable(self.site1.getHost(\"WORKER1\"), self.site1.getSiteName())\n self.site2.waitForDatabaseLandscapeStopped()\n self.site2.srRegister(self.site2.getHost(\"WORKER1\"), self.site2.getSiteName(), self.site2.remoteInstance, self.site2.remoteHost, self.globalCfg['sync_mode_1'], self.globalCfg['op_mode_1'])\n self.site2.startDatabaseLandscapeAsWhole()\n self.waitForDatabaseLandscapeStartedByPY(self.site2)\n (self.site1, self.site2, self.site3) = self.restoreSiteRoles(self.site3, self.site1, self.site2)\n\n if self.globalCfg['full_sync']:\n self.site1.srEnableFullSync(self.site1.getHost(\"WORKER1\"))\n self.site1.fullSync = True\n elif self._testMethodName == 'test240Failback':\n # must restore to the original site and role relationship, since there will be recovery test followed,\n # in which the data/log copying will only can be done through pre-configured ssh access without passwd\n #pdb.set_trace()\n if self.globalCfg['full_sync']:\n self.site1.srDisableFullSync(self.site1.getHost(\"WORKER1\"))\n self.site1.fullSync = False\n\n self.site3.srTakeover(self.site3.getHost(\"WORKER1\"))\n (self.site1, self.site2, self.site3) = self.restoreSiteRoles(self.site3, self.site1, self.site2)\n\n self.site2.stopDatabaseLandscapeAsWhole()\n self.site3.stopDatabaseLandscapeAsWhole()\n self.site2.waitForDatabaseLandscapeStopped()\n self.site2.srRegister(self.site2.getHost(\"WORKER1\"), 
self.site2.getSiteName(), self.site2.remoteInstance, self.site2.remoteHost, self.globalCfg['sync_mode'], self.globalCfg['op_mode'])\n self.site2.startDatabaseLandscapeAsWhole()\n self.waitForDatabaseLandscapeStartedByPY(self.site2)\n\n self.site2.srEnable(self.site2.getHost(\"WORKER1\"), self.site2.getSiteName())\n self.site3.waitForDatabaseLandscapeStopped()\n self.site3.srRegister(self.site3.getHost(\"WORKER1\"), self.site3.getSiteName(), self.site3.remoteInstance, self.site3.remoteHost, self.globalCfg['sync_mode_1'], self.globalCfg['op_mode'])\n self.site3.startDatabaseLandscapeAsWhole()\n self.waitForDatabaseLandscapeStartedByPY(self.site3)\n\n if self.globalCfg['full_sync']:\n self.site1.srEnableFullSync(self.site1.getHost(\"WORKER1\"))\n self.site1.fullSync = True\n elif self._testMethodName == 'test190HAFStopWorker' or self._testMethodName == 'test200HAFStopMaster' or self._testMethodName == 'test290HAFStopWorker' or self._testMethodName == 'test300HAFStopMaster':\n # restart to make sure real role is the same with config role\n #pdb.set_trace()\n if self.globalCfg['full_sync']:\n self.site1.srDisableFullSync(self.site1.getHost(\"WORKER1\"))\n self.site1.fullSync = False\n\n self.site1.stopDatabaseLandscapeAsWhole()\n self.site1.waitForDatabaseLandscapeStopped()\n self.site1.startDatabaseLandscapeAsWhole()\n self.waitForDatabaseLandscapeStartedByPY(self.site1)\n\n if self.globalCfg['full_sync']:\n self.site1.srEnableFullSync(self.site1.getHost(\"WORKER1\"))\n self.site1.fullSync = True\n\n elif self._testMethodName == 'test01LogshippingTier3' or self._testMethodName == 'test02LogshippingTier2' or self._testMethodName == 'test03LogshippingGenScenarios':\n print '---> Unset ES_LOG_BACKUP_INTERVAL'\n self.site1.setLogRetentionOptions(None,None)\n self.setConfigParameter(self.getHost(\"WORKER1\"), \"esserver.ini\", \"ConfigMgrPy.CUSTOMER\", \"database\", \"ES_LOG_BACKUP_INTERVAL\", None)\n # Restart primary for options to take effect\n print time.ctime(), '----> stopping primary...'\n self.site1.stopDatabaseLandscapeAsWhole()\n self.site1.waitForDatabaseLandscapeStopped()\n print time.ctime(), '----> Restarting primary...'\n self.site1.startDatabaseLandscapeAsWhole()\n self.waitForDatabaseLandscapeStartedByPY(self.site1)\n\n self.checkSecondaryActive(self.site1)\n\n print time.ctime(), 'leaving tearDown...'\n print time.ctime(), \"host-role-mappings site1: \"\n for h in range(1, self.site1.getHostNo()):\n print time.ctime(), \" \", self.site1.getLandscape()[(h, \"hostname\")].split(\".\")[0], \"=>\", self.site1.getLandscape()[(h, \"role\")]\n print time.ctime(), \"host-role-mappings site2: \"\n for h in range(1, self.site2.getHostNo()):\n print time.ctime(), \" \", self.site2.getLandscape()[(h, \"hostname\")].split(\".\")[0], \"=>\", self.site2.getLandscape()[(h, \"role\")]\n print time.ctime(), \"host-role-mappings site3: \"\n for h in range(1, self.site3.getHostNo()):\n print time.ctime(), \" \", self.site3.getLandscape()[(h, \"hostname\")].split(\".\")[0], \"=>\", self.site3.getLandscape()[(h, \"role\")]\n\n self.site1.databaseLandscapeInfo()\n self.site2.databaseLandscapeInfo()\n self.site3.databaseLandscapeInfo()\n\n testHaDR_AdvancedParameters_MultiTier.testTearDownSuccess = True", "def collectTests(self, global_ctx):\n pass", "def run_tests():\n os.environ['WORKDIR'] = CONFIG['workdir']\n os.environ['REPORTDIR'] = CONFIG['reportFolder']\n stdout = subprocess.DEVNULL\n if CONFIG['verbose']:\n stdout = None\n # cycle throught version\n total = 0\n valid = 0\n start = time.time()\n 
for version in utils.get_dirs(CONFIG['versionsFolder']):\n os.environ['VERSION'] = version\n utils.copy_dir(os.path.join(CONFIG['versionsFolder'], version), CONFIG['workdir']\\\n , CONFIG['clearWorkdir'])\n # cycle throught use case\n for usecase in utils.get_dirs(CONFIG['testsFolder']):\n os.environ['TESTDIR'] = usecase\n if not CONFIG['quiet']:\n print('UseCase test: {}'.format(usecase))\n log_msg('info', 'UseCase test: {}'.format(usecase))\n try:\n folder = os.path.join(CONFIG['testsFolder'], usecase)\n with open(os.path.join(folder, CONFIG['useConfig'])) as usefp:\n jconfig = json.load(usefp)\n # clear workdir if desired\n if 'clearWorkdir' in jconfig.keys() and jconfig['clearWorkdir']:\n utils.copy_dir(os.path.join(CONFIG['versionsFolder'], version)\\\n , CONFIG['workdir'], CONFIG['clearWorkdir'])\n # print('clearing')\n # raise\n cmd = ['py', os.path.join(folder, jconfig['entrypoint'])]\n total += 1\n if jconfig['runType'] == 'single':\n subprocess.run(cmd, stdout=stdout, stderr=subprocess.PIPE, check=True)\n else:\n for step in range(jconfig['numRuns']):\n if not CONFIG['quiet']:\n print('\\r >Step {}/{} '.format(step+1, jconfig['numRuns'])\\\n , end='', flush=True)\n log_msg('info', 'Step {}/{}'.format(step+1, jconfig['numRuns']))\n subprocess.run(cmd, stdout=stdout, stderr=subprocess.PIPE, check=True)\n if step+1 != jconfig['numRuns']:\n time.sleep(jconfig['interval'])\n except subprocess.CalledProcessError as excp:\n if not CONFIG['quiet']:\n print('Error msg:{}'\\\n .format(excp.stderr.decode().replace('\\r', '').replace('\\n', '|')))\n log_msg('error', excp.stderr.decode())\n else:\n valid += 1\n if not CONFIG['quiet']:\n print('{}.....Passed'.format(usecase))\n log_msg('info', '{} Passed'.format(usecase))\n\n elapse = time.time()-start\n log_msg('info', 'Ran {} tests in {:.3f}s with {} passed'.format(total, elapse, valid))\n print('-'*20)\n print('Ran {} tests in {:.3f}s with {} passed.'.format(total, elapse, valid))\n return total-valid", "def _run_test(self, test_cases):\n # type: (List[TestCaseInterface]) -> None\n if not test_cases:\n return\n self._test_names_to_test_states.update({\n test_cases[0].get_name(): TestCaseState(test_cases[0], test_cases[1:])})\n self._test_names_to_processes.update(\n {test_cases[0].get_name(): subprocess.Popen(\n test_cases[0].run_test_command, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)})\n print('Started executing: {}'.format(test_cases[0].get_name()))", "def runTest(self):\n\t\tself.setUp()\n\t\tself.test_postopProgramming1()", "def setUp(self):\n # create temporary directory\n if not usedir:\n self.test_dir = tempfile.mkdtemp()\n os.chdir(self.test_dir)\n else:\n os.chdir(usedir) \n\n super(SimpleTest, self).setUp()\n\n import SFramework\n self.manager = SFramework.TSStatisticsManager()\n self.manager.getWorkspaces().addObject(self.makeWS())", "def run_tests(self):\n raise NotImplementedError", "def collect_test(test_description):\n #pdb.set_trace()\n print test_description\n test_name = test_description.invoke_string\n env = test_description.env\n print \"starting\", test_name\n os.environ.update(env)\n start_time = datetime.datetime.now()\n stdout_temp = tempfile.TemporaryFile(\"rw\")\n stderr_temp = tempfile.TemporaryFile(\"rw\")\n proc = subprocess.Popen(\n construct_command(test_name),\n shell=True,\n stdin=subprocess.PIPE,\n stdout=stdout_temp,\n stderr=stderr_temp)\n stds = proc.communicate()\n end_time = datetime.datetime.now()\n\n \n\n # need to seek to 0 so that we can read from this file\n stdout_temp.seek(0)\n 
stderr_temp.seek(0)\n \n print \" finished \", test_name, proc.returncode\n return TestResult(\n test_name, proc.returncode,\n end_time - start_time,\n stdout_temp.read(),\n stderr_temp.read())", "def run(self, test, env):\n\n raise NotImplementedError", "def run(self):\n self.check_config()\n\n ports = None\n\n args = self.universal_args()\n\n test_results = []\n command_dir = None\n\n qemu_proc = None\n\n # Resource exists in multiple functions, wants to use the same\n # cleanup block regardless\n self.dtb = None\n\n try:\n if self.config.linux:\n args += self.gen_dtb(args)\n\n # Prepend the machine since we don't need to edit it as in gen_dtb\n args = [\"-machine\", self.MACHINE] + args\n\n # This codepath should go away when test_runner is changed to\n # not use semihosting exit to report\n if self.boot_tests:\n return [self.semihosting_run(args)]\n\n # Logging and terminal monitor\n args += [\"-serial\", \"mon:stdio\"]\n\n # If we're noninteractive (e.g. testing) we need a command channel\n # to tell the guest to exit\n if not self.interactive:\n command_dir, command_args = gen_command_dir()\n args += command_args\n\n # Reserve ADB ports\n ports = alloc_ports()\n # Forward ADB ports in qemu\n args += forward_ports(ports)\n\n qemu_cmd = [self.config.qemu] + args\n qemu_proc = subprocess.Popen(\n qemu_cmd,\n cwd=self.config.atf,\n stdin=self.stdin,\n stdout=self.stdout,\n stderr=self.stderr)\n\n try:\n # Bring ADB up talking to the command port\n self.adb_up(ports[1])\n\n # Run android tests\n for android_test in self.android_tests:\n test_result = self.adb([\"shell\", android_test],\n timeout=(60 * 5),\n force_output=True)\n test_results.append(test_result)\n if not test_result:\n break\n # Finally is used here to ensure that ADB failures do not take away\n # the user's serial console in interactive mode.\n finally:\n if self.interactive:\n # The user is responsible for quitting QEMU\n qemu_proc.wait()\n finally:\n # Clean up generated device tree\n if self.dtb:\n os.remove(self.dtb.name)\n\n unclean_exit = qemu_exit(command_dir, qemu_proc)\n\n fcntl.fcntl(0, fcntl.F_SETFL,\n fcntl.fcntl(0, fcntl.F_GETFL) & ~os.O_NONBLOCK)\n\n if self.adb_transport:\n # Disconnect ADB and wait for our port to be released by qemu\n self.adb_down(ports[1])\n\n if unclean_exit:\n raise RunnerGenericError(\"QEMU did not exit cleanly\")\n return test_results", "def prepare_test(self):\n\n VorpatestLibrary._test_index += 1\n self._status = -1\n self._preserve_files = []\n\n test_variables = _get_test_variables()\n\n test_name = test_variables['${TEST NAME}']\n logger.info(\"Setup test %s\" % test_name)\n\n test_name = test_name.replace(' ', '_').replace('.', '_')\n test_name = \"%03d_%s\" % (VorpatestLibrary._test_index, test_name)\n\n self._execdir = os.path.join(test_variables['${EXECDIR}'], 'run', test_name)\n self._log(\"Execution directory: %s\" % self._execdir)\n\n shutil.rmtree(self._execdir, True)\n os.makedirs(self._execdir)\n os.chdir(self._execdir)", "def _run_suite(suite):\n os.chdir(suite)\n logger.info(\"Submitting suite from %s\", suite)\n proc = subprocess.Popen([\"rose\", \"suite-run\"], stdout=subprocess.PIPE)\n out, err = proc.communicate()\n logger.info(\"Rose communications: %s %s\", str(out), str(err))", "def run_tests():\n test_command = \"pytest -s \" + os.path.join(root_path, \"cases\", \"test_cases.py::TestCases::test_cases\") + \" --html=\" + os.path.join(root_path, \"reports\", \"qa_testing_report.html\")\n\n subprocess.run(test_command, shell=True)", "def runTest(self):\n 
self.setUp()\n self.test_ExtendSpine1()", "def test_generate_all_testing(self):\n pass", "def test_suite():\n\tsuite = unittest.TestSuite()\n\tsuite.addTest(unittest.makeSuite(TestPloneDbFormsManager))\n\treturn suite", "def test_create_system_entire(self):\n pass", "def run_tests(args, applog):\n try:\n additional_args = []\n additional_args.extend([\"--pyargs\", \"dent_os_testbed.test.test_suite\", \"--strict-markers\"])\n additional_args.append(\"--durations=0\")\n if args.stdout:\n additional_args.append(\"--capture=tee-sys\")\n\n suite_groups = args.suite_groups if args.suite_groups else PYTEST_SUITE_GROUPS.keys()\n for sg_name in suite_groups:\n sg = PYTEST_SUITE_GROUPS[sg_name]\n pytest_args = []\n if not sg:\n continue\n pytest._current_suite = sg_name\n pytest_args.append(\"-m\")\n markers_string = sg[0] + \"\".join([(\" or %s\" % suite) for suite in sg[1:]])\n pytest_args.append(markers_string)\n pytest_args.append(\"--html=%s/report_%s.html\" % (LOGDIR, sg_name))\n pytest_args.append(\"--junitxml=%s/junit_%s.xml\" % (LOGDIR, sg_name))\n pytest_args.append(\"--self-contained-html\")\n if args.suite_tests:\n pytest_args.append(\"-k\")\n pytest_args.append(args.suite_tests)\n input_args = additional_args + pytest_args\n applog.info(\"Triggering pytest with args : %s\" % input_args)\n pytest.main(input_args)\n except Exception as e:\n applog.exception(\"Error running tests\", exc_info=e)\n raise", "def _suite(self):\n import mpi.test_application\n import mpi.test_communicator\n import mpi.test_launcher\n\n test_cases = []\n for mod in [\n mpi.test_application,\n mpi.test_communicator,\n mpi.test_launcher,\n ]:\n test_cases += mod.test_classes()\n \n suite = unittest.TestSuite()\n for test_case in test_cases:\n suite.addTest(unittest.makeSuite(test_case))\n\n return suite", "def tearDownClass(cls):\n\n\t\tfor u in (cls.standard_user, cls.manager_user, cls.admin_user):\n\t\t\tLMC.users.del_User(u, no_archive=True)\n\n\t\tfor g in (cls.standard_group, cls.system_group, cls.privileged_group):\n\t\t\tLMC.groups.del_Group(g, no_archive=True)\n\n\t\t# We must delete everything, else they won't be garbage collected\n\t\t# immediately and next run of the same testsuite will not run\n\t\t# properly (this is related to #769, but not exactly the same).\n\t\tdel u, g, cls.manager_user, cls.standard_user, cls.admin_user, \\\n\t\t\tcls.standard_group, cls.system_group, cls.privileged_group\n\n\t\t#gc.collect()\n\n\t\tlogging.notice(_(u'TS: waiting for background events to finish…'))\n\t\ttime.sleep(1.5)", "def pytest_started_handling_group(session, worker):", "def test_suite():\n testSuite = unittest.TestSuite()\n testSuite.addTest(test_polarization.test_suite())\n testSuite.addTest(test_xray.test_suite())\n testSuite.addTest(test_emspectrum.test_suite())\n return testSuite", "def before_productline_steps():\n cmd = ['phantomjs', '--webdriver', '4444']\n click.echo(\"Running command\" + subprocess.list2cmdline(cmd))\n process = subprocess.Popen(cmd)\n RUNNING_TEST_PROCESSES.append(process)", "def start_next_test(self):\n next_test_num = self.test_numbers.popleft()\n self.tests.append(\n self.TEST(\n process=Popen(COMMANDS[next_test_num],\n stdout=PIPE,\n stderr=PIPE),\n number=next_test_num))", "def run(cls): \n tests_to_run = cls.config.TESTS # A list of 5-tuple elements specifying the tests to run. 
See the\n # 'Test Setup' section in config.py.template for more info.\n test_group_name = \"Alchemist Tests\" # A short string identifier for this test run.\n output_dir = cls.config.OUTPUT_DIR # The output file where we write results.\n \n try:\n os.makedirs(output_dir,0o777)\n except:\n pass\n num_tests_to_run = len(tests_to_run)\n\n print(OUTPUT_DIVIDER_STRING)\n if num_tests_to_run == 1:\n print(\"Running %d test in %s\" % (num_tests_to_run, test_group_name))\n else:\n print(\"Running %d tests in %s\" % (num_tests_to_run, test_group_name))\n failed_tests = []\n\n cls.before_run_tests()\n \n spark_settings = []\n for i in cls.config.SPARK_SETTINGS:\n spark_settings.append(i.to_array()[0])\n \n output_settings = []\n for i in cls.config.OUTPUT_SETTINGS:\n output_settings.append(i.to_array()[0])\n \n main_class = \"altest.AlTest\"\n\n for meta_data, opt_sets in tests_to_run:\n print(OUTPUT_DIVIDER_STRING + '\\n')\n# print(\"Running test command: '%s' ... \" % main_class)\n \n meta = {}\n meta_pairs = [i.to_tuple() for i in meta_data]\n for mp in meta_pairs:\n meta[mp[0].replace('-', '_')] = mp[1].replace('0x20', ' ')\n \n meta_settings = []\n for i in meta_data:\n meta_settings.append(i.to_array()[0])\n \n# stdout_filename = \"%s/%s.out\" % (output_dir, meta['short_name'])\n# stderr_filename = \"%s/%s.err\" % (output_dir, meta['short_name'])\n# \n# out_file = open(output_dir + \"/\" + meta['short_name'] + \".out\", 'w')\n\n # Run a test for all combinations of the OptionSets given, then capture\n # and print the output.\n opt_set_arrays = [i.to_array() for i in opt_sets]\n for opt_list in itertools.product(*opt_set_arrays):\n\n cmd = cls.get_spark_submit_cmd(spark_settings, main_class, output_settings, meta_settings, opt_list)\n# print(\"\\nSetting env var SPARK_SUBMIT_OPTS: %s\" % java_opts_str)\n# test_env[\"SPARK_SUBMIT_OPTS\"] = java_opts_str\n print(\"Running command:\")\n print(\"%s\\n\" % cmd)\n Popen(cmd, shell=True, env=test_env).wait()\n\n try:\n src = output_dir + meta['short_name'] + '_latest/'\n src_files = os.listdir(src)\n src_file = src_files[0][:-4]\n new_dir = output_dir + src_file\n os.makedirs(new_dir)\n for file_name in src_files:\n full_file_name = os.path.join(src, file_name)\n if (os.path.isfile(full_file_name)):\n shutil.copy(full_file_name, new_dir)\n except:\n pass\n \n# result_string = cls.process_output(config, meta['short_name'], opt_list,\n# stdout_filename, stderr_filename)\n# print(OUTPUT_DIVIDER_STRING)\n# print(\"\\nResult: \" + result_string)\n# print(OUTPUT_DIVIDER_STRING)\n# if \"FAILED\" in result_string:\n# failed_tests.append(meta['short_name'])\n# \n# \n# out_file.write(result_string + \"\\n\")\n# out_file.flush()\n\n if num_tests_to_run == 1:\n print(\"Finished running %d test in %s.\" % (num_tests_to_run, test_group_name))\n else:\n print(\"Finished running %d tests in %s.\" % (num_tests_to_run, test_group_name))\n# print(\"\\nNumber of failed tests: %d, failed tests: %s\" %\n# (len(failed_tests), \",\".join(failed_tests)))\n print(OUTPUT_DIVIDER_STRING)", "def startTestRun(self, event):\n self.prof = cProfile.Profile()\n event.executeTests = self.prof.runcall", "def startTestHook(self):", "def test(self):\n \"\"\"WARNING: IT IS HIGHLY RECOMMENDED TO HAVE ONE TEST ONLY TO ISOLATE FUNCTIONAL TESTS FROM EACH OTHER. i.e. \n Start a new Python Interpreter and JVM for each test. In the end, it means only one test in this class. 
\"\"\"\n \n logger.info('**Starting test**')\n q = Queue()\n\n p = Process(target=self.client_process1, args=(q,))\n p.start()\n result = q.get()\n p.join()\n self.assertEqual(result, \"OK\") \n logger.debug(\"Restarting dataClay\")\n self.mock.mock.restartDataClay()\n p = Process(target=self.client_process2, args=(q,))\n p.start()\n result = q.get()\n p.join()\n self.assertEqual(result, \"OK\") \n\n logger.info(\"** Test OK!\")", "def main():\n run_test_summary1a()\n run_test_summary1c()\n run_test_summary1c()", "def main_test():\n full = unittest.TestSuite()\n full.addTest(unittest.makeSuite(TestToolOptions))\n full.addTest(unittest.makeSuite(TestBadConfiguration))\n full.addTest(unittest.makeSuite(TestBasicEndpoints))\n full.addTest(unittest.makeSuite(TestMultipleEPG))\n full.addTest(unittest.makeSuite(TestBasicExistingEndpoints))\n full.addTest(unittest.makeSuite(TestBasicExistingEndpointsAddPolicyLater))\n full.addTest(unittest.makeSuite(TestExportPolicyRemoval))\n full.addTest(unittest.makeSuite(TestBasicEndpointsWithContract))\n full.addTest(unittest.makeSuite(TestBasicEndpointMove))\n full.addTest(unittest.makeSuite(TestPolicyChangeProvidedContract))\n full.addTest(unittest.makeSuite(TestChangeL3Out))\n full.addTest(unittest.makeSuite(TestDuplicates))\n full.addTest(unittest.makeSuite(TestDuplicatesTwoL3Outs))\n full.addTest(unittest.makeSuite(TestDeletions))\n\n unittest.main()", "def main():\r\n run_processes('tests.csv', 'labs.csv')", "def run_tests(self):\n manifest = manifestparser.TestManifest(\n manifests=[os.path.join(self.repository_path, self.manifest_path)],\n strict=False)\n\n tests = manifest.active_tests(**mozinfo.info)\n self._mozmill.run(tests, self.options.restart)\n\n # Whenever a test fails it has to be marked, so we quit with the correct exit code\n self.last_failed_tests = self.last_failed_tests or self._mozmill.results.fails\n\n self.testrun_index += 1", "def run_test_suite(self, test_config):\n # Folder to store suite results\n test_config['test_suite_start_time'] = datetime.datetime.now().strftime(\n '%Y%m%dT%H%M%S')\n\n instance = cluster_local.UseLocalInstances()\n for i in range(test_config['repeat']):\n self.run_benchmark(test_config, instance, copy=i)\n\n suite_dir_name = '{}_{}'.format(test_config['test_suite_start_time'],\n test_config['test_id'])\n reporting.process_folder(\n os.path.join(self.workspace, 'results', suite_dir_name),\n report_config=self.auto_test_config)", "def runAll():\n\n loader = unittest.TestLoader()\n test_dir = pkg_resources.resource_filename('frvcpy.test','.')\n suite = loader.discover(test_dir)\n\n runner = unittest.TextTestRunner(verbosity=2)\n runner.run(suite)", "def test_memleaks():\n build()\n sh(\"%s psutil\\\\tests\\\\test_memleaks.py\" % PYTHON)", "def runTest(self):\n self.setUp()\n self.test_ProstateReporting1()", "def test_create_run(self):\n pass", "def run_self_test(self):\n par_values = list(SELF_TEST_LIST)\n\n\n if True:\n # Only ERASynth+ and ERASynth++ have this functionality\n par_values += [(\"reference_tcxo_ocxo\", \"tcxo\")]\n\n num_tests = len(par_values)\n for i, (name, val) in enumerate(par_values):\n print(f\"\\r[{i+1:2d}/{num_tests}] Running...\", end=\"\")\n self.set(name, val)\n\n print(\"\\nDone!\")", "def test_run_exec(self):\n from multiprocessing import Process, Queue\n output = Queue()\n repodir = \"~/codes/ci/tests/repo\"\n processes = []\n for i in range(3):\n processes.append(Process(target=run_exec, args=(repodir, \"ls -la\", output, i)))\n processes[-1].start()\n \n #Wait for the unit 
tests to all finish.\n for p in processes:\n p.join()\n results = [output.get() for p in processes]\n ordered = {o[\"index\"]: o for o in results}\n\n #We consider the test successful if the output files were created and the end time\n #is not None. That means that the process ran correctly and python didn't lose\n #control of the subprocess.\n from os import path\n fullrepo = path.expanduser(repodir)\n for i in range(3):\n self.assertTrue(path.isfile(path.join(fullrepo, \"{}.cidat\".format(i))))\n self.assertIsNotNone(ordered[i][\"end\"])\n self.assertEqual(ordered[i][\"code\"], 0)", "def test_testutils():\n build()\n sh(\"%s psutil\\\\tests\\\\test_testutils.py\" % PYTHON)", "def make_all():\n\n if not MASTER.exists():\n os.makedirs(MASTER)\n members = inspect.getmembers(sys.modules[__name__])\n members = [f for f in members if 'test_' in f[0]]\n for member in members:\n print('Running %s...' % member[0], end='')\n member[1](master=True)\n print('done!')", "def make_all():\n\n if not MASTER.exists():\n os.makedirs(MASTER)\n members = inspect.getmembers(sys.modules[__name__])\n members = [f for f in members if 'test_' in f[0]]\n for member in members:\n print('Running %s...' % member[0], end='')\n member[1](master=True)\n print('done!')", "def run_test(self):\n\n # populate *_ps sets\n self.enter_project_file()\n\n # populate *_dir sets\n self.enter_directories()\n\n # The files in the directories makes up the largest possible set of files\n self.result_files = self.result_files_dir\n self.design_files = self.design_files_dir\n self.design_space_files = self.design_space_files_dir\n self.test_bench_files = self.test_bench_files_dir\n\n # populate *_ms sets\n self.enter_meta_results_file()\n\n # populate *_OK sets\n self.check_analysis_status()\n\n df = {'design_files_dir' : list(self.design_files_dir),'design_files_pr' : list(self.design_files_pr),\n 'design_files_ms' : list(self.design_files_ms), 'design_files_OK' : list(self.design_files_OK)}\n\n ds = {'design_space_files_dir' : list(self.design_space_files_dir),\n 'design_space_files_pr' : list(self.design_space_files_pr)}\n\n rs = {'result_files_dir' : list(self.result_files_dir), 'result_files_ms' : list(self.result_files_ms),\n 'result_files_OK' : list(self.result_files_OK)}\n\n tb = {'test_bench_files_dir' : list(self.test_bench_files_dir),\n 'test_bench_files_ms' : list(self.test_bench_files_ms)}\n\n srl = SummaryReportsLinks(self.result_files_dir)\n\n lf = {'files_linked_from_sum_reps' : srl.get_files(),\n 'folders_linked_from_sum_reps' : srl.get_folders()}\n\n # 'test_bench_files_pr' : list(self.test_bench_files_pr),\n \n json_test = {'design_files' : df, 'design_space_files' : ds, 'result_files' : rs,\n 'test_bench_files' : tb, 'stat_files' : self.stat_files,\n 'files_linked_from_sum_reps' : lf}\n\n with open('test_run.json','wb') as f_out:\n json.dump(json_test, f_out, indent=4)", "def run_tests():\n parser = ArgumentParser()\n parser.add_argument('name',nargs='?',default=None,help=\"Suite or test name\")\n parser.add_argument('-b','--bin-dir',help=\"Directory where Firebird binaries tools are\")\n parser.add_argument('-d','--db-dir',help=\"Directory to use for test databases\")\n parser.add_argument('--archive',action='store_true',help=\"Save last run results to archive\")\n parser.add_argument('--rerun',action='store_true',help=\"Run only tests that don't PASSed in last run\")\n parser.add_argument('--untested',action='store_true',help=\"Run only tests that were UNTESTED in last run\")\n 
parser.add_argument('-v','--verbose',action='store_true',help=\"Be more verbose\")\n parser.add_argument('--verbosity',type=int,choices=[0,1,2],default=1,help=\"Set verbosity; --verbosity=2 is the same as -v\")\n parser.add_argument('-q','--quiet',action='store_true',help=\"Be less verbose\")\n parser.add_argument('-x','--xunit',action='store_true',help=\"Provides test results also in the standard XUnit XML format\")\n parser.add_argument('-e','--expect',type=str,metavar=\"FILENAME\",help=\"Test results file to be used as expeted outcomes\")\n if rpyc_available:\n parser.add_argument('--remote',action='store_true',help=\"Connect to remote fbtest server\")\n\n parser.add_argument('-u','--update',action='store_true',help=\"Update last run results with re-run results\")\n parser.add_argument('-w','--password',help=\"SYSDBA password\")\n parser.add_argument('-o','--host',help=\"Remote Firebird or fbtest host machine identification\")\n parser.add_argument('-p','--person',help=\"QA person name\")\n parser.add_argument('-a','--arch',help=\"Firebird architecture: SS, CS, SC, EM\")\n parser.add_argument('-s','--sequence',type=int,help=\"Run sequence number for this target\")\n parser.add_argument('-k','--skip',help=\"Suite or test name or name of file with suite/test names to skip\")\n parser.add_argument('-c','--client',help=\"Use specified Firebird client library\")\n parser.set_defaults(rerun=False,untested=False,update=False,server=False,register=False,\n remote=False,host='localhost',password='masterkey',\n sequence=1,arch='SS',person=UNKNOWN)\n\n script_runner.run_tests(parser.parse_args())", "def test_suite():\n suite = unittest.TestSuite()\n suite.addTest(unittest.makeSuite(TestIntegration))\n suite.addTest(unittest.makeSuite(TestSection))\n return suite", "def test_script(self) -> None:\n main()", "def test_suite():\n testSuite = unittest.TestSuite()\n testSuite.addTest(test_spec(\"test_cmd_parser\"))\n return testSuite", "def test_suite():\n suite = unittest.TestSuite()\n suite.addTests(unittest.makeSuite(PrimesTests))\n suite.addTests(unittest.makeSuite(OtherTests))\n return suite", "def suite():\n\n testSuite = common.unittest.TestSuite()\n\n cdatafuncs = [niclassdata] # non-indexing data tests\n cdatafuncs.append(iclassdata) # indexing data tests\n\n heavy = common.heavy\n # Choose which tests to run in classes with autogenerated tests.\n if heavy:\n autoprefix = 'test' # all tests\n else:\n autoprefix = 'test_l' # only light tests\n\n niter = 1\n for i in range(niter):\n # Tests on query data.\n for cdatafunc in cdatafuncs:\n for cdata in cdatafunc():\n class_ = eval(cdata[0])\n if heavy or not class_.heavy:\n suite_ = common.unittest.makeSuite(class_,\n prefix=autoprefix)\n testSuite.addTest(suite_)\n # Tests on query usage.\n testSuite.addTest(common.unittest.makeSuite(ScalarTableUsageTestCase))\n testSuite.addTest(common.unittest.makeSuite(MDTableUsageTestCase))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage1))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage2))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage3))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage4))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage5))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage6))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage7))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage8))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage9))\n 
testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage10))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage11))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage12))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage13))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage14))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage15))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage16))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage17))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage18))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage19))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage20))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage21))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage22))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage23))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage24))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage25))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage26))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage27))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage28))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage29))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage30))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage31))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage32))\n\n return testSuite", "def getTestSuite():\n\n suite1 = unittest.TestLoader().loadTestsFromTestCase(TestDataProcs)\n return unittest.TestSuite([suite1,suite2])", "def _prepare_test_cases(ptfhost, request):\n logger.info(\"Preparing SAI test environment.\")\n _create_sai_test_folders(ptfhost)\n _copy_sai_test_cases(ptfhost, request)", "def run_transcript_tests(self, callargs):\n class TestMyAppCase(Cmd2TestCase):\n cmdapp = self\n\n self.__class__.testfiles = callargs\n sys.argv = [sys.argv[0]] # the --test argument upsets unittest.main()\n testcase = TestMyAppCase()\n runner = unittest.TextTestRunner()\n runner.run(testcase)", "def run_tests(virtual_env):\n signal.signal(signal.SIGINT, signal.SIG_IGN)\n args = [\n 'python',\n 'setup.py',\n 'nosetests',\n '--with-coverage',\n '--with-xunit',\n ]\n subprocess.call(args, cwd=abspath(join(HOLLAND_ROOT, 'holland-core')), env=virtual_env)\n subprocess.call(['coverage', 'xml'], cwd=abspath(join(HOLLAND_ROOT, 'holland-core')), env=virtual_env)\n for plugin_dir in open(join(HOLLAND_ROOT, 'plugins', 'ACTIVE')):\n plugin_dir = plugin_dir.rstrip()\n plugin_path = join(HOLLAND_ROOT, 'plugins', plugin_dir)\n subprocess.call(args, cwd=plugin_path, env=virtual_env)\n subprocess.call(['coverage', 'xml'], cwd=plugin_path, env=virtual_env)\n for addon_dir in open(join(HOLLAND_ROOT, 'addons', 'ACTIVE')):\n addon_dir = addon_dir.rstrip()\n addon_path = join(HOLLAND_ROOT, 'addons', addon_dir)\n subprocess.call(args, cwd=addon_path, env=virtual_env)\n subprocess.call(['coverage', 'xml'], cwd=plugin_path, env=virtual_env)\n #return subprocess.call(args, env=virtual_env)", "def test_suite():\n testSuite = unittest.TestSuite()\n testSuite.addTest(test_h5fs(\"test_mode\"))\n testSuite.addTest(test_h5fs(\"test_path_splitting\"))\n testSuite.addTest(test_h5fs(\"test_link_mixing\"))\n return testSuite", "def main(argv):\n global g_test_root_dir\n global g_temp_filename\n\n if len(argv) < 2:\n print(\"invoke this script as python 
collectUnitTestRunTime.py 10 'python run.py_path/run.py --wipe \"\n \"--test dir_to_test/test1,python run.py_path/run.py --wipe --test dir_to_test2/test2,...' True\\n\")\n sys.exit(1)\n else: # we may be in business\n repeat_number = int(argv[1]) # number of times to run a unit test\n command_lists = argv[2] # list of unit tests to run\n\n for command in command_lists.split(','): # for each command in the list\n # run command repeat_number of times and collect results into result_dict\n run_commands(command, repeat_number, g_temp_filename)", "def run_all_tests():\n remove_dbs()\n run_training_tests()\n run_custom_training_tests()\n run_training_save_tests()\n run_validation_tests()\n run_feature_extraction_tests()", "def start_test_exec(cls):\n time_str = cls.get_current_time()\n os.system(\"robot -l ./logs/log_{0}.html -r ./logs/report_{0}.html -o ./logs/output_{0}.xml \\\n ./test_suite/{1}\".format(time_str, test_suite))", "def run_test(self):\n raise NotImplementedError", "def main(args):\n\n run_all_tests = len(args) == 1 and args[0] == 'all'\n\n python = 'python -m ' # how to call python \n if len(sys.argv) > 1 and sys.argv[1] == 'wine':\n python = 'C:\\\\Python26\\\\python.exe ' # works for wine\n \n data_path = ' ' + prepare_data(run_all_tests)\n \n command = ' bbob_pproc ' # + join_path(os.path.dirname(os.path.realpath(__file__)), 'rungeneric.py ')\n \n copy_latex_templates()\n print('LaTeX templates copied.')\n \n print('*** testing module bbob_pproc ***')\n t0 = time.time()\n print(python + command + '--conv' + ' --no-svg --settings=grayscale' +\n join_path(data_path, 'BFGS_ros_noiseless.tgz'))\n result = os.system(python + command + '--conv' + ' --no-svg --settings=grayscale' +\n join_path(data_path, 'BFGS_ros_noiseless.tgz'))\n print('** subtest 1 finished in ', time.time() - t0, ' seconds')\n assert result == 0, 'Test failed: rungeneric on one algorithm with option --conv.'\n\n result = run_latex_template(\"templateBBOBarticle.tex\")\n assert not result, 'Test failed: error while generating pdf from templateBBOBarticle.tex.'\n\n t0 = time.time()\n print(python + command + '--no-svg --settings=grayscale' + join_path(data_path, 'RS.tgz'))\n result = os.system(python + command + '--no-svg --settings=grayscale' + join_path(data_path, 'RS.tgz'))\n print('** subtest 1 finished in ', time.time() - t0, ' seconds')\n assert result == 0, 'Test failed: rungeneric on one bi-objective algorithm.'\n\n # Latex templates are not prepared yet for bi-objective case. 
\n# result = run_latex_template(\"templateBBOBarticle.tex\")\n# assert not result, 'Test failed: error while generating pdf from templateBBOBarticle.tex.'\n\n if run_all_tests: \n t0 = time.time()\n print(time.asctime())\n result = os.system(python + command + # ' --omit-single ' +\n join_path(data_path, 'BIPOP-CMA-ES_hansen_noiseless.tgz') +\n join_path(data_path, 'MCS_huyer_noiseless.tgz') +\n join_path(data_path, 'NEWUOA_ros_noiseless.tgz') +\n join_path(data_path, 'RANDOMSEARCH_auger_noiseless.tgz') +\n join_path(data_path, 'BFGS_ros_noiseless.tgz'))\n print('** subtest 2 finished in ', time.time() - t0, ' seconds')\n assert result == 0, 'Test failed: rungeneric on many algorithms.'\n \n result = run_latex_template(\"templateBBOBmany.tex\")\n assert not result, 'Test failed: error while generating pdf from templateBBOBmany.tex.'\n \n t0 = time.time()\n result = os.system(python + command + '--conv' +\n join_path(data_path, 'SMAC-BBOB_hutter_noiseless.tgz') +\n join_path(data_path, 'lmm-CMA-ES_auger_noiseless.tgz'))\n print('** subtest 3 finished in ', time.time() - t0, ' seconds')\n assert result == 0, 'Test failed: rungeneric on two algorithms with option --conv.'\n \n result = run_latex_template(\"templateBBOBcmp.tex\")\n assert not result, 'Test failed: error while generating pdf from templateBBOBcmp.tex.'\n \n t0 = time.time()\n result = os.system(python + command + ' --omit-single ' +\n join_path(data_path, 'DE-PSO_garcia-nieto_noiseless.tgz') +\n join_path(data_path, 'VNS_garcia-martinez_noiseless.tgz'))\n print('** subtest 4 finished in ', time.time() - t0, ' seconds')\n assert result == 0, 'Test failed: rungeneric on two algorithms with option --omit-single.'\n \n result = run_latex_template(\"templateBBOBcmp.tex\")\n assert not result, 'Test failed: error while generating pdf from templateBBOBcmp.tex.'\n \n t0 = time.time()\n result = os.system(python + command + ' --expensive ' +\n join_path(data_path, 'VNS_garcia-martinez_noiseless.tgz'))\n print('** subtest 5 finished in ', time.time() - t0, ' seconds')\n assert result == 0, 'Test failed: rungeneric on one algorithm with option --expensive.'\n \n result = run_latex_template(\"templateBBOBarticle.tex\")\n assert not result, 'Test failed: error while generating pdf from templateBBOBarticle.tex.'\n\n print('launching doctest (it might be necessary to close a few pop up windows to finish)')\n t0 = time.time()\n\n if 1 < 3:\n failure_count = 0\n test_count = 0\n #doctest.testmod(report=True, verbose=True) # this is quite cool!\n # go through the py files in the bbob_pproc folder\n currentPath = os.getcwd() \n newPath = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\n os.chdir(newPath) \n for root, dirnames, filenames in os.walk(os.path.dirname(os.path.realpath(__file__))):\n for filename in fnmatch.filter(filenames, '*.py'):\n current_failure_count, current_test_count = doctest.testfile(\n os.path.join(root, filename), report=True, module_relative=False) \n failure_count += current_failure_count\n test_count += current_test_count\n if current_failure_count:\n print('doctest file \"%s\" failed' % os.path.join(root, filename))\n os.chdir(currentPath)\n else:\n stdout = sys.stdout\n fn = '_bbob_pproc_doctest_.txt'\n try:\n with open(fn, 'w') as f:\n sys.stdout = f\n doctest.testmod(report=True)\n finally:\n sys.stdout = stdout\n process_doctest_output(fn)\n print('** doctest finished in ', time.time() - t0, ' seconds')\n # print(' more info in file _bbob_pproc_doctest_.txt)')\n print('*** done testing 
module bbob_pproc ***')\n \n if (failure_count > 0):\n raise ValueError('%d of %d tests failed' % (failure_count, test_count))", "def runTest(self):\r\n self.setUp()\r\n self.test_SegmentEditor1()", "def add_test(self,test):\n l = test.id.split('.')\n s_obj = self\n while len(l) > 0:\n s_name = l.pop(0)\n if len(l) > 0:\n if s_name in s_obj.suites:\n s_obj = s_obj.suites[s_name]\n else:\n new_suite = Suite(s_name,parent=s_obj)\n s_obj.suites[s_name] = new_suite\n s_obj = new_suite\n s_obj.tests.append(test)", "def runTest(self):\n self.setUp()\n self.test_SegmentEditor1()", "def test_launch_composition(self):\n pass", "def test_ducts_with_subprocess(self):\n assert_that(SUBPROCESS_TEST_SCRIPT).exists()\n proc = None\n parent = None\n try:\n parent = MessageDuctParent.psuedo_anonymous_parent_duct()\n parent.bind()\n proc = subprocess.Popen(\n [sys.executable, SUBPROCESS_TEST_SCRIPT, parent.listener_address], env={'PYTHONPATH': ROOT_DIR}\n )\n assert_that(parent.listen()).is_true()\n for _ in range(100):\n parent.send(\"pingpong\")\n parent.poll(1)\n assert_that(parent.recv()).is_equal_to(\"pingpong\")\n parent.send(None)\n time.sleep(1)\n finally:\n if parent:\n parent.close()\n if proc:\n proc.terminate()", "def main():\n\n run_manual_session()\n # run_automated_session()", "def django_run_tests_to_scratch_buffer():\r\n \r\n app = wingapi.gApplication\r\n cmdline, dirname, err = _get_base_cmdline()\r\n if err is not None:\r\n title = _(\"Failed to run Django unit tests\")\r\n msg = _(\"Could not run Django : %s\") % err\r\n app.ShowMessageDialog(title, msg)\r\n return\r\n cmdline += ['test']\r\n editor = app.ScratchEditor(_(\"Django Unit Tests\"), 'text/plain')\r\n doc = editor.GetDocument()\r\n doc.SetText(_('Starting Django Unit Tests at %s:\\n') % time.ctime())\r\n\r\n handler = app.AsyncExecuteCommandLine(cmdline[0], dirname, *cmdline[1:])\r\n timeout = time.time() + 120\r\n def poll(timeout=timeout):\r\n kill = time.time() > timeout\r\n if kill or handler.Iterate():\r\n stdout, stderr, err, status = handler.Terminate(kill)\r\n if kill:\r\n msg = _(\"Could not run Django unit tests: Sub-process timed out\")\r\n elif err is not None:\r\n msg = _(\"Could not run Django unit tests: Sub-process failed with exit_status=%s, errno=%s\") % (str(status), str(err))\r\n else:\r\n msg = _(\"Django unit tests passed successfully\")\r\n if stderr:\r\n msg += '\\n\\nSTDERR:\\n\\n' + stderr\r\n msg += '\\n\\n' + _kMissingPythonMessage\r\n if stdout:\r\n msg += '\\n\\nSTDOUT:\\n\\n' + stdout\r\n if msg == _(\"Django unit tests passed successfully\"):\r\n title = msg\r\n msg = _(\"Django unit tests passed successfully with no output\")\r\n app.ShowMessageDialog(title, msg)\r\n else:\r\n editor = app.ScratchEditor(_(\"Django Unit Tests\"), 'text/plain')\r\n doc = editor.GetDocument()\r\n doc.InsertChars(doc.GetLength(), msg)\r\n return False\r\n else:\r\n editor = app.ScratchEditor(_(\"Django Unit Tests\"), 'text/plain')\r\n doc = editor.GetDocument()\r\n doc.InsertChars(doc.GetLength(), ''.join(handler.stderr))\r\n handler.stderr = []\r\n return True\r\n \r\n wingapi.gApplication.InstallTimeout(100, poll)", "def getTestSuite():\n test_suite = unittest.TestSuite([])\n\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestDistReaders))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestPySnpTools))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestDistributedBed))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestFileCache))\n 
test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestUtilTools))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestIntRangeSet))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestSnpDocStrings))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestPstDocStrings))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestKrDocStrings))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestSnpGen))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestGenerate))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestExampleFile))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestPstMemMap))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestSnpMemMap))\n test_suite.addTests(NaNCNCTestCases.factory_iterator())\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestPstReader))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestKernelReader))\n\n return test_suite", "def runTest(self):\n self.setUp()\n self.test_MarkupsInViewsSelfTest1()", "def setUp(self):\n self.shell = DummyShell()\n self.executor = executor.Executor(self.shell)", "def tests():", "def testsuite():\n \n tests = unittest.TestSuite()\n\n parse_tests = unittest.makeSuite(ParseTestCase, 'test')\n tests = unittest.TestSuite( (tests, parse_tests) )\n\n return tests", "def test_jam_attempt(self):\n self.run_test_suites(self.jam_test_suite_list)", "def tearDownClass(cls):\n for stack in cls.experiment:\n stack.restore_directory()", "def testRunSmoke(self):\n stage = self.ConstructStage()\n with self.OutputCapturer():\n stage.Run()", "def setUpModule():\n global primary_ecu_key\n global key_timeserver_pub\n global key_timeserver_pri\n global clock\n\n \n\n # Load the private key for this Primary ECU.\n key_pub = demo.import_public_key('primary')\n key_pri = demo.import_private_key('primary')\n primary_ecu_key = uptane.common.canonical_key_from_pub_and_pri(\n key_pub, key_pri)\n\n # Load the public timeserver key.\n key_timeserver_pub = demo.import_public_key('timeserver')\n key_timeserver_pri = demo.import_private_key('timeserver')\n\n # Generate a trusted initial time for the Primary.\n clock = tuf.formats.unix_timestamp_to_datetime(int(time.time()))\n clock = clock.isoformat() + 'Z'\n tuf.formats.ISO8601_DATETIME_SCHEMA.check_match(clock)\n\n # Currently in development.\n\n # Start the timeserver, director, and oem repo for this test,\n # using subprocesses, and saving those processes as:\n #process_timeserver\n #process_director\n #process_oemrepo\n # to be stopped in tearDownModule below.", "def test_functionality(self):\n self.templateName = \"Test Template\"\n self.browserObject = globalVars.browserObject\n\n self.preRunSetup()\n \n self.runTestCase()\n \n self.postRunCleanup()", "def execute_testsets(testsets):\n group_results = dict() #results, by group\n group_failure_counts = dict()\n total_failures = 0\n myinteractive = False\n\n for testset in testsets:\n mytests = testset.tests\n myconfig = testset.config\n mybenchmarks = testset.benchmarks\n\n #Make sure we actually have tests to execute\n if not mytests and not mybenchmarks:\n # no tests in this test set, probably just imports.. 
skip to next test set\n break\n\n myinteractive = True if myinteractive or myconfig.interactive else False\n\n #Run tests, collecting statistics as needed\n for test in mytests:\n #Initialize the dictionaries to store test fail counts and results\n if test.group not in group_results:\n group_results[test.group] = list()\n group_failure_counts[test.group] = 0\n\n result = run_test(test, test_config = myconfig)\n result.body = None # Remove the body, save some memory!\n\n if not result.passed: #Print failure, increase failure counts for that test group\n logging.error('Test Failed: '+test.name+\" URL=\"+test.url+\" Group=\"+test.group+\" HTTP Status Code: \"+str(result.response_code))\n\n if test.validators is not None:\n for validator in test.validators:\n if validator.passed == False:\n logging.warning(\" Validation Failed: \" + str(validator))\n\n #Increment test failure counts for that group (adding an entry if not present)\n failures = group_failure_counts[test.group]\n failures = failures + 1\n group_failure_counts[test.group] = failures\n\n else: #Test passed, print results\n logging.info('Test Succeeded: '+test.name+\" URL=\"+test.url+\" Group=\"+test.group)\n\n #Add results for this test group to the resultset\n group_results[test.group].append(result)\n\n # handle stop_on_failure flag\n if not result.passed and test.stop_on_failure is not None and test.stop_on_failure:\n print 'STOP ON FAILURE! stopping test set execution, continuing with other test sets'\n break\n\n for benchmark in mybenchmarks: # Run benchmarks, analyze, write\n if not benchmark.metrics:\n logging.debug('Skipping benchmark, no metrics to collect')\n continue\n\n logging.info(\"Benchmark Starting: \"+benchmark.name+\" Group: \"+benchmark.group)\n curl = configure_curl(benchmark, myconfig)\n benchmark_result = run_benchmark(curl, benchmark, myconfig)\n print benchmark_result\n logging.info(\"Benchmark Done: \"+benchmark.name+\" Group: \"+benchmark.group)\n\n if benchmark.output_file: # Write file\n write_method = OUTPUT_METHODS[benchmark.output_format]\n my_file = open(benchmark.output_file, 'w') # Overwrites file\n logging.debug(\"Benchmark writing to file: \" + benchmark.output_file)\n write_method(my_file, benchmark_result, benchmark, test_config = myconfig)\n my_file.close()\n\n if myinteractive:\n # a break for when interactive bits are complete, before summary data\n print \"===================================\"\n\n #Print summary results\n for group in sorted(group_results.keys()):\n test_count = len(group_results[group])\n failures = group_failure_counts[group]\n total_failures = total_failures + failures\n if (failures > 0):\n print u'Test Group '+group+u' FAILED: '+ str((test_count-failures))+'/'+str(test_count) + u' Tests Passed!'\n else:\n print u'Test Group '+group+u' SUCCEEDED: '+ str((test_count-failures))+'/'+str(test_count) + u' Tests Passed!'\n\n return total_failures" ]
[ "0.67804116", "0.63147813", "0.62815255", "0.62448347", "0.6162691", "0.611946", "0.61153394", "0.60913515", "0.60768825", "0.6026399", "0.59964746", "0.59423214", "0.59258866", "0.5913008", "0.59060097", "0.59013414", "0.5897372", "0.5894514", "0.5878061", "0.58522886", "0.5847071", "0.58143854", "0.5766134", "0.57527477", "0.5734746", "0.5729053", "0.5706236", "0.5696112", "0.56879985", "0.56817776", "0.56783485", "0.5671613", "0.5651149", "0.5644916", "0.56365854", "0.5625074", "0.56249744", "0.5623856", "0.56182367", "0.56151706", "0.5614454", "0.56111544", "0.560754", "0.56037253", "0.5602394", "0.56004137", "0.5599987", "0.5593795", "0.5577717", "0.5576752", "0.55763733", "0.55684423", "0.55682874", "0.55598205", "0.55583864", "0.5551201", "0.5547507", "0.55468124", "0.5545386", "0.5538934", "0.55386746", "0.55376047", "0.5533091", "0.5532945", "0.5532945", "0.5522785", "0.5520489", "0.5518876", "0.5517144", "0.55066144", "0.5501", "0.5498669", "0.5491816", "0.54868853", "0.54866415", "0.54834247", "0.54802966", "0.54761505", "0.5475142", "0.54711664", "0.5469856", "0.546984", "0.5467723", "0.5464876", "0.5461055", "0.5459068", "0.54564285", "0.54522294", "0.5449176", "0.5447767", "0.5445996", "0.54367006", "0.5433599", "0.542902", "0.5420924", "0.54181314", "0.54169136", "0.54164606", "0.54125863", "0.541187" ]
0.60532445
9
TEST THE USER ADD SUCCESS
def test_create_valid_user_success(self):
    payload = {
        "email": "test@gmail.com",
        "name": "Test",
        'password': 'test123'
    }

    res = self.client.post(CREATE_USER_URL, payload)

    self.assertEqual(res.status_code, status.HTTP_201_CREATED)
    user = get_user_model().objects.get(**res.data)
    self.assertTrue(user.check_password(payload['password']))
    self.assertNotIn('password', res.data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_add_user(self):\n pass", "def testAdd1(self):\n self.assertEquals(models.SUCCESS, self.users.add(\"userA\", \"password\"))", "def test_addUser(self):\n self.new_user.saveUser()\n self.assertEqual(len(User.users_list),1)", "def test_main_add_user(self):\n with self.client:\n response = self.client.post(\n '/',\n data=dict(username='michael', email='michael@sonotreal.com'),\n follow_redirects=True\n )\n self.assertEqual(response.status_code, 200)\n self.assertIn(b'<h1>All Users</h1>', response.data)\n self.assertNotIn(b'<p>No users!</p>', response.data)\n self.assertIn(b'michael', response.data)", "def test_resource_user_resource_add_user_post(self):\n pass", "def test_add_user(self):\n with self.client:\n auth_header = login_test_user(self.client)\n response = self.client.post('/users', \n data=json.dumps(dict(\n username=\"neil\",\n email=\"neilb14@mailinator.com\",\n password=\"password123\"\n )),\n content_type='application/json',\n headers=auth_header\n )\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 201)\n self.assertIn('neilb14@mailinator.com was added!', data['message'])\n self.assertIn('success', data['status'])", "def test_000_add_user(self):\n # This case is always passed because it's tested in setup_module,\n # If setup module fails, this case will never run\n pass", "def testAdd2(self):\n self.assertEquals(models.SUCCESS, self.users.add(\"userC\", \"password\"))\n self.assertEquals(models.SUCCESS, self.users.add(\"userD\", \"password\"))", "def test_resource_user_resource_add_users_post(self):\n pass", "def test_add_new_user(self):\n\n result = self.client.post(\"/add_new_user\",\n data={\"user_name\": \"rachel\", \"password\": \"123\", \"email\": \"rachel@rachel.com\"},\n follow_redirects=True)\n self.assertIn(\"<p>Please sign in</p>\", result.data)", "def testAddExists(self):\n self.assertEquals(models.SUCCESS, self.users.add(\"userB\", \"password\"))\n self.assertEquals(models.ERR_USER_EXISTS, self.users.add(\"userB\", \"password\"))", "def test_add_user(self):\n\n with self.client:\n result = self.client.post('/users', data={\n \"user-name\": \"cool-guy-johnny-B\",\n \"first-name\": \"johnny\",\n \"last-name\": \"bravo\",\n \"image-url\": \"https://static.independent.co.uk/s3fs-public/thumbnails/image/2018/06/25/14/cat-lizard.jpg\"\n }, follow_redirects=True)\n self.assertEqual(result.status_code, 200)\n self.assertIn(b'<h5 class=\"card-title\">cool-guy-johnny-B</h5>',\n result.data)", "def test_user_add(self):\n\n result = self.client.post(\"/login\", data={\"user_email\": \"bobbybob@gmail.com\", \"user_password\": \"1234\"},\n follow_redirects=True)\n self.assertIn(b\"Bobby\", result.data)", "def test_add_user(self, api):\n resp = api.add_user(api.get_user())\n self.builder.delete_user(api.get_user())\n assert resp.status_code == 201", "def test_createUser_single(self):\n #TODO: this and other tests", "def test_011_add_same_user(self):\n testflow.step(ADD_USR_MSG, TEST_USER1)\n assert not USER_CLI.run('add', TEST_USER1)[0]", "def test_create_user(self):\n pass", "def test_create_user(self):\n pass", "def test_create_user(self):\n pass", "def test_add_form(self):\n\n with self.client:\n result = self.client.get('/users/new')\n self.assertEqual(result.status_code, 200)\n self.assertIn(b'<h1 class=\"display-5 mt-4\">Create a user</h1>', result.data)", "def test_save_user(self):\n self.new_user.save_user()\n self.assertEqual(len(User.user_list), 1)", "def test_save_user(self):\n self.new_user.save_user()\n 
self.assertEqual(len(User.user_list), 1)", "def test_adduser(self):\n self.run_function(\"group.add\", [self._group], gid=self._gid)\n self.run_function(\"user.add\", [self._user])\n self.assertTrue(self.run_function(\"group.adduser\", [self._group, self._user]))\n group_info = self.run_function(\"group.info\", [self._group])\n self.assertIn(self._user, str(group_info[\"members\"]))\n # try add a non existing user\n self.assertFalse(\n self.run_function(\"group.adduser\", [self._group, self._no_user])\n )\n # try add a user to non existing group\n self.assertFalse(\n self.run_function(\"group.adduser\", [self._no_group, self._user])\n )\n # try add a non existing user to a non existing group\n self.assertFalse(\n self.run_function(\"group.adduser\", [self._no_group, self._no_user])\n )", "def test_create_user_page(self):\n url = reverse('admin:core_user_add')\n res = self.client.get(url)\n\n self.assertEqual(res.status_code, 200)", "def test_create_user_page(self):\n url = reverse('admin:core_user_add')\n res = self.client.get(url)\n\n self.assertEqual(res.status_code, 200)", "def test_create_user_page(self):\n url = reverse('admin:core_user_add')\n res = self.client.get(url)\n\n self.assertEqual(res.status_code, 200)", "def test_create_user_page(self):\n url = reverse('admin:core_user_add')\n res = self.client.get(url)\n\n self.assertEqual(res.status_code, 200)", "def test_success_register():\n assert not register(\"abc123\", \"qwerty123456\", \"Bob\", \"John\", \"abc@def.com\")\n\n # Check that user data was updated and that the user is logged in\n new_user = data.users.get(\"abc123\")\n assert new_user\n assert new_user.logged_in == True", "def test_users_post(self):\n pass", "def test_users_post(self):\n pass", "def test_user_registration(self):\r\n result=self.user.get_user_register(\"Stephen\",\" Ochieng\",\"stephenochieng955@mail.com\",\"stephenochieng\",\"eat\")\r\n self.assertEqual(1,result,\"User registration successful\")", "def test_check_user(self):\n self.new_user.save_user()\n test_user = User(\"Test\", \"user\", \"test\", \"walIas15\")\n test_user.save_user()\n test_user.check_user(\"test\", \"walIas15\")", "def test_add_user_to_g(self):\r\n\r\n with app.test_request_context():\r\n u1 = User.query.filter_by(username='testuser').one()\r\n\r\n add_user_to_g()\r\n self.assertIsNone(g.user)\r\n do_login(u1)\r\n add_user_to_g()\r\n self.assertEqual(g.user, u1)", "def test_save_users(self):\n\n self.new_users.save_users() # saving the new user\n self.assertEqual(len(User.user_list), 1)", "def test_create_user(self):\n self.login()\n res = self.submit()\n\n assert res.status_code == 200", "def test_new_user(self):\n resp = self.app.post('/users', data=json.dumps(self.test_user1_data))\n assert resp.status_code == 200\n data = json.loads(resp.data)\n for key in ['first_name', 'last_name', 'userid', 'groups']:\n assert key in data\n assert data['first_name'] == self.test_user1_first\n assert data['last_name'] == self.test_user1_last\n assert data['userid'] == self.test_user1_userid\n for groupid in self.test_user1_groups:\n assert groupid in data['groups']", "def test_add_duplicate(self, api):\n self.builder.add_user(api.get_user())\n resp = api.add_user(api.get_user())\n self.builder.del_user(api.get_user())\n assert resp.status_code == 304", "def test_register_user_correct(self):\n result = self.client.post(\"/users\", data={\"username\":\"test_user2\"}, follow_redirects=True)\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"Player created! 
Please login\", result.data)", "def test_createuser():\n url = baseUrl + userurl\n payload = user_payload\n logging.info(\"Create a user: %s\" % payload)\n r = requests.post(url, data=json.dumps(payload), headers=header)\n assert r.status_code == 201\n resp = r.text\n assert resp == 'Success'", "def test_api_can_create_users(self):\n res = self.client().post('/api/v1/user/', data = self.req)\n self.assertEquals(res.status_code, 200)\n self.assertIn('mary', str(res.data))", "def test_api_user_post(self):\n pass", "async def test_add_user(\n hass: HomeAssistant, provider, capsys, hass_storage: dict[str, Any]\n) -> None:\n data = provider.data\n await script_auth.add_user(\n hass, provider, Mock(username=\"paulus\", password=\"test-pass\")\n )\n\n assert len(hass_storage[hass_auth.STORAGE_KEY][\"data\"][\"users\"]) == 1\n\n captured = capsys.readouterr()\n assert captured.out == \"Auth created\\n\"\n\n assert len(data.users) == 1\n data.validate_login(\"paulus\", \"test-pass\")", "def test_teams_add_user_to_team_v2(self):\n pass", "def test_add_user_view(self):\n target_url = url_for('users.add_user')\n redirect_url = url_for('users.login', next=target_url)\n response = self.client.get(target_url)\n self.assertEqual(response.status_code, 302)\n self.assertRedirects(response, redirect_url)", "def test_append_user(self):\n print('(' + self.test_append_user.__name__+')',\n self.test_append_user.__doc__)\n new_username = self.connection.append_user(\n NEW_PATIENT_USERNAME, NEW_PATIENT)\n # test appended ok\n self.assertIsNotNone(new_username)\n # check appended the same user data\n self.assertEqual(new_username, NEW_PATIENT_USERNAME)\n # check the added user in db has the same data\n get_new_patient = self.connection.get_user(new_username)\n self.assertDictContainsSubset(\n NEW_PATIENT['restricted_profile'], get_new_patient['restricted_profile'])\n self.assertDictContainsSubset(\n NEW_PATIENT['public_profile'], get_new_patient['public_profile'])", "def test_create_user(self):\n #open the django admin page.\n self.selenium.get(\n '%s%s' % (self.live_server_url, \"/admin\")\n )\n\n #fill in login information of admin\n username = self.selenium.find_element_by_id(\"id_username\")\n username.send_keys(\"admin\")\n password = self.selenium.find_element_by_id(\"id_password\")\n password.send_keys(\"admin\")\n\n #locate login button and click it.\n self.selenium.find_element_by_xpath('//input[@value=\"Inloggen\"]').click()\n self.selenium.get(\n '%s%s' % (self.live_server_url, \"/admin/auth/user/add/\")\n )\n\n # Fill the create user form with username and password\n self.selenium.find_element_by_id(\"id_username\").send_keys(\"test\")\n self.selenium.find_element_by_id(\"id_password1\").send_keys(\"test1234\")\n self.selenium.find_element_by_id(\"id_password2\").send_keys(\"test1234\")\n\n # Forms can be submitted directly by calling its method submit\n self.selenium.find_element_by_id(\"user_form\").submit()\n self.assertIn(\"Change user\", self.selenium.title)", "def test_create_user_page(self):\n\n # Get the admin url and send a GET request\n url = reverse('admin:core_user_add')\n res = self.client.get(url)\n\n # Assertions\n self.assertEqual(res.status_code, 200)", "def test_user(self):\n return True", "def test_add_basic(self, db_session: Session) -> None:\n user_service = get_user_service(db_session)\n profile = RandomDbAdder().random_profile(db_session)\n basic_user_dict = InputDictGenerator().random_basic_user(profile.name)\n\n response = user_service.add_user(**basic_user_dict)\n\n assert 
response == {\n \"status\": \"success\",\n \"code\": 200,\n \"data\": {\"message\": f\"User '{basic_user_dict['username']}' successfully added to database.\"},\n }\n actual_user = db_session.query(Users).filter_by(username=basic_user_dict[\"username\"]).one()\n assert actual_user.id.startswith(\"us-\")\n assert actual_user.auth_type == AuthType.basic\n assert actual_user.role == \"user\"\n assert actual_user.username == basic_user_dict[\"username\"]\n assert actual_user.password_hash != basic_user_dict[\"password\"]\n assert actual_user.email is None\n assert actual_user.identity_provider_id is None\n assert actual_user.profile_id == profile.id\n assert actual_user.budget is None\n assert actual_user.name is None", "def test_teams_add_user_to_team_v1(self):\n pass", "def testLogin(self):\n self.assertEquals(models.SUCCESS, self.users.add(\"userG\", \"password\"))\n self.assertTrue(self.users.login(\"userG\", \"password\"))", "def test_user_register_adds_a_user_to_list_of_users(self):\n\n user_randint = randint(0, 1000)\n test_user = {'username': 'TESTUSERNAME%d' % user_randint,\n 'name': 'TESTNAME',\n 'email': 'TESTEMAIL%d@TESTHOST.com' % user_randint,\n 'location': 'TESTLOCATION',\n 'position': 'TESTPOSITION',\n 'password': 'Testpass1'}\n\n # Navigate to registration form\n self.browser.get(self.warno_url)\n self.browser.find_element_by_link_text(\"Sign In\").click()\n self.browser.find_element_by_link_text(\"New here? Register.\").click()\n contents = self.browser.find_element_by_class_name(\"sub-title\")\n self.assertTrue(\"Register\" in contents.text, \"Redirected page's subtitle did not contain 'Register'\")\n\n # Register as a new user with test information\n self.browser.find_element_by_id(\"username\").send_keys(test_user[\"username\"])\n self.browser.find_element_by_id(\"email\").send_keys(test_user[\"email\"])\n self.browser.find_element_by_id(\"name\").send_keys(test_user[\"name\"])\n self.browser.find_element_by_id(\"position\").send_keys(test_user[\"position\"])\n self.browser.find_element_by_id(\"location\").send_keys(test_user[\"location\"])\n self.browser.find_element_by_id(\"password\").send_keys(test_user[\"password\"])\n final_element = self.browser.find_element_by_id(\"retype_password\")\n final_element.send_keys(test_user[\"password\"])\n final_element.submit()\n\n # Navigate to user page, confirm new user is in list\n self.browser.find_element_by_link_text('Users').click()\n contents = self.browser.find_element_by_class_name('sub-title')\n self.assertTrue('User' in self.browser.title, 'Redirected page did not have \"Users\" in subtitle')\n\n user_table = self.browser.find_element_by_id(\"user-table\")\n print user_table.text\n self.assertTrue(test_user[\"username\"] in user_table.text,\n \"List of users does not contain the new username '%s'\" % test_user[\"username\"])", "def test_create(self):\n userValue = {'name': 'User Test 1',\n 'login': 'usertest1',\n 'user_profile_id': self.user_profile2.id,\n }\n Users = self.env['res.users']\n user_test = Users.create(userValue)\n newUser = self.env['res.users'].browse(user_test.id)\n self.assertEqual(userValue['name'], newUser['name'])", "def test_add_user_existing(self):\n project = fake_clients.FakeProject(name=\"parent_project\")\n\n user = fake_clients.FakeUser(\n name=\"test@example.com\", password=\"123\", email=\"test@example.com\"\n )\n\n setup_identity_cache(projects=[project], users=[user])\n\n url = \"/v1/actions/InviteUser\"\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": project.id,\n 
\"roles\": \"project_admin,member,project_mod\",\n \"username\": \"test@example.com\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n data = {\n \"email\": \"test@example.com\",\n \"roles\": [\"member\"],\n \"project_id\": project.id,\n }\n response = self.client.post(url, data, format=\"json\", headers=headers)\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"task created\"]})\n\n new_token = Token.objects.all()[0]\n url = \"/v1/tokens/\" + new_token.token\n data = {\"confirm\": True}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_register_user(self):\n response = self.signup_a_user(self.user_data)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(response.data['message'],\n \"User successfully created. Check email for \"\n \"verification link\")", "def add(self, user: U) -> None:\n ...", "def test_save_user(self):\n self.new_user.save_user()\n self.assertEqual(len(User.UserDetails), 1)", "def test_save_login(self):\n self.new_user.save_login()\n self.assertEqual(len(User.users_list),1)", "def test_register_user(self):\n\n new_user = self.register_user()\n\n self.assertEqual(new_user.status_code, 201)", "def test_adds_user(self):\n admin = IdentityAdmin()\n admin.factory = self.adminFactory\n\n serverTransport = makeFakeServer(admin)\n serverTransport.getQ2QHost = lambda: Q2QAddress('Q2Q Host')\n\n client = AMP()\n pump = connect(admin, serverTransport, client, makeFakeClient(client))\n\n d = client.callRemote(AddUser, name='q2q username',\n password='q2q password')\n pump.flush()\n\n # The username and password are added, along with the domain=q2q\n # host, to the IdentityAdmin's factory's store\n self.assertEqual([call('Q2Q Host', 'q2q username', 'q2q password')],\n self.addUser.calls)\n\n # The server responds with {}\n self.assertEqual({}, self.successResultOf(d))", "def test_020_add_user_to_group(self):\n testflow.step(\"Adding user %s to group %s\", TEST_USER1, TEST_GROUP1)\n assert MANAGE_CLI.run(\n 'useradd',\n TEST_GROUP1,\n user=TEST_USER1\n )[0], \"Failed to add user to group '%s'\" % TEST_GROUP1\n\n testflow.step(\"Adding nonexisting user to group %s\", TEST_GROUP1)\n assert not MANAGE_CLI.run(\n 'useradd',\n TEST_GROUP1,\n user='nonsense'\n )[0], \"Possible to add nonexisting user to group\"\n\n testflow.step(\"Adding user %s to nonexisting group\", TEST_USER2)\n assert not MANAGE_CLI.run(\n 'useradd',\n 'nonsense',\n user=TEST_USER2\n )[0], \"Possible to add user to nonexisting group\"", "def test_admin_calendar_user_admin_add(self):\n response = self.client.get(\"/admin/auth/calendaruser/\")\n self.assertEqual(response.status_code, 200)", "def test_append_existing_user(self):\n print('(' + self.test_append_existing_user.__name__+')',\n self.test_append_existing_user.__doc__)\n self.assertIsNone(self.connection.append_user(\n PATIENT_USERNAME, NEW_PATIENT))", "def test_api_user_put(self):\n pass", "def test_create_user(self):\n \n new_user = {\"username\": \"beny1976\", \"vocab_count\": 0, \"name\": \"beny rood\", \"sex\": \"male\", \"dob\": \"18/10/1979\"}\n msg = app.create_user(predefined_user=new_user)\n self.assertTrue(msg != \"\")", "def testAddEmptyPassword(self):\n self.assertEquals(models.SUCCESS, self.users.add(\"userE\", \"\"))", "def test_users_add(mocker):\r\n mocker.patch('subprocess.call')\r\n users.add(user_dict)\r\n 
subprocess.call.assert_called_with([\r\n 'useradd',\r\n '-p',\r\n password,\r\n '-G',\r\n 'wheel,dev',\r\n 'kevin',\r\n ])", "def test_post_users_post(self):\n pass", "def test_able_to_create_a_user():\n response = api_helper.create_user(pytest.test_user)\n assert response.status_code == 201\n check_user_data_in_response(response.json()[\"data\"])", "def test_create_valied_user_success(self):\n # requirments for creating user\n payload = {\n 'email': 'test@gmail.com',\n 'password': 'abcd1234',\n 'name': 'Test name'\n }\n\n # this will do a HTTP POST request and create a user\n response = self.client.post(CREATE_USER_URL, payload)\n\n # Check if statuscode returns a HTTP201 exception when created\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n # Test that the user is actually created\n # response.data is a dic responce like our payload\n # but with an additional id field\n user = get_user_model().objects.get(**response.data)\n # this will assert that the password is true\n self.assertTrue(user.check_password(payload['password']))\n # Ensure that password is not returned in the request\n # because it is a potential security voulnarability\n self.assertNotIn('password', response.data)", "def test_add_new_user_to_db(self):\n\n test_user = 'test_first_user'\n test_password = 'liamNees0n_T4k3n'\n user_object = User(username=test_user, password=test_password)\n db.session.add(user_object)\n db.session.commit()\n self.assertEqual(user_object.username, 'test_first_user')", "def test_main_with_users(self):\n add_user('michael', 'michael@mherman.org')\n add_user('fletcher', 'fletcher@notreal.com')\n with self.client:\n response = self.client.get('/')\n self.assertEqual(response.status_code, 200)\n self.assertIn(b'<h1>All Users</h1>', response.data)\n self.assertNotIn(b'<p>No users!</p>', response.data)\n self.assertIn(b'michael', response.data)\n self.assertIn(b'fletcher', response.data)", "def test_users_can_signup(self):\n for value in self.app.users.values():\n result = self.app.create_user()\n stored_password = value['password']\n expected = {0: {\n 'email': 'demo@email.com', 'username': 'admin', 'password': stored_password\n }}\n self.assertEqual(expected, result)", "def test_add_many_users(self):\n self.new_user.saveUser()\n another_user = User(\"salim\",\"java\")\n another_user.saveUser()\n self.assertEqual(len(User.users_list),2)", "def test_user_auth(self):\n self.new_user.save_login()\n test_user=User(\"trinity\",\"trinity@gmail.com\",\"123\")\n test_user.save_login()\n self.assertTrue(self.new_user.users_auth(\"trinity\",\"123\"))", "def test_save(self):\n user, usrmgr_mock = self.__get_test_instance(\n \"@foouser\", 1337, group=\"foogroup\")\n usrmgr_mock.return_value.add_user.return_value = True\n self.assertTrue(user.save())\n self.assertTrue(usrmgr_mock.return_value.add_user.called)", "def test_empty_user_name_field(self):\r\n result=self.user.get_user_register(\"Stephen\",\" Ochieng\",\"stephenochieng955@mail.com\",\"stephenochieng\",\"eat\")\r\n self.assertEqual(2,result,\"Fill in the username field please\")", "def test_manage_user(self):\r\n # First with a new user\r\n user_data = dict(id='1', name='google',\r\n email='g@g.com')\r\n token = 't'\r\n user = manage_user(token, user_data, None)\r\n assert user.email_addr == user_data['email'], user\r\n assert user.name == user_data['name'], user\r\n assert user.fullname == user_data['name'], user\r\n assert user.google_user_id == user_data['id'], user\r\n\r\n # Second with the same user\r\n user = 
manage_user(token, user_data, None)\r\n assert user.email_addr == user_data['email'], user\r\n assert user.name == user_data['name'], user\r\n assert user.fullname == user_data['name'], user\r\n assert user.google_user_id == user_data['id'], user\r\n\r\n # Finally with a user that already is in the system\r\n user_data = dict(id='10', name=self.name,\r\n email=self.email_addr)\r\n token = 'tA'\r\n user = manage_user(token, user_data, None)\r\n err_msg = \"User should be the same\"\r\n print user.google_user_id\r\n assert user.google_user_id == '10', err_msg", "def test_create(self, client):\n count = get_user_model().objects.count()\n data = factory.build(dict, FACTORY_CLASS=UserFactory)\n url = reverse('users:create')\n response = client.post(url, data)\n assert response.status_code == 302\n\n user = get_user_model().objects.last()\n assert user.username == data['username']\n assert user.birthday == datetime.datetime.strptime(data['birthday'], '%Y-%m-%d').date()\n assert get_user_model().objects.count() == count + 1", "def test_user_creation(self):\n self.assertTrue(User.objects.exists())", "def test_handle_add(self):\n test_user = User(\"userid\")\n test_user.permissions_level = Permissions.admin\n test_user.github_username = \"githubuser\"\n team = Team(\"BRS\", \"brs\", \"web\")\n team.github_team_id = \"githubid\"\n add_user = User(\"anotheruser\")\n add_user.github_username = \"myuser\"\n add_user.github_id = \"otherID\"\n self.db.retrieve.side_effect = [test_user, add_user]\n self.db.query.return_value = [team]\n with self.app.app_context():\n resp, code = self.testcommand.handle(\"team add brs ID\", user)\n team_attach = team.get_attachment()\n expect = {'attachments': [team_attach],\n 'text': 'Added User to brs'}\n self.assertDictEqual(resp, expect)\n self.assertEqual(code, 200)\n self.db.store.assert_called_with(team)\n assert team.has_member(\"otherID\")\n self.gh.add_team_member.assert_called_once_with(\"myuser\", \"githubid\")", "def testAddEmptyUsername(self):\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(\"\", \"password\"))", "def test_existing_user_registration(self):\n register_user(self, 'Some', 'Name', 'another@gmail.com', 'aaaAAA111')\n with self.client:\n response = register_user(\n self, 'Dalin', 'Oluoch', 'another@gmail.com', 'aaaAAA111')\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(\n data['message'] ==\n \"Sorry, email 'another@gmail.com' already exists.\")\n self.assertTrue(response.content_type == 'application/json')\n self.assertEqual(response.status_code, 400)", "def test_new_user_is_added(db_session):\n new_user = User(username=\"test\", password=\"test\")\n db_session.add(new_user)\n query = db_session.query(User).all()\n assert len(query) == 1", "def test_admin_create_user(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Love',\n username='love',\n password='Andela8',\n role='attendant'\n )\n\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Summer Love has been registered')\n self.assertEqual(resp.status_code, 201)", "def test_user_exists(self):\n payload = {'email': 'test@test1.com','password': 'testpass'}\n res = self.client.post(CREATE_USER_URL, payload)\n self.assertEqual(res.status_code, 
status.HTTP_400_BAD_REQUEST)", "def test_user_register(self):\n self.assertEqual(self.response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(User.objects.count(), 1)\n self.assertEqual(User.objects.get().email, 'testuser@gmail.com')", "def testAddNoneUsernameAndPassword(self):\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(None, None))", "def test_register_new_user(self):\n with self.client:\n response = self.client.post(\n url_for('register'),\n data=dict(\n first_name='Admin',\n last_name='Admin',\n email='admin@admin.com',\n password='admin2016',\n confirm_password='admin2016'\n ),\n follow_redirects=True\n )\n self.assertEqual(response.status_code, 200)", "def test_new_user(self):\n json_resp = make_user(self.client)\n # check api response\n self.assertEqual(json_resp['status'], 'user registered')\n self.assertEqual(json_resp['username'], 'Dan')\n # check that user is in database\n self.assertEqual(User.query.count(), 1)\n\n # check malformed query\n resp = self.client.post('/user/',\n headers=api_headers(),\n data=json.dumps({'username': 'Dan'}))\n json_resp = json.loads(resp.data.decode('utf-8'))\n # check api response\n self.assertEqual(resp.status, '400 BAD REQUEST')\n self.assertEqual(json_resp['status'], 'missing fields')\n self.assertEqual(json_resp['missing'], ['email', 'password'])", "def step_impl(context):\n id_added = test_app.post(f\"/create_user/{user_id}\")\n account_added = test_app.post(f\"/create_account/{user_id}\")\n assert id_added.status_code == 200, \"Wrong status code\"\n assert account_added.status_code == 200, \"Wrong status code\"", "def test_add(self):\n self.client.login(username='admin', password='admin')\n response = self.client.post('/add/', {'url': 'http://example.com'}, follow=True)\n self.assertShortURLCreated(response)", "def testAddNoneUsername(self):\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(None, \"password\"))", "def test_calendar_user_view_add(self):\n request = self.factory.get('/module/calendar_user/add/')\n request.user = self.user\n request.session = {}\n response = calendar_user_add(request)\n self.assertEqual(response.status_code, 200)\n\n response = self.client.post('/module/calendar_user/add/', data=\n {\n \"username\": \"caluser1\",\n \"password\": \"caluser1\",\n \"calendar_setting_id\": 1,\n }, follow=True)\n self.assertEqual(response.status_code, 200)\n\n request = self.factory.post('/module/calendar_user/add/',\n {\n \"username\": \"caluser1\",\n \"password\": \"caluser1\",\n \"calendar_setting_id\": 1\n }, follow=True)\n request.user = self.user\n request.session = {}\n response = calendar_user_add(request)\n self.assertEqual(response.status_code, 200)", "def test_manage_user(self):\r\n # First with a new user\r\n user_data = dict(user_id=1, screen_name='twitter')\r\n token = dict(oauth_token='token', oauth_token_secret='secret')\r\n user = manage_user(token, user_data, None)\r\n assert user.email_addr == user_data['screen_name'], user\r\n assert user.name == user_data['screen_name'], user\r\n assert user.fullname == user_data['screen_name'], user\r\n assert user.twitter_user_id == user_data['user_id'], user\r\n\r\n # Second with the same user\r\n user = manage_user(token, user_data, None)\r\n assert user.email_addr == user_data['screen_name'], user\r\n assert user.name == user_data['screen_name'], user\r\n assert user.fullname == user_data['screen_name'], user\r\n assert user.twitter_user_id == user_data['user_id'], user\r\n\r\n # Finally with a user that already is in the system\r\n 
user_data = dict(user_id=10, screen_name=self.name)\r\n token = dict(oauth_token='token2', oauth_token_secret='secret2')\r\n user = manage_user(token, user_data, None)\r\n err_msg = \"It should return the same user\"\r\n assert user.twitter_user_id == 10, err_msg", "def test_create_user(self):\n client = app.test_client()\n mail = Mail(app)\n with mail.record_messages() as outbox:\n response = client.post(\n \"/user/signup/\",\n data=json.dumps(\n dict(\n username=\"user1\",\n password=\"passwd1\",\n email=\"user1@test.com\",\n role=\"STAFF\",\n )\n ),\n content_type=\"application/json\",\n )\n\n assert \"To signup, kindly click on the link\" in outbox[0].body\n assert \"/user/signup/\" in outbox[0].body\n assert response.status_code == 200\n assert (\n response.get_data().decode(\"utf-8\")\n == \"<h2>Created user successfully.</h2>\"\n )\n\n # Now try to create same user again and test the output\n response1 = client.post(\n \"/user/signup/\",\n data=json.dumps(\n dict(\n username=\"user1\",\n password=\"passwd1\",\n email=\"user1@test.com\",\n role=\"STAFF\",\n )\n ),\n content_type=\"application/json\",\n )\n\n assert response1.status_code == 401\n assert response1.get_data().decode(\"utf-8\") == \"User already exists.\"", "def test_post_user_post(self):\n pass", "def test_can_register_new_user(self):\n user_count = User.objects.count()\n self.register_bob()\n self.assertTrue(User.objects.count() == user_count + 1)", "def test_registering_user(self):\n new_user = self.app\n new_user.create_user()\n client = app.test_client(self)\n response = client.post('/login', follow_redirects=True)\n self.assertEqual(response.status_code, 200)", "def test_users_add(mocker):\n mocker.patch('subprocess.call')\n users.add(user_dict)\n subprocess.call.assert_called_with([\n 'useradd',\n '-p',\n password,\n '-G',\n 'wheel,dev',\n 'kevin',\n ])", "def test_create_user_invalid_type(self):\r\n print(\"Create user invalid type\")\r\n u_id = 3\r\n username = \"newtestuser\"\r\n password = \"password\"\r\n u_type = 5\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)" ]
[ "0.88546467", "0.8262478", "0.82284814", "0.8155481", "0.81260383", "0.81209815", "0.8071225", "0.80570364", "0.8053225", "0.8029433", "0.802561", "0.7957674", "0.7928939", "0.7879095", "0.78571725", "0.773885", "0.77226776", "0.77226776", "0.77226776", "0.77198654", "0.7630471", "0.7630471", "0.7619852", "0.7607644", "0.7607644", "0.7607644", "0.7607644", "0.7550477", "0.75290877", "0.75290877", "0.7525957", "0.75117916", "0.7503416", "0.7485683", "0.7485104", "0.7429834", "0.7375512", "0.73533916", "0.7348366", "0.73425215", "0.7340325", "0.7328811", "0.73266387", "0.73226047", "0.7321014", "0.732024", "0.7314539", "0.7307057", "0.7305621", "0.72981286", "0.72978145", "0.72866946", "0.7272445", "0.72566754", "0.7244995", "0.7232217", "0.72292", "0.72282696", "0.7225567", "0.72231466", "0.72071195", "0.72061026", "0.7194121", "0.7189569", "0.71836716", "0.7183071", "0.71752083", "0.7170366", "0.716593", "0.71650577", "0.7164395", "0.7157602", "0.7157489", "0.71558446", "0.71543986", "0.7142132", "0.7136029", "0.7132873", "0.71326137", "0.7131385", "0.713065", "0.7113699", "0.71090597", "0.71079713", "0.7105769", "0.7102579", "0.70908076", "0.70805454", "0.7079416", "0.7077715", "0.70770013", "0.7073034", "0.7072267", "0.70719343", "0.7070508", "0.7069509", "0.7063885", "0.70638263", "0.7057663", "0.70533985", "0.7051773" ]
0.0
-1
TEST CREATING A USER THAT ALREADY EXISTS
def test_create_user_exists(self):
    payload = {
        "email": "test@gmail.com",
        "name": "Test",
        'password': 'test123'
    }
    create_user(**payload)
    res = self.client.post(CREATE_USER_URL, payload)
    self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_user_creation(self):\n self.assertTrue(User.objects.exists())", "def test_existing_user(self):\n user = User.objects.create(username=self.username)\n actual = get_user_if_exists(None, self.details)\n self.assertDictEqual(actual, {'is_new': False, 'user': user})", "def test_createUser_single(self):\n #TODO: this and other tests", "def test_07_create_user_exists(self):\n\n _, user = self.get_random_item(models.User)\n success, error = utils.create_user(user, session=self.session)\n db_user = db_utils.get_item(\n models.User, filters={\"id\": user[\"id\"]}, session=self.session\n )\n user[\"password\"] = db_user.password\n self.assertTrue(db_user)\n db_user = db_user.as_dict()\n items_equal = utils.is_equal(user, db_user)\n self.assertTrue(items_equal)\n self.assertTrue(success)\n self.assertFalse(error)", "def test_08_create_user_not_exists(self):\n\n _, user = self.get_random_item(models.User)\n utils.create_user(user, session=self.session)\n success, error = utils.create_user(user, session=self.session)\n self.assertFalse(success)\n self.assertTrue(error)", "def test_create_user(self):\n pass", "def test_create_user(self):\n pass", "def test_create_user(self):\n pass", "def test_user_exists(self):\n # requirments for creating user\n payload = {\n 'email': 'test@gmail.com',\n 'password': 'abcd1234',\n 'name': 'Test',\n }\n\n # call the create function above\n create_user(**payload)\n\n # this will do a HTTP POST request and create a user\n response = self.client.post(CREATE_USER_URL, payload)\n\n # Check if statuscode returns a HTTP400 bad request\n # becos user already exist\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_user_exists(self):\n payload = {\n 'email': 'test@123.ru',\n 'password': '123PassW0rd',\n 'name': 'Test Name'\n }\n create_user(**payload)\n\n res = self.client.post(CREATE_USER_URL, payload)\n\n self.assertEquals(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_user_is_really_create():\n response = api_helper.get_user(user_name=pytest.test_user.name)\n assert response.status_code == 200\n response_json_data = response.json()[\"data\"]\n assert len(response_json_data) == 1\n check_user_data_in_response(response_json_data[0])\n pytest.test_user.id = response_json_data[0]['id']", "def test_user_exists(self):\r\n payload = {\r\n 'email': 'test@net.net',\r\n 'password': 'testpass',\r\n 'name': 'Maks'\r\n }\r\n create_user(**payload)\r\n\r\n res = self.client.post(CREATE_USER_URL, payload)\r\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_user_exists(self):\n\n payload = {\n 'email': 'test2@fff.com',\n 'password': 'test11',\n 'name': \"test name\"\n }\n\n create_user(**payload)\n\n res = self.client.post(CREATE_USER_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_user_existence(self):\n\n credentials = {\n 'email': 'testuser@gmail.com',\n 'password': 'Testpass12',\n 'name': 'Test Name'\n }\n get_user_model().objects.create_user(**credentials)\n\n # Check that this is a bad request since the user does already exists.\n response = self.client.post(URL_CREATE_USER, credentials)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_create_user_invalid_username(self):\r\n print(\"Create user invalid username (already taken)\")\r\n u_id = 3\r\n username = \"100\"\r\n password = \"test9999\"\r\n u_type = 1\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 
0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def test_user_exists(self):\n payload = {'email': 'jon@snow.com', 'password': 'password'}\n create_user(**payload)\n\n res = self.client.post(CREATE_USER_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_user_exists(self):\n payload = {'email': 'test@test1.com','password': 'testpass'}\n res = self.client.post(CREATE_USER_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_create_user_exists(self):\n payload = {\n \"user\": {\n \"email\": \"user4@user.com\",\n \"password\": \"useruser111\",\n \"is_doctor\": False,\n \"is_hospital_admin\": True\n },\n 'first_name': 'Test',\n 'last_name': 'JustUser'\n }\n sample_user(\n payload['user']['email'], payload['user']['password'],\n payload['user']['is_doctor'], payload['user']['is_hospital_admin']\n ),\n response = self.client.post(HOSPITAL_ADMIN_REGISTER_URL, payload, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_create_user_user_exists(self):\n create_mock_user(**self.mock_user)\n\n res = self.client.post(CREATE_USER_URL, self.mock_user)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_create_user_invalid_id(self):\r\n print(\"Create user invalid id (already taken)\")\r\n u_id = 100\r\n username = \"newtestuser\"\r\n password = \"test9999\"\r\n u_type = 1\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def test_dont_create_user(self):\n self.assertFalse(User.objects.exists())", "def test_user_exists(self):\n payload = {\n 'email': 'haider@gmail.com',\n 'password': 'testpass123'\n }\n create_user(**payload)\n res = self.client.post(CREATE_USER_API, payload)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_user_already_exists(self):\n User.objects.create_user(\n 'existing@example.com',\n 'existing@example.com',\n '123existing'\n )\n response = self.client.post('/o/register', {\n 'email': 'existing@example.com',\n 'password': '123existing',\n 'terms_acceptance': True,\n })\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'auth/login.html')\n self.assertContains(\n response,\n 'Użytkownik o podanym emailu już istnieje',\n )\n self.assertNotIn('_auth_user_id', self.client.session)\n self.assertEqual(User.objects.all().count(), 1)", "def testAddExists(self):\n self.assertEquals(models.SUCCESS, self.users.add(\"userB\", \"password\"))\n self.assertEquals(models.ERR_USER_EXISTS, self.users.add(\"userB\", \"password\"))", "def test_create_user(self) -> None:\n\n u1 = self.register_user(\"u1\", \"pass\")\n\n u1stats = self._get_current_stats(\"user\", u1)\n\n assert u1stats is not None\n\n # not in any rooms by default\n self.assertEqual(u1stats[\"joined_rooms\"], 0)", "def test_user_exists(setup_client):\n client = setup_client\n payload = {\n 'email': 'test@gmail.com',\n 'password': 'testpass',\n 'role': 'Supplier',\n 'name': 'Test name'\n }\n create_user(**payload)\n res = client.post(CREATE_USER_URL, payload)\n assert res.status_code == status.HTTP_400_BAD_REQUEST", "def test_get_user_if_exists(self):\n user = User.objects.create(username=self.username)\n actual = get_user_if_exists(None, self.details, user=user)\n self.assertDictEqual(actual, {'is_new': False})", "def 
test_able_to_create_a_user():\n response = api_helper.create_user(pytest.test_user)\n assert response.status_code == 201\n check_user_data_in_response(response.json()[\"data\"])", "def test_duplicate_user(self):\n json_resp = make_user(self.client)\n json_resp = make_user(self.client, username='Blah')\n # email should be taken\n self.assertEqual(json_resp['status'], 'email taken')\n # check only one user in the db\n self.assertEqual(User.query.count(), 1)\n # username should be taken\n json_resp = make_user(self.client, email='other@test.com')\n # check api response\n self.assertEqual(json_resp['status'], 'username taken')", "def test_create_user(self):\r\n self._auto_auth()\r\n self.assertEqual(User.objects.count(), 1)\r\n self.assertTrue(User.objects.all()[0].is_active)", "def test_create_duplicate_user(self):\n # setup\n user = self.generate_username_password()\n resp = self.create_user(user)\n resp_body = resp.json()\n try:\n assert resp.status_code == 201\n assert resp.headers[\"Content-Type\"] == \"application/json; charset=utf-8\"\n assert resp_body[\"username\"] == user[\"userName\"]\n assert resp_body[\"userID\"] != \"\"\n except AssertionError:\n raise\n finally:\n self.pprint_request(resp.request)\n self.pprint_response(resp)\n\n # test\n resp2 = self.create_user(user)\n resp_body2 = resp2.json()\n try:\n assert resp2.status_code == 406\n assert resp_body2[\"code\"] == \"1204\"\n assert resp_body2[\"message\"] == \"User exists!\"\n except AssertionError:\n raise\n finally:\n self.pprint_request(resp2.request)\n self.pprint_response(resp2)\n\n # teardown:\n resp3 = self.delete_user_basic_auth(resp_body[\"userID\"], user)\n try:\n assert resp3.status_code == 204\n except AssertionError:\n raise\n finally:\n self.pprint_request(resp3.request)\n self.pprint_response(resp3)", "def test_user_creation(self):\n self.assertEqual(CustomUser.objects.count() - self.user_count, 2)\n self.assertEqual(self.new_student_user.username, 'Student')\n self.assertTrue(self.new_student_user.password, 'student12345')\n self.assertEqual(self.new_instructor_user.username, 'Instructor')\n self.assertTrue(self.new_instructor_user.password, 'instructor12345')", "def test_create_user(self):\n data = {\n \"firstname\": \"John\",\n \"lastname\": \"Doe\",\n \"password\": \"supersecret\",\n \"password_repeat\": \"supersecret\",\n }\n res = self.post(url=\"/users\", data=data)\n self.assertEqual(res.status_code, 200)\n self.assertIn(b\"Created user.\", res.data)\n\n user = User.query.filter_by(id=6).first()\n self.assertTrue(user)\n self.assertEqual(user.firstname, \"John\")\n self.assertEqual(user.lastname, \"Doe\")\n self.assertFalse(user.is_verified)", "def test_create_user_nickname_exists(self):\n create_mock_user(**self.mock_user)\n second_mock_user = {\n 'email': 'test@test1.com',\n 'password': 'test123',\n 'name': 'Pablo Picasso',\n 'nickname': 'Mona',\n }\n\n res = self.client.post(CREATE_USER_URL, second_mock_user)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_create(self):\n userValue = {'name': 'User Test 1',\n 'login': 'usertest1',\n 'user_profile_id': self.user_profile2.id,\n }\n Users = self.env['res.users']\n user_test = Users.create(userValue)\n newUser = self.env['res.users'].browse(user_test.id)\n self.assertEqual(userValue['name'], newUser['name'])", "def test_create_user_same_username(self):\n first_name = \"a\"\n last_name = \"a\"\n username = \"a\"\n email = \"a\"\n password = \"a\"\n\n manager = UserManager()\n result = manager.create(first_name, last_name, 
username, email, password)\n self.assertFalse(result)", "def test_create_user(self):\n \n new_user = {\"username\": \"beny1976\", \"vocab_count\": 0, \"name\": \"beny rood\", \"sex\": \"male\", \"dob\": \"18/10/1979\"}\n msg = app.create_user(predefined_user=new_user)\n self.assertTrue(msg != \"\")", "def test_existence(self):\n self.assertTrue(User.objects.filter(username='rcm').exists())", "def testCreateIsAllowed(self):\n self.users.create([(u'user', u'secret', u'User', u'user@example.com')])\n user = getUser(u'user')\n self.assertEqual(u'user', user.username)", "def test_already_existing_user(self):\n self.user.registration(\n \"Githeri\", \"githeri.man@yahoo.com\", \"iwantgitheri\", \"iwantgitheri\")\n msg = self.user.registration(\"Githeri\",\n \"githeri.man@yahoo.com\",\n \"iwantgitheri\",\n \"iwantgitheri\")\n self.assertEqual(msg, \"Your Account Already Active. Proceed to login\")", "def test_user_creation_is_successful(self):\n user_1 = User.objects.get(pk=self.user_1.id)\n user_2 = User.objects.get(pk=self.user_2.id)\n user_count = User.objects.count()\n\n self.assertEqual(user_1.first_name, \"John\")\n self.assertEqual(user_2.first_name, \"Kent\")\n self.assertEqual(user_count, 2)", "def test_append_existing_user(self):\n print('(' + self.test_append_existing_user.__name__+')',\n self.test_append_existing_user.__doc__)\n self.assertIsNone(self.connection.append_user(\n PATIENT_USERNAME, NEW_PATIENT))", "def test_user_create(self):\n user_count = User.objects.count()\n user = User.objects.create_user(email='test@test.com', password='test')\n self.assertTrue(User.objects.count() == user_count + 1)\n self.assertFalse(user.is_staff)\n self.assertFalse(user.is_superuser)\n with self.assertRaises(ValueError, msg='The email must be provided'):\n User.objects.create_user(email='', password='test')", "def test_user_signup_valid(self):\n\n u = User.signup(\n username=\"testuser\",\n email=\"test@test.com\",\n password=\"HASHED_PASSWORD\",\n image_url=User.image_url.default.arg\n )\n\n db.session.commit()\n\n u.id = 9999\n\n user = User.query.get(u.id)\n username = user.username\n\n self.assertEqual(username, \"testuser\")", "def test_create_user_invalid_type(self):\r\n print(\"Create user invalid type\")\r\n u_id = 3\r\n username = \"newtestuser\"\r\n password = \"password\"\r\n u_type = 5\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def test_user_creation(self):\n user = UserModel.objects.create_user(\n username=\"saimer\"\n )\n self.assertEqual(user.email, \"\")\n self.assertEqual(user.username, \"saimer\")\n self.assertFalse(user.has_usable_password())", "def test_create_with_username(self):\n properties = self.portal.portal_properties.site_properties\n properties.manage_changeProperties(use_email_as_login=True)\n\n user = api.user.create(\n username='chuck',\n email='chuck@norris.org',\n password='secret',\n )\n self.assertEquals(user.getUserName(), 'chuck@norris.org')\n\n properties = self.portal.portal_properties.site_properties\n properties.manage_changeProperties(use_email_as_login=False)\n\n user = api.user.create(\n username='chuck',\n email='chuck@norris.org',\n password='secret',\n )\n self.assertEquals(user.getUserName(), 'chuck')", "def test_check_user(self):\n self.new_user.save_user()\n test_user = User(\"Test\", \"user\", \"test\", \"walIas15\")\n test_user.save_user()\n 
test_user.check_user(\"test\", \"walIas15\")", "def test_create_user(self):\n first_name = \"b\"\n last_name = \"b\"\n username = \"b\"\n email = \"b\"\n password = \"b\"\n\n manager = UserManager()\n result = manager.create(first_name, last_name, username, email, password)\n self.assertTrue(result)\n\n user = User.objects.get(username=username)\n self.assertEqual(first_name, user.first_name)\n self.assertEqual(last_name, user.last_name)\n self.assertEqual(username, user.username)\n self.assertEqual(email, user.email)\n self.assertEqual(password, user.testdata.password)\n self.assertEqual(username, user.testdata.username)\n self.assertEqual(email, user.testdata.email)\n self.assertNotEqual(user.authtests, None)", "def test_create_user_invalid_password(self):\r\n print(\"Create user invalid password (empty)\")\r\n u_id = 3\r\n username = \"newtestuser\"\r\n password = \"\"\r\n u_type = 1\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def test_find_user_by_username(self):\n self.new_user.save_user()\n test_user = User(\"Test\", \"user\", \"test\", \"walIas15\")\n test_user.save_user()\n user_exists = User.user_exist(\"test\")\n self.assertTrue(user_exists)", "def test_registered_with_already_registered_user(self):\n\n print(\" ------------ Test 2 - Registration an user already registed ------------------\")\n\n user_id = uuid.uuid4()\n account = Account(user_id=user_id, password=\"my-precious\", currency=Currency(\"EUR\"))\n db.session.add(account)\n db.session.commit()\n\n response = register_user(user_id, \"my-precious\", \"EUR\")\n data = response.json()['message']\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'User already exists. 
Please Log in')\n self.assertTrue(response.headers['Content-Type'] == 'application/json')\n self.assertEqual(response.json()['code'], 202)", "def test_add_user(self):\n pass", "def users_create():", "def test_create_no_username(self):\n\n # If there is no username, email will be used instead\n properties = self.portal.portal_properties.site_properties\n properties.manage_changeProperties(use_email_as_login=True)\n\n user = api.user.create(\n email='chuck@norris.org',\n password='secret'\n )\n\n self.assertEquals(user.getUserName(), 'chuck@norris.org')\n\n # But if using emails as a username is disabled, we should get\n # an error\n properties.manage_changeProperties(use_email_as_login=False)\n\n self.assertRaises(\n ValueError,\n api.user.create,\n email='chuck@norris.org', password='secret'\n )", "def test_create_user_with_no_role(setup_client):\n client = setup_client\n payload = {\n 'email': 'test@gmail.com',\n 'password': 'password',\n 'name': 'Test name'\n }\n res = client.post(CREATE_USER_URL, payload)\n assert res.status_code == status.HTTP_400_BAD_REQUEST\n user_exists = get_user_model().objects.filter(\n email=payload['email']).exists()\n assert not user_exists", "def test_api_can_create_users(self):\n res = self.client().post('/api/v1/user/', data = self.req)\n self.assertEquals(res.status_code, 200)\n self.assertIn('mary', str(res.data))", "def test_create_defined_user(self):\r\n self._auto_auth(\r\n username='robot', password='test',\r\n email='robot@edx.org', full_name=\"Robot Name\"\r\n )\r\n\r\n # Check that the user has the correct info\r\n user = User.objects.get(username='robot')\r\n self.assertEqual(user.username, 'robot')\r\n self.assertTrue(user.check_password('test'))\r\n self.assertEqual(user.email, 'robot@edx.org')\r\n\r\n # Check that the user has a profile\r\n user_profile = UserProfile.objects.get(user=user)\r\n self.assertEqual(user_profile.name, \"Robot Name\")\r\n\r\n # By default, the user should not be global staff\r\n self.assertFalse(user.is_staff)", "def test_register_existing(self):\n\n data = {'username': User.objects.all()[0].username, 'password': \"123test\", 'email': 'dschien@gmail.com',\n 'device': self.device, 'newsletter': 'true', 'research': 'true'}\n\n response = self.requestRegistration(data)\n self.assertTrue(response.status_code == status.HTTP_400_BAD_REQUEST)\n self.assertTrue(not 'client_id' in response.data)\n self.assertTrue('User with this Username already exists.' 
in response.data['username'])", "def test_create_user_object(self):\n print('(' + self.test_create_user_object.__name__+')',\n self.test_create_user_object.__doc__)\n # Query to get users and users_profile for the patient\n query = 'SELECT users.*, users_profile.* FROM users, users_profile \\\n WHERE users.user_id = users_profile.user_id'\n # assert if result doesn't contain patient\n self.assertDictContainsSubset(self.connection._create_user_object(\n execute_query(self, query, 'one')), PATIENT)", "def test_user_exist(self):\n data = {\n 'email': 'test@test.com',\n 'password': 'testtest',\n 'first_name': 'Test test',\n 'last_name': 'Test'\n } \n sigin_in_user(**data)\n res = self.client.post(SIGN_IN_USER_URL, data)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_user_creation(self):\r\n \r\n self.assertIsInstance(self.user, User)", "def test_dont_save_new_user(self):\n self.assertEqual(get_user_model().objects.exists(), 1)", "def getTestUser():\n allUsers = User.objects.all()\n if len(allUsers) > 0 :\n return allUsers[0]\n else :\n return User.objects.create_user(username='profiletester',\n email='profiletester@mapstorytests.com',\n password='superduperpassword2000')", "def test_specific_user(global_config, test_specific_email, id_api):\n yield id_api.create_user_if_not_exists(test_specific_email, global_config.users.default.password)", "def create_user(self, username, password, firstname, lastname): # create gameuser, tested\r\n conn = self.get_db()\r\n with conn:\r\n c = conn.cursor()\r\n c.execute('SELECT COUNT(*) from gameuser WHERE username=%s',(username,))\r\n n = int(c.fetchone()[0])\r\n # print 'num of rfdickersons is ' + str(n)\r\n if n == 0:\r\n hashedpass = md5.new(password).hexdigest()\r\n c.execute('INSERT INTO gameuser (username, password, firstname, lastname) VALUES (%s,%s,%s,%s)', \r\n (username, hashedpass, firstname, lastname))\r\n conn.commit()\r\n # return True\r\n else:\r\n # return False\r\n raise UserAlreadyExistsException('{} user already exists'.format((username)) )", "def test_user_recreate(self):\n self.test_user_creation()\n\n with self.assertRaisesMessage(\n IntegrityError, \"UNIQUE constraint failed: auths_user.username\"\n ):\n UserModel.objects.create_user(\n username=\"saimer\"\n )", "def test_username_not_unique(self, client, users):\n data = factory.build(dict, FACTORY_CLASS=UserFactory, username=users[1].username)\n url = reverse('users:create')\n response = client.post(url, data)\n assert response.status_code == 200\n assert 'A user with that username already exists.' 
in str(response.content)", "def test_user_id(self):\n new_user = self.app\n self.assertTrue(new_user.user_id, 0)\n new_user.create_user()\n self.assertTrue(new_user.user_id, 1)\n for key in new_user.users:\n self.assertEqual(new_user.user_id, key)", "def test_create_user_with_preexisting_username(self):\n data = {\n 'username': 'test_user',\n 'email': 'test@example.com',\n 'password': 'testpassword'\n }\n response = self.client.post(self.create_url, data, format='json')\n self.assertEqual(User.objects.count(), 1)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(len(response.data['username']), 1)", "def test_new_user(self):\n\n consumer = LTIConsumerFactory(slug=\"consumer\")\n passport = LTIPassportFactory(title=\"consumer1_passport1\", consumer=consumer)\n\n user_count = get_user_model().objects.count()\n\n new_user = self._authenticate(\n {\n \"user_id\": \"1c6cd9c1-ca4c-41fe-b369-912075a5d3ce\",\n \"lti_message_type\": \"basic-lti-launch-request\",\n \"lti_version\": \"LTI-1p0\",\n \"resource_link_id\": \"aaa\",\n \"context_id\": \"course-v1:fooschool+authbackend+0001\",\n \"lis_person_contact_email_primary\": \"newuser@example.com\",\n \"lis_person_sourcedid\": \"new_user\",\n },\n passport,\n )\n\n self.assertEqual(\"new_user\", new_user.public_username)\n self.assertEqual(consumer, new_user.lti_consumer)\n self.assertEqual(\"newuser@example.com\", new_user.email)\n self.assertEqual(\"new_user@consumer\", new_user.username)\n self.assertEqual(user_count + 1, get_user_model().objects.count())", "def test_create(self, client):\n count = get_user_model().objects.count()\n data = factory.build(dict, FACTORY_CLASS=UserFactory)\n url = reverse('users:create')\n response = client.post(url, data)\n assert response.status_code == 302\n\n user = get_user_model().objects.last()\n assert user.username == data['username']\n assert user.birthday == datetime.datetime.strptime(data['birthday'], '%Y-%m-%d').date()\n assert get_user_model().objects.count() == count + 1", "def test_new_user(self):\n json_resp = make_user(self.client)\n # check api response\n self.assertEqual(json_resp['status'], 'user registered')\n self.assertEqual(json_resp['username'], 'Dan')\n # check that user is in database\n self.assertEqual(User.query.count(), 1)\n\n # check malformed query\n resp = self.client.post('/user/',\n headers=api_headers(),\n data=json.dumps({'username': 'Dan'}))\n json_resp = json.loads(resp.data.decode('utf-8'))\n # check api response\n self.assertEqual(resp.status, '400 BAD REQUEST')\n self.assertEqual(json_resp['status'], 'missing fields')\n self.assertEqual(json_resp['missing'], ['email', 'password'])", "def test_admin_cannot_create_users_with_same_username(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Love',\n username='love',\n password='Andela8',\n role='attendant'\n )\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n user = dict(\n name='Paul Love',\n username='love',\n password='Andela8',\n role='attendant'\n )\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'This username is already taken!')\n self.assertEqual(resp.status_code, 400)", "def 
test_create_valid_user_success(self):\n payload = {\n 'email': 'test@test1.com',\n 'password': 'testpass',\n 'name': 'TestName'\n }\n res = self.client.post(CREATE_USER_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n \n user = get_user_model().objects.get(**res.data)\n \n self.assertTrue(user.check_password(payload['test@test1.com', \n 'testpass']))\n self.assertNotIn('testpass', res.data)", "def test_duplicate_user(self, mapp, existing_user_id):\n\n mapp.create_user(user=existing_user_id, password=1234,\n email=existing_user_id + \"@example.com\", code=409)", "def test_manage_user(self):\r\n # First with a new user\r\n user_data = dict(id='1', name='google',\r\n email='g@g.com')\r\n token = 't'\r\n user = manage_user(token, user_data, None)\r\n assert user.email_addr == user_data['email'], user\r\n assert user.name == user_data['name'], user\r\n assert user.fullname == user_data['name'], user\r\n assert user.google_user_id == user_data['id'], user\r\n\r\n # Second with the same user\r\n user = manage_user(token, user_data, None)\r\n assert user.email_addr == user_data['email'], user\r\n assert user.name == user_data['name'], user\r\n assert user.fullname == user_data['name'], user\r\n assert user.google_user_id == user_data['id'], user\r\n\r\n # Finally with a user that already is in the system\r\n user_data = dict(id='10', name=self.name,\r\n email=self.email_addr)\r\n token = 'tA'\r\n user = manage_user(token, user_data, None)\r\n err_msg = \"User should be the same\"\r\n print user.google_user_id\r\n assert user.google_user_id == '10', err_msg", "def test_create_user(self):\n self.assertIsInstance(\n User.objects.create_user(username=\"username\", email=\"username@mail.com\", password=\"password\"), User)", "def test_can_register_new_user(self):\n user_count = User.objects.count()\n self.register_bob()\n self.assertTrue(User.objects.count() == user_count + 1)", "def test_users_can_signup(self):\n for value in self.app.users.values():\n result = self.app.create_user()\n stored_password = value['password']\n expected = {0: {\n 'email': 'demo@email.com', 'username': 'admin', 'password': stored_password\n }}\n self.assertEqual(expected, result)", "def test_register(self):\n username = \"testusername\"\n password = \"testpassword\"\n email = \"testemail@gmail.com\"\n fname = \"testfirstname\"\n lname = \"testlastname\"\n role = \"engineer\"\n\n registered = db.session.query(User).filter(or_( User.username == username, \n User.email == email)).first()\n if registered is not None:\n self.assertTrue(self.userExists(username))\n else:\n hashed_password = sha256_crypt.hash(str(password))\n newUser = User( username = username, \n password = hashed_password, \n email = email, \n fname = fname, \n lname = lname, \n role = role)\n db.session.add(newUser)\n db.session.commit()\n self.assertTrue(self.userExists(username))", "def create_testuser(app, created_models, verbosity, **kwargs):\n if not settings.DEBUG:\n return\n try:\n auth_models.User.objects.get(username='test')\n except auth_models.User.DoesNotExist:\n print '*' * 80\n print 'Creating test user -- login: test, password: test'\n print '*' * 80\n assert auth_models.User.objects.create_superuser('test', 'x@x.com', 'test')\n else:\n print 'Test user already exists'", "def test_create_user(self):\n user = User(\"Gideon Bamuleseyo\", \"gideon@mail.com\", \"secret\")\n self.assertEqual(user.name, \"Gideon Bamuleseyo\")\n self.assertEqual(user.email, \"gideon@mail.com\")\n self.assertEqual(user.password, \"secret\")", 
"def test_create_user_endpoint_creates_user(caplog):\n caplog.set_level('INFO')\n\n _request_create_user(SEED_USER_DATA)\n created_user = Advisor.objects.get(email=SEED_USER_DATA['email'])\n\n user_data_keys = SEED_USER_DATA.keys() - set(['token'])\n for key in user_data_keys:\n assert str(getattr(created_user, key)) == SEED_USER_DATA[key]\n\n user_info = [\n 'Creating a user: {',\n f' \"dit_team_id\": \"{SEED_USER_DATA[\"dit_team_id\"]}\",',\n f' \"email\": \"{SEED_USER_DATA[\"email\"]}\",',\n f' \"first_name\": \"{SEED_USER_DATA[\"first_name\"]}\",',\n f' \"last_name\": \"{SEED_USER_DATA[\"last_name\"]}\",',\n f' \"sso_email_user_id\": \"{SEED_USER_DATA[\"sso_email_user_id\"]}\"',\n '}',\n ]\n user_token = f'Created a token `{SEED_USER_DATA[\"token\"]}` for user {created_user.id}.'\n assert caplog.messages == [\n '\\n'.join(user_info),\n user_token,\n ]", "def test_create_user_valid(self):\n res = self.client.post(CREATE_USER_URL, self.mock_user)\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n\n user = get_user_model().objects.get(**res.data)\n\n self.assertTrue(user.check_password(self.mock_user['password']))\n self.assertNotIn('password', res.data)", "def test_register_twice(self):\n body, code = self.post(f\"/users\", bob, {\"phone\": \"+441234567890\", **bob_creds})\n self.assertEqual(400, code)\n self.assertEqual({\"error\": \"User already exists.\"}, body)", "def test_error_user_already_exists(self):\n User.objects.create_user(self.data)\n client = Client()\n client.post('/register/', self.data)\n self.assertRaisesMessage(ValueError, 'user already exists')", "def test_signup(self):\n resp = self.client.post(self.signup_url, self.test_credential)\n self.assertEqual(resp.status_code, 200)\n registed_user = User.objects.filter(username=self.test_credential['username'],\n is_active=False)\n self.assertTrue(registed_user)", "def test_create_new_user(self):\n\n\t\tdata = {'username': u'Test_User',\n\t\t\t\t\t'password': u'test',\n\t\t\t\t\t'work': u'88 7th Avenue, New York, NY, United States',\n\t\t\t\t\t'home': u'152 Lexington Avenue, New York, NY, United States',\n\t\t\t\t\t'homeLngLat': u'-73.98199699999998 40.743772',\n\t\t\t\t\t'workLngLat': u'-74.0014936 40.7396046'}\n\n\t\t# Add Test_User to the database\n\t\tserver.create_new_user(data)\n\n\t\tnew_user = db.session.query(User).filter(User.username=='Test_User').one()\n\n\t\t# new_user would return none if it did not exist in the db\n\t\tself.assertTrue(new_user, 'Test_User was not sucessfully added to db.')\n\t\tself.assertNotEqual(new_user.password, 'password', 'Password likely not hashed before stored in db.')", "def test_user_creation(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n self.assertEqual(new_user.get_username(), 'alice')\n self.assertEqual(new_user.email, 'alice@example.com')\n self.assertTrue(new_user.check_password('swordfish'))\n self.assertFalse(new_user.is_active)\n\n expiration_date = datetime_now() - timedelta(\n settings.ACCOUNT_ACTIVATION_DAYS\n )\n self.assertGreater(new_user.date_joined, expiration_date)", "def test_registration_when_user_already_exists(self):\n # register the user the first time\n self.register_user()\n # register the same user the second time\n result = self.client().post(AuthTestCase.registration, data=self.user)\n response_result = json.loads(result.data.decode())\n self.assertEqual(result.status_code, 409)\n self.assertEqual(response_result['message'], \"user already exists\")", "def 
test_create_with_duplicate_userid(self):\n\n self.sdkapi.guest_create(self.userid, 1, 1024)\n try:\n self.sdkapi.guest_create(self.userid, 1, 1024)\n except exception.SDKSMUTRequestFailed as err:\n self.assertEqual(err.results['rc'], 400)\n self.assertEqual(err.results['rs'], 8)", "def test_create_user_fails_with_no_username(self):\n user = get_user_model().objects.create(\n email='test@test.test',\n first_name='Test',\n password='pass123456!'\n )\n\n users = User.objects.filter(username='Test')\n\n self.assertEqual(len(users), 0)", "def test_success_register():\n assert not register(\"abc123\", \"qwerty123456\", \"Bob\", \"John\", \"abc@def.com\")\n\n # Check that user data was updated and that the user is logged in\n new_user = data.users.get(\"abc123\")\n assert new_user\n assert new_user.logged_in == True", "def test_good_user_creation(self):\n data = json.dumps({\n \"username\" : \"mark\", \"email\" : \"mark@gmail.com\",\n \"password\" : \"secret12345\", \"confirm_password\" : \"secret12345\"})\n response = self.app.post(\n '/api/v3/users', data=data,\n content_type='application/json',\n headers=self.admin_header)\n self.assertEqual(response.status_code, 201)", "def test_create_user(self):\n self.assertEqual(self.new_user.first_name, 'John')\n self.assertEqual(self.new_user.last_name, 'Doe')\n self.assertEqual(self.new_user.username, 'john_doe')\n self.assertEqual(self.new_user.email, 'johndoe@example.com')\n self.assertTrue(self.new_user.check_password('test_password'))\n self.assertFalse(self.new_user.is_staff)\n self.assertFalse(self.new_user.is_superuser)\n self.assertFalse(self.new_user.is_active)\n\n with self.assertRaises(ValueError):\n User.objects.create_user(\n first_name='', last_name='', username='', email='', bio='', password=''\n )", "def test_create_profile_on_user_created(self):\n user = User.objects.create_user(\n 'auto_tester', 'auto_tester@example.com', 'auto_tester')\n profile = user.get_profile()\n ok_(profile is not None)\n eq_(False, profile.username_changes)", "def test_get_user_exists(self):\n # First make the user\n resp = self.app.post('/users', data=json.dumps(self.test_user1_data))\n assert resp.status_code == 200\n\n # Now get the user data and verify it is correct\n resp = self.app.get('/users/{}'.format(self.test_user1_userid))\n assert resp.status_code == 200\n data = json.loads(resp.data)\n for key in ['first_name', 'last_name', 'userid', 'groups']:\n assert key in data\n assert data['first_name'] == self.test_user1_first\n assert data['last_name'] == self.test_user1_last\n assert data['userid'] == self.test_user1_userid\n for groupid in self.test_user1_groups:\n assert groupid in data['groups']", "def test_create_user(self):\n User.objects.create_user(username='abc', password='abcpass', email='abc@example.com')\n user_obj = User.objects.get(username='abc')\n self.assertTrue(user_obj.email, \"abc@example.com\")\n self.assertEqual(str(user_obj), \"abc\")", "def test_signup_missing_username(self):\n\n invalid_u = User.signup(\"test@test.com\", None, \"testpass\", \"Test\", \"User\", None)\n \n uid = 99999\n invalid_u.id = uid\n\n with self.assertRaises(exc.IntegrityError) as context:\n db.session.commit()" ]
[ "0.8342608", "0.8204365", "0.82029146", "0.8191392", "0.8067835", "0.797487", "0.797487", "0.797487", "0.79677063", "0.79604864", "0.7947312", "0.79398227", "0.7937801", "0.79361814", "0.7914857", "0.7861646", "0.78466517", "0.78347856", "0.7821123", "0.781935", "0.7808139", "0.77879626", "0.77639025", "0.7762809", "0.7730326", "0.772579", "0.7685443", "0.7649003", "0.7604171", "0.758106", "0.75532854", "0.7545652", "0.7541062", "0.7537177", "0.75352454", "0.75079644", "0.7487951", "0.7480046", "0.7461413", "0.74510604", "0.7448558", "0.74408305", "0.7417093", "0.7409433", "0.7404932", "0.7401921", "0.73686755", "0.73509353", "0.73301345", "0.7328827", "0.73225933", "0.7319608", "0.73160684", "0.7309459", "0.7307622", "0.7297017", "0.7278812", "0.727046", "0.72660637", "0.725666", "0.7255932", "0.72537005", "0.7242889", "0.723822", "0.72361195", "0.72285736", "0.72248656", "0.72245395", "0.7221039", "0.72195196", "0.7214952", "0.7213145", "0.7208716", "0.7204138", "0.7194873", "0.71925586", "0.71816087", "0.7175453", "0.71631885", "0.7147705", "0.71459097", "0.7145775", "0.7130333", "0.71239513", "0.71231997", "0.71228474", "0.71156436", "0.711157", "0.7110614", "0.71105903", "0.71059835", "0.71034247", "0.7102399", "0.7101464", "0.70993334", "0.7098397", "0.7096057", "0.7093045", "0.70824414", "0.7078843" ]
0.78193086
20
TEST A SHORT PASSWORD
def test_password_too_short(self):
    payload = {
        "email": "test@gmail.com",
        "name": "Test",
        'password': 'tTTt'
    }
    res = self.client.post(CREATE_USER_URL, payload)
    self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
    user_exists = get_user_model().objects.filter(
        email=payload['email']
    ).exists()
    self.assertFalse(user_exists)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_random_password():\n output = sh.random_password()\n assert isinstance(output, str) is True\n assert len(output) == 16", "def test_valid_password(self):\n pass_field = Field(\"\".join(['x' * (int(self.app.config['MAX_PWD_LEN']))]))\n\n valid_password(None, pass_field)", "def test_invalid_password(self):\n pass", "def test_set_user_password(self):\n pass", "def test_short_pwd(self):\n msg = self.user.registration(\"MrShort\",\n \"MrShort@yahoo.com\",\n \"short\",\n \"short\")\n self.assertEqual(\n msg, \"Password is too short\")", "def password(self) -> str:", "def test_check_password():\n assert check_password('Longpassword') == False\n assert check_password('123456') == False\n assert check_password('short') == False\n assert check_password('C0rect') == False\n assert check_password('Correct8') == True", "def test_password_salts_are_random(self):\n self.user.password = '123456'\n self.user2.password = '123456'\n self.assertTrue(self.user.password_hash != self.user2.password_hash)", "def test_password_set(self):\r\n tst = User()\r\n tst.password = self.test_password\r\n\r\n self.assertEqual(\r\n len(tst.password),\r\n 60,\r\n \"Hashed should be 60 char long: \" + tst.password)\r\n self.assertEqual(\r\n '$2a$',\r\n tst.password[:4],\r\n \"Hash should start with the right complexity: \" + tst.password[:4])", "def test_32_oauth_password(self):\r\n user = User(email_addr=\"johndoe@johndoe.com\",\r\n name=self.user.username,\r\n passwd_hash=None,\r\n fullname=self.user.fullname,\r\n api_key=\"api-key\")\r\n db.session.add(user)\r\n db.session.commit()\r\n res = self.signin()\r\n assert \"Ooops, we didn't find you in the system\" in res.data, res.data", "def test_get_password_from_keyring_if_exists(self, mock_keyring):\n mock_keyring.get_password.return_value = 'TestPass'\n self.assertEqual(\n get_password_from_keyring('TestPass', 'TestUser'), 'TestPass')", "def test_valid_password_valid():\n assert valid_password(\"123456\")\n assert valid_password(\"abcdef\")", "def test_password_verification(self):\n self.user.password = '123456'\n self.assertTrue(self.user.verify_password('123456'))\n self.assertFalse(self.user.verify_password('password'))", "def test_check_password(self):\n user = User.query.filter_by(username='eschoppik').first()\n self.assertTrue(bcrypt.check_password_hash(user.password, 'secret'))\n self.assertFalse(bcrypt.check_password_hash(user.password, 'notsecret'))", "def test_valid_password(self):\n newpass = 'Just Complex Enough'\n m = hashlib.sha512()\n m.update(newpass.encode('utf-8'))\n m.update(self.request.user.salt)\n hashed = m.digest()\n self.request.json_body = deepcopy(self.good_dict)\n self.assertNotEqual(self.request.user.password, hashed)\n self.request.json_body['password'] = newpass\n result = user_id_put_view(self.request)['d']\n self.assertEqual(result, dict_from_row(self.request.user, remove_fields=removals))\n self.assertEqual(self.request.user.password, hashed)", "def test_user1_method4():\n assert u.verify_password(USER_CREDENTIALS[\"password\"]), \"Password cannot verify properly\"", "def test_user_password_factory(self):\n user = UserFactory(name='User', password='myownpass')\n assert user.check_password('myownpass')", "def test_generate_secret(self):\n random_secret = ef_password.generate_secret(24)\n self.assertEqual(len(random_secret), 24)\n assert not set('[~!@#$%^&*()_+{}\":;\\']+$').intersection(random_secret)", "def test_if_pwd_equals_confirmed(self):\n msg = self.user.registration(\"Githeri\", \"githeri.man@yahoo.com\",\n 
\"iwantgitheri\",\n \"iwantsgitheri\")\n self.assertEqual(msg, \"Your passwords should match\")", "def test_password_match(self):\r\n\r\n tst = User()\r\n tst._password = self.test_hash\r\n\r\n self.assertTrue(\r\n tst._password == self.test_hash, \"Setting should have hash\")\r\n self.assertTrue(\r\n tst.password == self.test_hash, \"Getting should have hash\")\r\n self.assertTrue(\r\n tst.validate_password(self.test_password),\r\n \"The password should pass against the given hash: \" + tst.password)", "def enter_password(self):", "def test_password_is_to_short(setup_client):\n client = setup_client\n payload = {\n 'email': 'test@gmail.com',\n 'password': 'pw',\n 'role': 'Supplier',\n 'name': 'Test name'\n }\n res = client.post(CREATE_USER_URL, payload)\n assert res.status_code == status.HTTP_400_BAD_REQUEST\n user_exists = get_user_model().objects.filter(\n email=payload['email']).exists()\n assert not user_exists", "def test_set_password(matrix, mock_check_output, mock_psycopg2):\n matrix.save_pgsql_conf(db)\n matrix.set_password(\"testuser\", \"testpassword\")\n assert mock_check_output.called_with(\n [\n \"snap\",\n \"run\",\n \"{}.hash_password\".format(matrix.synapse_snap),\n \"-c\",\n matrix.synapse_config,\n \"-p\",\n \"testpassword\",\n ]\n )\n assert mock_check_output.call_count == 1\n assert mock_psycopg2.Cursor.execute.called_with(\n \"UPDATE users SET password_hash='testhash' WHERE name='testuser';\"\n )\n assert mock_psycopg2.Cursor.execute.call_count == 1", "def test_password_setter(self):\n self.user.password = '123456'\n self.assertIsNotNone(self.user.password_hash)", "async def password(self, ctx):\n pass", "def test_password_too_short(self):\n # requirments for creating user\n payload = {\n 'email': 'test@gmail.com',\n 'password': 'pwd',\n 'name': 'Test',\n }\n\n # this will do a HTTP POST request and create a user\n response = self.client.post(CREATE_USER_URL, payload)\n\n # Ensure that statuscode returns a HTTP400 bad request\n # becos must exist before we can ckeck password\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n # chech if user exists true else false\n user_exists = get_user_model().objects.filter(\n email=payload['email']\n ).exists()\n\n self.assertFalse(user_exists)", "def test_hash_password_on_register(client, user_dict):\n client.post('/api/v1/user/?bot=true', user_dict)\n\n user = User.objects.get(username=user_dict['username'])\n\n assert check_password(user_dict['password'], user.password), \"Password is not properly set at registration.\"", "def test_incorrect_password(self):\n input = (\"admin\", \"\")\n if is_travis():\n self.login_test(*input, True)\n else:\n self.login_test(*input)", "def test_user1_method3():\n REGEX_MATCH_BCRYPT_HASH = r\"^\\$2[ayb]\\$.{56}$\"\n hashed_password = u.password.decode()\n assert re.match(REGEX_MATCH_BCRYPT_HASH, hashed_password), \"Password was not hashed correctly\"", "def test_bad_password(self):\n user = 'santos.gallegos'\n passw = '1234'\n result = self.ucuenca.authentication(user, passw)\n self.assertFalse(result['autenticacion'])", "def check_password(pw):\n if (pw == password):\n print('welcome password match')\n\n else:\n print('Wrong password')", "def test_aws_service_api_vm_password_get(self):\n pass", "def testEditPassword(self):\n self._login_user('eschoppik','secret')\n response = self.client.post('/users/1/edit_password?_method=PATCH',\n data=dict(new_password='newpass', confirm_password='newpass',\n old_password='secret'), follow_redirects=True)\n user = 
User.query.filter_by(username='eschoppik').first()\n self.assertEqual(response.status_code, 200)\n self.assertEqual(bcrypt.check_password_hash(user.password, 'newpass'),True)", "def test_new_password(self):\n form_data = self.form_data(self.pwd)\n form = self.form(data=form_data, user=self.u)\n self.assertFalse(form.is_valid())", "def test_empty_password_field(self):\r\n result=self.user.get_user_register(\"Stephen\",\" Ochieng\",\"stephenochieng955@mail.com\",\"stephenochieng\",\"eat\"\")\r\n self.assertEqual(2,result,\"Fill in the password field please\")", "def test__save_password_in_keyring(create_class):\n # Late import\n import keyring\n\n cloud._save_password_in_keyring(\n \"salt.cloud.provider.test_case_provider\",\n \"fake_username\",\n \"fake_password_c8231\",\n )\n stored_pw = keyring.get_password(\n \"salt.cloud.provider.test_case_provider\",\n \"fake_username\",\n )\n keyring.delete_password(\n \"salt.cloud.provider.test_case_provider\",\n \"fake_username\",\n )\n assert stored_pw == \"fake_password_c8231\"", "def test_creation_with_password(self, user):\n user.password = \"is_god\"\n user.save()", "def test_build_command_password(self):\n actual_result = SshpassBaseCommandBuilder(COMMAND)\\\n .set_password(SERVER_PASSWORD)\\\n .to_build()\n self.assertListEqual(actual_result,\n ['sshpass', '-p', 'QWERTY', 'command'])", "def test_passwordsuccess(self):\n form_data = {\n 'username': 'testuser',\n 'password1': '2$n5[]$nnA5Y}2}}^gba',\n 'password2': '2$n5[]$nnA5Y}2}}^gba'\n }\n form = StrictUserCreationForm(data=form_data)\n self.assertTrue(form.is_valid())", "def test_hash_password(matrix, mock_check_output):\n result = matrix.hash_password(\"testpassword\")\n assert result == \"mocked-output\"", "def test_password_too_short(self):\n payload = {\n 'email': 'rest@eamil.com',\n 'password': 'pw',\n 'name': 'test Name'\n }\n\n res = self.client.post(CREATE_USER_URL, payload)\n\n self.assertEquals(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_user_short_passwords(self):\n data = json.dumps({\n \"username\" : \"moses\", \"email\" : \"moses@gmail.com\",\n \"password\" : \"1234567\", \"confirm_password\" : \"1234567\"})\n response = self.app.post(\n '/api/v3/users', data=data,\n content_type='application/json',\n headers=self.admin_header)\n self.assertEqual(response.status_code, 400)", "def test_long_password():\n expect_error(register, InputError,\n \"abcdef\", \"a\" * (MIN_PASSWORD - 1), \"a\", \"A\", \"a\")", "def test_valid_password(self):\n user = User(email=\"test@email.com\", password=\"testpassword\")\n\n self.assertTrue(user.is_valid_password(\"testpassword\"))", "def prompt_pass():\n msg = \"Enter Password: \"\n password = getpass.getpass(msg)\n return password", "def test_invalid_password(self):\n self.signup('Bo', 'Theo', 'Bo_theo5@example.com', 'Bo1995', 'Bo1995')\n rv = self.login('Bo_theo5@example.com', 'Bo1905')\n self.assertIn(b'Invalid password! 
Please try again', rv.data)", "def test_user_password(self):\n app = create_app('mathsonmars.settings.TestConfig')\n db.app = app\n db.drop_all()\n db.create_all()\n with app.app_context():\n admin_role = Role(role_name = RoleTypes.ADMIN)\n db.session.add(admin_role)\n db.session.flush()\n admin = User(role_id = admin_role.id, user_name='admin', password='supersafepassword')\n self.assertEqual('admin', admin.user_name)\n self.assertEqual(True, admin.is_correct_password('supersafepassword'))", "async def check_password(self, login, password):", "def test_upgrade_password_from_sha_to_ssha(self):\n name = u'/no such user/'\n password = '{SHA}jLIjfQZ5yojbZGTqxg2pY0VROWQ=' # 12345\n self.createUser(name, password, True)\n\n # User is not required to be valid\n theuser = user.User(self.request, name=name, password='12345')\n assert theuser.enc_password[:6] == '{SSHA}'", "def password():\n chars = \"abcdefghijklmnopqsrtuvwxyzABCDEFGHIJKLMNOPQSRTUVWXYZ\"\\\n \"123456890!#%&-_*<>+=()\"\n return ''.join(random.sample(chars, 15))", "def GetPassword(self):\n pass", "def ask_password(db_params: Dict[str, str]) -> None:\r\n db_params[Toml.PASSWORD] = getpass('DB Password: ')", "def test_get_password_from_keyring_if_not_exists(self, mock_keyring):\n mock_keyring.get_password.return_value = None\n self.assertEqual(\n get_password_from_keyring('TestPass', 'TestUser'), None)", "def test_4_is_valid_password(self):\n self.factory = RequestFactory()\n response = self.factory.post('', data={'password': '123123123'}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')\n response = is_valid_password(response)\n self.assertContains(response, 'The password is invalid')\n\n response = self.factory.post('', data={'password': '!pasword123'}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')\n response = is_valid_password(response)\n self.assertContains(response, 'The password is correct')\n\n self.factory = RequestFactory()\n response = self.factory.post('', data={'password': '123123123'})", "def test_valid_password_invalid():\n assert not valid_password(\"\")\n assert not valid_password(\"1234567\")\n assert not valid_password(\"abcdefg\")", "def check_password(self):\n server_address = self.server_address_entry.text\n password = self.password_entry.text\n\n payload = {'password': password}", "def test_password_too_short(self):\n\n payload = {\n \"user\": {\n \"email\": \"user5@user.com\",\n \"password\": \"us\",\n \"is_doctor\": False,\n \"is_hospital_admin\": True\n },\n 'first_name': 'Test1',\n 'last_name': 'JustUser2'\n }\n\n response = self.client.post(HOSPITAL_ADMIN_REGISTER_URL, payload, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def authenticate_password(self, secret=\"\"):\r\n #hexstr = binascii.b2a_hex(secret)\r\n self.sendAndRecv(\"AUTHENTICATE \\\"%s\\\"\\r\\n\"%secret)", "def password(self, password):\n self.password_hash = generate_password_hash(password)", "def password():\n passw = os.environ[\"CREODIAS_PASSWORD\"]\n if passw:\n return passw\n raise ValueError(\"Set environment variable CREODIAS_PASSWORD\")", "def askPass( user, host ):\n prompt = \"Password for user {} on host {}: \".format( user, host )\n password = getpass.getpass( prompt )\n return password", "def set_fake_pw(password='admin'):\n fake = raw_input('Password (leave blank to use \\'admin\\'):')\n\n with virtualenv():\n run('python manage.py set_fake_passwords --password={0}'\n .format(fake if fake != '' else 'admin'))\n\n print(green('set_fake_pw'))", "def password_builder():\n password = 
Credentials.password_buidler()\n return password", "def test_validate_credentials(self):\n pass", "async def test_change_password(\n hass: HomeAssistant, provider, capsys, hass_storage: dict[str, Any]\n) -> None:\n data = provider.data\n data.add_auth(\"test-user\", \"test-pass\")\n\n await script_auth.change_password(\n hass, provider, Mock(username=\"test-user\", new_password=\"new-pass\")\n )\n\n assert len(hass_storage[hass_auth.STORAGE_KEY][\"data\"][\"users\"]) == 1\n captured = capsys.readouterr()\n assert captured.out == \"Password changed\\n\"\n data.validate_login(\"test-user\", \"new-pass\")\n with pytest.raises(hass_auth.InvalidAuth):\n data.validate_login(\"test-user\", \"test-pass\")", "def test_password_is_okay():\n\twith pytest.raises(Exception):\n\t\tassert password_is_ok('qqqqqqqq') == Exception('Password either has to contain a digit \\n or password has to contain uppercase \\n or password has to contain lowercase') \n\n\twith pytest.raises(Exception):\n\t\tassert password_is_ok('') == Exception('Password either has to contain a digit \\n or password has to contain uppercase \\n or password has to contain lowercase') \n\n\t\"\"\"test that valid passwords work\"\"\"\n\tassert password_is_ok('Q8qqqqqqqq') == True\n\tassert password_is_ok('q8qqqqqqqq') == True\n\tassert password_is_ok('Qqqqqqqqqq') == True\n\tassert password_is_ok('qqqqqqqqqq') == True", "def test_registeration_short_password(self):\n response = self.signup_a_user(self.user_short_password)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertNotIn(\"token\", response.data)", "def test_serialize_password(capsys):\n users.serialize_password(\"test1234\")\n salted_password = yaml.safe_load(capsys.readouterr().out)\n assert base64.b64decode(salted_password[\"password\"]) == users.hash_password(\n \"test1234\", base64.b64decode(salted_password[\"salt\"])\n )", "def test_password_too_short(self):\n\t\tpayload = {\n\t\t'email': 'test@gmail.com',\n\t\t'password': 'pw',\n\t\t'name': 'test'\n\t\t}\n\n\t\tres = self.client.post(CREATE_USER_URL, payload)\n\t\tself.assertEqual(res.status_code,status.HTTP_400_BAD_REQUEST)\n\t\tuser_exists = get_user_model().objects.filter(\n\t\t\temail = payload['email']\n\t\t\t).exists()\n\n\t\tself.assertFalse(user_exists)", "def setpassword(self, pwd):\n pass", "def get_password(args):\n for password in args:\n heashed=hash_password(password)\n print(heashed)\n # checked=check_password(heashed)", "def test_password_change_provided(self):\n token = str((jwt.encode(\n {\"email\": \"bagendadeogracious@gmail.com\"}, \n settings.SECRET_KEY)).decode('utf-8')\n )\n self.client.post(self.registration_url, valid_user, format='json')\n response = self.client.patch(\n self.change_password_url+\"?token=\"+token, {\"pwd\": \"bagenda1234\"},\n format='json'\n )\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data['errors']\n [0], \"Password field is required.\")", "def test_reset_password(self):\n\n dietitian = Dietitian.query.get(1)\n reset_password(\"newpass\", dietitian)\n\n self.assertEqual(True, dietitian.check_password(\"newpass\"))", "def test_auth_with_des_stored_password(self):\n # Create test user\n name = u'Test User'\n # generated with \"htpasswd -nbd blaze 12345\"\n password = '{DES}gArsfn7O5Yqfo' # 12345\n self.createUser(name, password, True)\n\n try:\n import crypt\n # Try to \"login\"\n theuser = user.User(self.request, name=name, password='12345')\n assert theuser.valid\n except ImportError:\n 
py.test.skip(\"Platform does not provide crypt module!\")", "def test_that_correct_password_returns_true(new_user):\n user, user_data = new_user\n\n assert_that(user.verify_password(user_data.get('password'))).is_true()", "def test_authenticate_invalid_password(self):\r\n print(\"Authenticate user invalid password (wrong)\")\r\n username = \"admin\"\r\n password = \"password9999\"\r\n\r\n self.assertEqual(self.system.authenticate(username, password), False)", "def test05_password_special(self):\n self.set_complexity(length=0, numeric=0, upper=0, lower=0, special=5)\n\n invalid = (\n \"A\",\n \"!!!!\",\n \"!A_B@C£D\",\n \"@@PASSWORD123!!\",\n \"ADMIN\",\n \"A1aB2bC3cD4dE5eF6fG7g\",\n )\n self.assertPasswordsInvalid(invalid)\n\n valid = (\n \"_____\",\n \"_!@£$\",\n \"A!B@C£D$F%\",\n \"Tr0ub4dor&3!@£$\",\n \"1234;.,/]1234\",\n \"a!A@0£b$B%0^c&C*0(d)D_0+e\",\n 'password1234\\'\"\"\"\"\"',\n \"p@$$w@*d\",\n )\n self.set_passwords(valid)", "def generate_password(c, user=\"root\"):\n passw = subprocess.run(\n [\n \"nix\",\n \"run\",\n \"--inputs-from\",\n \".#\",\n \"nixpkgs#xkcdpass\",\n \"--\",\n \"-d-\",\n \"-n3\",\n \"-C\",\n \"capitalize\",\n ],\n text=True,\n check=True,\n stdout=subprocess.PIPE,\n ).stdout.strip()\n hash = subprocess.run(\n [\n \"nix\",\n \"run\",\n \"--inputs-from\",\n \".#\",\n \"nixpkgs#mkpasswd\",\n \"--\",\n \"-m\",\n \"sha-512\",\n \"-s\",\n ],\n text=True,\n check=True,\n stdout=subprocess.PIPE,\n input=passw,\n ).stdout.strip()\n print(\"# Add the following secrets\")\n print(f\"{user}-password: {passw}\")\n print(f\"{user}-password-hash: {hash}\")", "def getpassword(description = \"\"):\n\tif (description != \"\"): \n\t\tsys.stdout.write (\"%s\\n\" % description)\n\t\t\n\tpassword1 = getpass.getpass(\"Password: \");\n\tpassword2 = getpass.getpass(\"Password (confirm): \");\n\n\tif (password1 == password2):\n\t\treturn password1\n\telse:\n\t\tsys.stdout.write (colors.ORANGE + \"[Warning] Password did not match, please try again\" + colors.NO + \"\\n\")\n\t\treturn getpassword()", "def test_check_password_method(self):\n\n self.user.save()\n\n self.assertTrue(self.user.check_password('password'))", "def valid_pass(password):\r\n if len(password) >= 8 and len(password) <= 25 and password != \"forgot\":\r\n return True\r\n\r\n else:\r\n print(\"This is not a vaid password\")\r\n print(\"Password should be between 8 and 25 letters\")\r\n\r\n return False", "def test_password_is_hashed(self):\n self.request.json_body = deepcopy(self.new_account)\n result = users_post_view(self.request)['d']\n user = self.session.query(User).one()\n expected = dict_from_row(user, remove_fields=removals)\n session = self.session.query(Session).one()\n expected['session'] = dict_from_row(session, remove_fields=removals)\n self.assertEqual(result, expected)\n user = self.session.query(User).one()\n self.assertNotEqual(user.password, self.new_account['password'])", "def test_user_check_password(self):\r\n user = User(username=self.test_username, email=self.test_email)\r\n user.password = generate_password_hash(self.test_password)\r\n self.db.session.add(user)\r\n self.db.session.commit()\r\n assert user.check_password(self.test_password)", "def passsword(self, password):\n self.passwor_harsh = generate_password_hash(password)", "def test_create_user_with_short_password(self):\n data = {\n 'email': 'foobarbaz@example.com',\n 'password': 'foo'\n }\n\n response = self.client.post(self.create_url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n 
self.assertEqual(len(response.data['password']), 1)", "def test_password_strength_validator(self):\n self.assertIsNone(validate_password_strength('abcd123'))", "def test_upgrade_password_from_des_to_ssha(self):\n # Create test user\n name = u'Test User'\n # generated with \"htpasswd -nbd blaze 12345\"\n password = '{DES}gArsfn7O5Yqfo' # 12345\n self.createUser(name, password, True)\n\n # User is not required to be valid\n theuser = user.User(self.request, name=name, password='12345')\n assert theuser.enc_password[:6] == '{SSHA}'", "def _prompt_for_password(self, args):\n if not args.password:\n args.password = getpass.getpass(\n prompt='\"--password\" not provided! Please enter password for host %s and user %s: '\n % (args.host, args.user))\n return args", "def test_pbkdf2_sha256_password_reuse(self):\r\n user = self._user_factory_with_history()\r\n staff = self._user_factory_with_history(is_staff=True)\r\n\r\n # students need to user at least one different passwords before reuse\r\n self.assertFalse(PasswordHistory.is_allowable_password_reuse(user, \"test\"))\r\n self.assertTrue(PasswordHistory.is_allowable_password_reuse(user, \"different\"))\r\n self._change_password(user, \"different\")\r\n\r\n self.assertTrue(PasswordHistory.is_allowable_password_reuse(user, \"test\"))\r\n\r\n # staff needs to use at least two different passwords before reuse\r\n self.assertFalse(PasswordHistory.is_allowable_password_reuse(staff, \"test\"))\r\n self.assertTrue(PasswordHistory.is_allowable_password_reuse(staff, \"different\"))\r\n self._change_password(staff, \"different\")\r\n\r\n self.assertFalse(PasswordHistory.is_allowable_password_reuse(staff, \"test\"))\r\n self.assertFalse(PasswordHistory.is_allowable_password_reuse(staff, \"different\"))\r\n self.assertTrue(PasswordHistory.is_allowable_password_reuse(staff, \"third\"))\r\n self._change_password(staff, \"third\")\r\n\r\n self.assertTrue(PasswordHistory.is_allowable_password_reuse(staff, \"test\"))", "def test_read(sqlite_db):\n site = \"www.example.com\"\n passwd = smm.read_passwd(site)\n assert passwd == \"TheNewPassword\"\n bad_request = smm.read_passwd(\"NotASite\")\n assert not bad_request", "def test_user_hash_with_salt(self):\n self.assertEqual(get_user_hash(\"johndoe\", salt=\"jane\").hex()[:6], \"fb0bf4\")", "def test_password_strength(self):\n with self.client:\n response = register_user(\n self, 'Dalin', 'Oluoch', 'anothergmail.com', 'asdfasdf')\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Validation errors.')\n self.assertTrue(response.content_type == 'application/json')\n self.assertEqual(response.status_code, 422)", "def test_password_error(self):\n token = str((jwt.encode({\n \"email\": \"bagendadeogracious@gmail.com\"},\n settings.SECRET_KEY)).decode('utf-8')\n )\n self.client.post(self.registration_url, valid_user, format='json')\n response = self.client.patch(\n self.change_password_url+\"?token=\"+token,\n {\"password\": \"bag\"},\n format='json'\n )\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data['error'],\n \"password should be atleast 8 characters.\")", "def valid_pwd(name, password, h):\n salt = h.split(',')[0]\n return h == make_pwd_hash(name, password, salt)", "def test_verify_password(self):\n db.session.add(self.user)\n db.session.commit()\n\n user = User.query.filter_by(user_name = \"john_doe\").first()\n\n self.assertTrue(user.verify_password(\"password\"))\n 
self.assertFalse(user.verify_password(\"pass\"))", "def passwordGen() :\n\treturn __randomString(12)", "async def test_change_password_invalid_user(\n hass: HomeAssistant, provider, capsys, hass_storage: dict[str, Any]\n) -> None:\n data = provider.data\n data.add_auth(\"test-user\", \"test-pass\")\n\n await script_auth.change_password(\n hass, provider, Mock(username=\"invalid-user\", new_password=\"new-pass\")\n )\n\n assert hass_auth.STORAGE_KEY not in hass_storage\n captured = capsys.readouterr()\n assert captured.out == \"User not found\\n\"\n data.validate_login(\"test-user\", \"test-pass\")\n with pytest.raises(hass_auth.InvalidAuth):\n data.validate_login(\"invalid-user\", \"new-pass\")", "def testLoginPassword(self):\n self.assertEquals(models.SUCCESS, self.users.add(\"userI\", \"password\"))\n self.assertEquals(models.ERR_BAD_CREDENTIALS, self.users.login(\"userI\", \"passw0rd\"))", "def test_usernamepassword(self):\n creds = userbase.Preauthenticated('foo@bar')\n self.assertTrue(\n verifyObject(IUsernamePassword, creds),\n \"Preauthenticated does not implement IUsernamePassword\")\n self.assertTrue(\n creds.checkPassword('random string'),\n \"Preauthenticated did not accept an arbitrary password.\")", "def test_auth_with_md5_stored_password(self):\n # Create test user\n name = u'Test User'\n password = '{MD5}$1$salt$etVYf53ma13QCiRbQOuRk/' # 12345\n self.createUser(name, password, True)\n\n # Try to \"login\"\n theuser = user.User(self.request, name=name, password='12345')\n assert theuser.valid" ]
[ "0.78173095", "0.7571552", "0.75606406", "0.75471747", "0.7364602", "0.7311012", "0.7281988", "0.7226423", "0.7189455", "0.7154671", "0.70966953", "0.70853573", "0.7078164", "0.70628875", "0.7048393", "0.7000666", "0.6985219", "0.69709235", "0.6954122", "0.69389814", "0.6925023", "0.69159347", "0.69137853", "0.6905204", "0.6877858", "0.6856007", "0.68529403", "0.68439907", "0.6825884", "0.682173", "0.6821187", "0.67978996", "0.67973304", "0.67972827", "0.679072", "0.6784711", "0.67714906", "0.6755357", "0.6754031", "0.6695967", "0.6679725", "0.666839", "0.66671443", "0.6664706", "0.6656557", "0.6656321", "0.6634659", "0.66310036", "0.6606027", "0.6600439", "0.6600121", "0.65928876", "0.6591874", "0.6588329", "0.6586615", "0.65743244", "0.65693545", "0.6567449", "0.65521055", "0.6551301", "0.65498763", "0.6548721", "0.6544639", "0.653677", "0.6534686", "0.6533204", "0.65309316", "0.6521595", "0.65168947", "0.6516455", "0.6513671", "0.6511457", "0.6509686", "0.6507565", "0.65041596", "0.64836204", "0.64724106", "0.6469249", "0.64679146", "0.6461819", "0.64593834", "0.64497274", "0.64422977", "0.6433587", "0.64284015", "0.6428256", "0.6425184", "0.6420581", "0.6420463", "0.6415866", "0.6410851", "0.6396957", "0.63954806", "0.6394753", "0.63939756", "0.63893104", "0.6388149", "0.63850707", "0.63818765", "0.6379726" ]
document_score: 0.65491563
document_rank: 61
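Each block of rows here carries the same fields: a short query, the positive document it describes, an objective dict, a pool of hard-negative snippets, their scores, and finally the positive document's own score and rank. As a minimal, purely illustrative sketch of how one such row could be held in Python, the class name RetrievalRow and the parse_scores helper below are assumptions of this example and not part of the dump itself:

    from dataclasses import dataclass
    from typing import List

    @dataclass
    class RetrievalRow:
        # Field names mirror the columns of this dump; the class itself is hypothetical.
        query: str                    # e.g. "TEST IF THE AUTH IS REQUIRED"
        document: str                 # positive code snippet answering the query
        negatives: List[str]          # hard-negative code snippets
        negative_scores: List[float]  # one score per negative
        document_score: float         # score assigned to the positive document
        document_rank: int            # rank of the positive document (61 in the row above)

    def parse_scores(raw: List[str]) -> List[float]:
        # Scores are stored as strings such as "0.78173095" in the raw rows.
        return [float(s) for s in raw]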

query: TEST IF THE AUTH IS REQUIRED
document:
    def test_retrieve_user_unauthorized(self):
        res = self.client.get(ME_URL)
        self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_auth():", "def _check_auth(self):\n if self.authToken:\n return True\n else:\n msg = \"you need to login\"\n self.raise_error(msg)", "def requires_auth(self):\n return True", "def check_auth(username, password):\n # return username == app.config['USER'] and password == app.config['PASS']\n\n return username == app.config['USER'] and password == app.config['PASS']", "def check_auth(username, password):\n return get_ct_object(username, password) is not None", "def check_auth(username, password, expected_user, expected_pw):\n return username == expected_user and password == expected_pw", "def check_auth(username, password):\n return username == app.config['USERNAME'] and (\n password == app.config['PASSWORD'])", "def check_auth(username, password):\n return (username == app.config['USERNAME'] and\n password == app.config['PASSWORD'])", "def check_auth(_, http_password):\n return (password is not None) and (password == http_password)", "def check_auth(username, password):\n return username == USERNAME and password == PASSWORD", "def check_auth(username, password):\n return username == 'admin' and password == 'secret'", "def check_auth(username, password):\n return username == 'admin' and password == 'secret'", "def check_auth(username, password):\n return username == os.environ['USERNAME'] and password == os.environ['PASSWORD']", "def check_auth(username, password):\n return username == 'sammy' and password == 'BasicPassword!'", "def check_auth(username, password):\n return username == 'admin' and password == 'Passw0rd'", "def check_auth(username, password):\n return username == current_app.config['DOC_USERNAME'] and password == current_app.config['DOC_PASSWORD']", "def check_auth(username, password):\n return username == 'admin' and password == 'pebble'", "def check_auth(username, password):\n return username == 'admin' and password == 'password'", "def check_auth(username, password):\n return username == 'daniel' and password == config['redis_auth_key']", "def auth(username, password):\n return username == password", "def check_auth(username, password):\n return username == 'admin' and password == 'worcester'", "def check_auth(username, password):\n return username == 'jeffkoons' and password == 'likesweirdbaloons'", "def check_auth(username, password):\n return username == 'asimov' and password == 'tagada72'", "def check_auth(username, password):\n return username == 'admin' and password == 'admin'", "def check_auth(username, password):\r\n return username == current_app.config['ADMIN_USERNAME'] \\\r\n and password == current_app.config['ADMIN_PASSWORD']", "def check_auth(username, password):\n return password == os.getenv('PASSWORD')", "def check_auth(username, password):\n return username == 'aweber' and password == 'aweber1100'", "def check_auth(username, password):\n return username == 'admin' and password == 'root'", "def check_auth(username, password):\n return username == get_env('UPLOAD_USER') and password == get_env('UPLOAD_PASSWORD')", "def check_auth(username, password):\n return username == 'nicholas' and password == ADMIN_PASS", "def test_auth_required(self):\n\n res = self.client.get(QUIZZES_URL)\n\n self.assertTrue(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_auth_required(self):\n res = self.client.get(RECIPE_URL)\n self.assertEqual(res.status_code,status.HTTP_401_UNAUTHORIZED)", "def test_auth_required(self):\n\n res = self.client.get(SERVICES_URL)\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def check_auth(self):\n if 
self.enterprise_url is not None:\n return True\n try:\n if self.api is not None:\n # Throws AuthenticationFailed if invalid credentials but\n # does not deduct from the rate limit.\n self.api.ratelimit_remaining\n return True\n else:\n self.print_auth_error()\n except AuthenticationFailed:\n self.print_auth_error()\n return False", "def check_auth(username, password):\n return username == c.id and password == c.pw", "def _has_auth(creds: Dict[str, str]) -> bool:\n if creds.get(\"user\") in [None, \"\"] or creds.get(\"passwd\") in [None, \"\"]:\n warnings.warn(\"Credentials were not supplied. Public data access only.\", NoAuthWarning)\n return False\n return True", "def check_authentication():\r\n\r\n #TODO: Reservation based authentication\r\n try:\r\n authenticated_user()\r\n except Exception as e:\r\n return e\r\n\r\n return True", "def test_auth_required(self):\n res = self.client.get(RECIPE_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_auth_required(self):\n res = self.client.get(RECIPE_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def _has_auth_details(self) -> bool:\n\n return all([self.secret is not None, self.api_key is not None])", "def test_authentication_required(self):\n self.auth.validate_token_request(self.request)\n self.mock_validator.client_authentication_required.assert_called_once_with(self.request)", "def check_auth(username, password):\n return username == 'admin' and password in app.config[\"CLAIM_SECRETS\"]", "def test_auth_required(self):\n res = self.client.get(INGREDIENTS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def is_frozensand_auth_available(self):\n cvar = self.getCvar('auth')\n if cvar:\n auth = cvar.getInt()\n return auth != 0\n else:\n return False", "def auth_isok(self):\n # pylint: disable=W0603\n global KEY\n return_value = False\n if KEY is None:\n return_value = True\n elif self.headers.get('Authorization') == 'Basic ' + KEY:\n return_value = True\n return return_value", "def has_credentials(self):\n return self.username and self.password and self.url and self.xml_rpc", "def test_loggin_required(self):\n response = self.client.get(RESGATE_URL)\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_auth_success(self):\n self.assertEqual(Freenas(hostname)._user, 'root')", "def test_auth(self):\n options = Options()\n options.parseOptions([\"--auth\", \"memory:admin:admin:bob:password\"])\n self.assertEqual(len(options[\"credCheckers\"]), 1)\n checker = options[\"credCheckers\"][0]\n interfaces = checker.credentialInterfaces\n registered_checkers = options.service.smtpPortal.checkers\n for iface in interfaces:\n self.assertEqual(checker, registered_checkers[iface])", "def _check_auth(self, group_id):\n return", "def test_auth_required(self):\n res = self.client.get(RECIPES_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def check_auth(username, password):\n try:\n locust_username = os.environ['LOCUST_USER_NAME']\n locust_password = os.environ['LOCUST_PASSWORD']\n return username == locust_username and password == locust_password\n except:\n return True", "def check_auth(username, password):\n ADMIN_USER = config.CONFIG_VARS['ADMIN_USER']\n ADMIN_PASS = config.CONFIG_VARS['ADMIN_PASS']\n return username == ADMIN_USER and password == ADMIN_PASS", "def get_authorization():\n return True", "def test_login_required(self):\n res = self.client.get(RETETA_URL)\n\n self.assertEqual(res.status_code, 
status.HTTP_401_UNAUTHORIZED)", "def check_auth(username, password):\n session.pop('username', None)\n session.pop('password', None)\n session['username'] = username\n session['password'] = password\n # Test if we can connect to a region\n connect_to_region()\n return True", "def auth(self, username, password):\n return False", "def check_auth(self):\n if self.type_of_auth == BboxConstant.AUTHENTICATION_TYPE_LOCAL:\n access_level_required = self.get_auth_access_needed_for_local()\n else:\n access_level_required = self.get_auth_access_needed_for_remote()\n\n if access_level_required == BboxConstant.AUTHENTICATION_LEVEL_NONE:\n return False\n elif access_level_required == BboxConstant.AUTHENTICATION_LEVEL_PRIVATE:\n return self.is_authentified()\n elif access_level_required == BboxConstant.AUTHENTICATION_LEVEL_PUBLIC:\n return True", "def test_authentication_is_not_required(self):\n # Act\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)", "def test_auth_required_recipe(self):\n res = self.client.get(RECIPE_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def noauth(self):\n try:\n # some endpoints dont return json\n return self.json['response'].get('error_id') == 'NOAUTH'\n except:\n return False", "def check_auth(username, password):\n\n config = get_app_configurations()\n\n with open(config[\"credentials\"], \"r\") as fh:\n u, p = fh.readline().rstrip().split(\",\")\n\n return username == u and password == p", "def auth():\n pass", "def auth():\n pass", "def test_validate_credentials(self):\n pass", "def test_login_required(self):\n res = self.client.get(INGREDIENTS_URL)\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_login_required(self):\n res = self.client.get(RECIPE_URL)\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_read_o_auth_client_authorization(self):\n pass", "def test_login_required(self):\n res = self.client.get(INGREDIENT_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_login_required(self):\n res = self.client.get(RAINGAUGEREADING_URL)\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_login_requires(self):\n res = self.client.get(VEHICLE_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def is_token_required(self):\n return any([self.app_id, self._login, self._password])", "def auth(self):\n ok = False\n if self.private_token:\n ok = self.token_auth()\n if not ok:\n self.credentials_auth()", "def check_user_and_login(self) -> Response:\n pass", "def test_login_required(self):\n\n res = self.client.get(INGREDIENTS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_login_required(self):\n\n res = self.client.get(INGREDIENTS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_required_auth(self):\n res = self.client.get(MOVIES_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_can_login(self):\n user = authenticate(username='jack', password='secret')\n self.assertTrue(user is not None)\n self.assertTrue(user.is_authenticated)", "def check_auth(cls, Configuration):\n if not Configuration.auth_token:\n cls.authorize(Configuration)", "def credentials_work(self):\n good = True\n try:\n 
self.session.authenticate_client()\n except cloudpassage.CloudPassageAuthentication:\n good = False\n return good", "def test_valid(self):\n auth_tuple = imageroller.main.read_authconfig(\n imageroller.test.get_config_parser(self._valid))\n self.assertTupleEqual(auth_tuple, (AUTH_DATA[\"ApiUser\"],\n AUTH_DATA[\"ApiKey\"]))", "def test_login_required():\n pass", "def check_auth_none(self, username):\n return AUTH_FAILED", "def test_authentication_success():\n d = Dexcom(USERNAME, PASSWORD)\n d._validate_account()\n d._validate_session_id()", "def check_auth_interactive(self, username, submethods):\n return AUTH_FAILED", "def test_valid_login(self):\n self.assertTrue(self.session.authenticate('test@test.com', 'supersecret'))", "def credentials_work(self):\n\n good = True\n try:\n self.authenticate_client()\n except cloudpassage.CloudPassageAuthentication:\n good = False\n return good", "def test_auth_instance(self):\n self.assertIsInstance(Freenas(hostname), Freenas)\n\n\n\n\n #self.assertEqual(freenas.Freenas(hostname).request('auth/check_user',\n # method='POST', data={'username': 'api',\n # 'password': 'api'\n # }\n # ), True)", "def test_ApiWillAuthenticate_ValidCredentials_Successfully(self):\n api = Api(self.userId, self.testToken)\n self.assertTrue(api.connected())", "def _require_login(self):\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + str(self.token))", "def test_token(self):\n api_response = requests.get(self.api_config.get_api_url() + \"greetings/isloggedin\",\n headers={\"Authorization\": \"Bearer \" + self.API_TOKEN})\n\n if api_response.status_code == 401 or 403:\n return False\n else:\n return True", "def test_not_auth(self):\n rv = self.get('/queue/')\n self.assertJSONError(rv, 'TagalleryMissingLoginInformation')\n return", "def _is_valid(self):\n # TODO: Query Google to validate credentials\n return True", "def check_credentials():\n\n required_variables = ('OS_AUTH_URL', 'OS_USERNAME', 'OS_PASSWORD')\n\n logging.debug(\"checking openstack auth environment variables\")\n ok = True\n for var in required_variables:\n if not var in os.environ:\n logging.warning(\"missing required environment variable: {}\".format(var))\n ok = False\n else:\n logging.debug(\"OpenStack Auth Var: {} = {}\".format(var, os.environ[var]))\n\n return ok", "def user_must_authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def test_user_can_login(self):\n user = authenticate(username='Marry', password='secret')\n self.assertFalse(user is None)\n self.assertTrue(user.is_authenticated)", "def test_auth_json(self):\n\n config = get_config()\n\n if config.getboolean('auth_test', 'enabled'):\n\n # Run only if enabled\n\n try:\n\n timestamp = config.getint('auth_test', 'timestamp')\n\n except ValueError:\n\n # If timestamp is set to a none-integer, we'll just assume\n # that it's unset\n\n timestamp = None\n\n response = authenticate(\n config.get('auth_test', 'url'),\n config.get('auth_test', 'account'),\n config.get('auth_test', 'preauthkey'),\n config.get('auth_test', 'account_by'),\n config.getint('auth_test', 'expires'),\n timestamp,\n request_type='json'\n )\n\n self.assertNotEqual(\n response,\n None,\n \"Authentication with the configured settings \"\n \"was not successful\"\n )", "def valid_credentials(self):\n path = '/api/session-user'\n url = '{}{}'.format(self._url_base, path)\n response, content = super(DSBaseService, 
self)._request(url,\n headers=self._headers(with_content_type=False))\n return int(response['status']) == 200", "def test_login_required(self):\n\n res = self.client.get(MODULES_URL)\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)" ]
[ "0.84601325", "0.7771871", "0.77694154", "0.76264864", "0.7594435", "0.75779384", "0.7522689", "0.75105244", "0.7504356", "0.7393126", "0.7392858", "0.7392858", "0.7383449", "0.7368314", "0.7348574", "0.7347608", "0.7333207", "0.7320931", "0.7320824", "0.73014426", "0.72929215", "0.72852015", "0.7273246", "0.72719854", "0.72673655", "0.7254416", "0.72434497", "0.7242672", "0.72363526", "0.71940273", "0.71787363", "0.7155589", "0.71454424", "0.71236354", "0.71233237", "0.7091727", "0.70757616", "0.705322", "0.705322", "0.70497906", "0.7043884", "0.70247716", "0.70144665", "0.7005081", "0.69629747", "0.69450223", "0.69438416", "0.69277376", "0.6885317", "0.6876948", "0.6874234", "0.68533975", "0.68379307", "0.6822886", "0.6817538", "0.68172264", "0.6817187", "0.68161505", "0.68095195", "0.6791503", "0.6789634", "0.6789634", "0.67709386", "0.6767024", "0.6753422", "0.6753422", "0.67103726", "0.66876537", "0.6675152", "0.6663001", "0.6658999", "0.66503054", "0.66284704", "0.66284084", "0.66231084", "0.6613297", "0.6613046", "0.6613046", "0.6609254", "0.660348", "0.66030157", "0.66022867", "0.6596435", "0.6561738", "0.65603817", "0.6548274", "0.65290755", "0.6527375", "0.65265805", "0.65259314", "0.6512563", "0.6510426", "0.6493202", "0.6483842", "0.64829165", "0.6477959", "0.6469834", "0.6463984", "0.6463079", "0.6458377", "0.6452794" ]
document_score: 0.0
document_rank: -1
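The objective metadata attached to every row names a single triplet objective over (query, document, negatives). A small sketch of how a row might be expanded into contrastive training triplets follows; the function name build_triplets and the max_negatives cap are illustrative assumptions rather than anything defined by the dataset:

    from typing import Iterator, List, Tuple

    def build_triplets(query: str,
                       document: str,
                       negatives: List[str],
                       max_negatives: int = 4) -> Iterator[Tuple[str, str, str]]:
        # Yield (anchor, positive, negative) triplets from one row, capped at
        # max_negatives so a single row does not dominate a training batch.
        for negative in negatives[:max_negatives]:
            yield (query, document, negative)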

query: TEST UPDATE FOR AUTHENTICATED USER
document:
    def test_update_user_profile(self):
        payload = {"name": "Lucifer", 'password': "12346987"}
        res = self.client.patch(ME_URL, payload)
        self.user.refresh_from_db()
        self.assertEqual(self.user.name, payload['name'])
        self.assertTrue(self.user.check_password(payload['password']))
        self.assertEqual(res.status_code, status.HTTP_200_OK)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_user(self):\n pass", "def test_user_update_request(self):\n pass", "def test_update(self):\n user = self.custodian_1_user\n user_client = self.custodian_1_client\n urls = [reverse('api:user-detail', kwargs={'pk': user.pk})]\n new_first_name = \"New First Name\"\n data = {\n \"first_name\": new_first_name,\n }\n access = {\n \"forbidden\": [self.anonymous_client, self.readonly_client, self.custodian_2_client],\n \"allowed\": [self.admin_client, user_client]\n }\n\n for client in access['forbidden']:\n for url in urls:\n self.assertIn(\n client.patch(url, data, format='json').status_code,\n [status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]\n )\n\n for client in access['allowed']:\n for url in urls:\n new_first_name += '1'\n data['first_name'] = new_first_name\n self.assertEqual(\n client.patch(url, data, format='json').status_code,\n status.HTTP_200_OK\n )\n user.refresh_from_db()\n self.assertEqual(user.first_name, new_first_name)", "def test_patch_user(self):\n pass", "def test_user_is_really_updated():\n response = api_helper.get_user(user_id=pytest.test_user.id)\n check_user_data_in_response(response.json()[\"data\"][0])", "def test_040_update_user(self):\n\n testflow.step(\"Updating user %s\", TEST_USER2)\n assert USER_CLI.run(\n 'edit',\n TEST_USER2,\n attribute='firstName=userX2',\n )[0]", "def test_update_user(self):\n\n update_dict = dict(\n username='test_another_username',\n role='test_new_role',\n department='test_new_department'\n )\n\n # Update non-existing user\n updated = self.user_api.update_user(MAGEN_USER['user_uuid'], update_dict)\n self.assertTrue(updated.success)\n self.assertEqual(updated.count, 0)\n\n # Insert user in Database\n inserted = self.user_api.insert_user(MAGEN_USER)\n self.assertTrue(inserted.success)\n\n # Update existing user\n updated = self.user_api.update_user(MAGEN_USER['user_uuid'], update_dict)\n self.assertTrue(updated.success)\n self.assertEqual(updated.count, 1)\n # Verify that data was updated\n selected = self.user_api.get_user(MAGEN_USER['user_uuid'])\n self.assertTrue(selected.success)\n self.assertEqual(selected.documents['username'], update_dict['username'])\n self.assertEqual(selected.documents['role'], update_dict['role'])\n self.assertEqual(selected.documents['department'], update_dict['department'])", "def test_update_the_created_user():\n pytest.test_user.name += \"Updated\"\n response = api_helper.update_user(pytest.test_user)\n assert response.status_code == 200", "def test_update(self):\n\n user = CustomUser.objects.get(email=\"test@test.test\")\n user.update(first_name=\"UpdatedName\", second_name=\"UpdatedSecondName\")\n\n self.assertEqual(user.first_name, \"UpdatedName\")\n self.assertEqual(user.second_name, \"UpdatedSecondName\")", "def test_update_user_profile(self):\n\n new_credentials = {'name': 'New Name', 'password': 'NewTestpass12'}\n response = self.client.patch(URL_ME, new_credentials)\n\n # Refresh the details of the user from the database.\n self.user.refresh_from_db()\n\n # Check that the update is successful.\n self.assertEqual(self.user.name, new_credentials['name'])\n self.assertTrue(self.user.check_password(new_credentials['password']))\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_update_user_profile(setup_client, setup_user):\n client = setup_client\n user = setup_user\n payload = {\n \"name\": \"New name\",\n \"role\": \"Purchaser\",\n \"password\": \"New password\"\n }\n res = client.patch(ME_URL, payload)\n user.refresh_from_db()\n assert res.status_code 
== status.HTTP_200_OK\n assert user.name == payload[\"name\"]\n assert user.role == payload[\"role\"]\n assert user.check_password(payload[\"password\"])\n assert res.status_code == status.HTTP_200_OK", "def test_admin_update_user(self):\n resp = self.admin_create_user()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Lover',\n username='lover',\n password='Andela8',\n role='attendant'\n )\n resp = self.client.put(\n '/api/v1/users/2',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'user updated!')\n self.assertEqual(resp.status_code, 200)", "def test_update_user_success(self):\n url = '{0}/'.format(self.path, self.user1['password'])\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.data['id'], models.User.get['id'])", "def update_user():", "def test_update_user_profile(self):\n payload = {'name': 'Test name', 'password': 'new_password'}\n res = self.client.patch(ME_URL, payload)\n\n self.user.refresh_from_db()\n self.assertEqual(self.user.name, payload['name'])\n self.assertTrue(self.user.check_password(payload['password']))\n self.assertEqual(res.status_code, status.HTTP_200_OK)", "def test_update_user_profile(self):\n payload = {'name': 'new name', 'password': 'newpassword123'}\n\n res = self.client.patch(ME_URL, payload)\n\n self.user.refresh_from_db()\n self.assertEqual(self.user.name, payload['name'])\n self.assertTrue(self.user.check_password(payload['password']))\n self.assertEqual(res.status_code, status.HTTP_200_OK)", "def test_user_update(self):\n userPK = self.testUser.pk\n url = reverse('User-detail', kwargs={'pk': userPK})\n data = {'username': 'company1NewTest'}\n response = self.client.put(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(User.objects.get(pk=userPK).username,\n 'company1NewTest')\n data = {'username': 'company1NewTest2'}\n response = self.client.patch(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(User.objects.get(pk=userPK).username,\n 'company1NewTest2')", "def test_update_user_profile(self):\n payload = {'name': 'new name', 'password': 'newpassword123'}\n\n response = self.client.patch(ME_URL, payload)\n\n self.user.refresh_from_db()\n self.assertEqual(self.user.name, payload['name'])\n self.assertTrue(self.user.check_password(payload['password']))\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_update_user(self):\n token = self.authenticate_user(self.auth_user_data).data[\"token\"]\n response = self.client.put(self.user_url,\n self.user_data,\n HTTP_AUTHORIZATION=f'token {token}',\n format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_update_profile_success(self):\n payload = {\n 'email': 'newtest@gmail.com',\n 'password': 'newpassword'\n }\n res = self.client.patch(ME_URL, payload)\n\n # Refresh the user object with latest values from db\n self.user.refresh_from_db()\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(self.user.email, payload['email'])\n self.assertTrue(self.user.check_password(payload['password']))", "def test_update_user_profile(self):\r\n payload = {\r\n 'name': 'new_name',\r\n 'password': 'password123'\r\n }\r\n\r\n res = self.client.patch(ME_URL, payload)\r\n\r\n 
self.user.refresh_from_db()\r\n\r\n self.assertEqual(self.user.name, payload['name'])\r\n self.assertTrue(self.user.check_password(payload['password']))\r\n self.assertEqual(res.status_code, status.HTTP_200_OK)", "def test_user_update_permissions(self):\n userPK = User.objects.get(username='c2e1').pk\n url = reverse('User-detail', kwargs={'pk': userPK})\n data = {'username': 'company1NewTest'}\n response = self.client.put(url, data, format='json')\n #This is 404 instead of 403 because there is no way to view a company\n #that you arent an employee of.\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n self.assertNotEqual(User.objects.get(pk=userPK).username,\n 'company1NewTest')\n data = {'username': 'company1NewTest2'}\n response = self.client.patch(url, data, format='json')\n #This is 404 instead of 403 because there is no way to view a company\n #that you arent an employee of.\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n self.assertNotEqual(User.objects.get(pk=userPK).username,\n 'company1NewTest2')", "def test_update(self):\n\n\n username,userpass = self.testdata.find_account_for('toolsubmitter')\n\n self.utils.account.login_as(username,userpass)\n\n self.contribtool.update(TOOLNAME,username,userpass)", "def test_update_another_user(self):\n user1_response = self.client.post(reverse('user-list'), {\n 'username': 'aseem', 'password': 'passwrodaosida123'\n })\n update_user_resp = self.client.patch(\n reverse('user-list') + '1/', {\n 'username': 'rakesh', 'password': 'passwrodaosida123'\n })\n\n self.assertEqual(update_user_resp.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_update(self, commit, expected, db):\n user = ExampleUserModel(username=\"foo\", email=\"foo@bar.com\")\n user.save()\n user.update(commit=commit, username=\"bar\")\n retrieved = db.session.execute(\"\"\"select * from testusers\"\"\").fetchone()\n assert retrieved.username == expected", "def test_update(self, client, users):\n user = users[0]\n data = factory.build(dict, FACTORY_CLASS=UserFactory)\n url = reverse('users:update', args=(user.pk,))\n response = client.post(url, data)\n assert response.status_code == 302\n assert response.url == reverse('users:list')\n\n user.refresh_from_db()\n assert user.username == data['username']\n assert user.birthday == datetime.datetime.strptime(data['birthday'], '%Y-%m-%d').date()", "def test_update_password(self):\n\n sync = SyncUserAndGroups(\n tsurl=TS_URL,\n username=TS_USER,\n password=TS_PASSWORD,\n disable_ssl=True,\n )\n auag = UsersAndGroups()\n auag.add_user(\n User(name=\"userx\", mail=\"userx@email.addr\", display_name=\"User X\", password=\"password1\")\n )\n # sync updates\n sync.sync_users_and_groups(users_and_groups=auag)\n sync.update_user_password(\n userid=\"userx\", currentpassword=TS_PASSWORD, password=\"password2\"\n )", "def update_user():\n #TODO user update \n pass", "def test_user_update(self):\n self.client.login(username=self.teacher.username,\n password='1234')\n post = {'email': 'teacher_updated@test.com', 'first_name': 'Tim',\n 'last_name': 'Teacher'}\n response = self.client.post(self.update_url, post)\n updated_teacher = SchoolUser.objects.get(\n username=self.teacher.username)\n self.assertEqual(updated_teacher.email, post['email'])", "def test_update_account_user(self):\n self._require_login()\n\n response = self.client.put('/v1/users/' +str(self.user.id)+'/',\n {\"username\": 'toni@malucao', \"password\": 'cidadeeee'},\n format='json')\n\n self.assertEqual(response.status_code, 200,\n 
'Expected Response Code 200, received {0} instead.'.format(response.status_code))", "def test_update_user(self):\n\n client = app.test_client()\n\n response = client.post(\n \"/user/signup/\",\n data=json.dumps(\n dict(\n username=\"admin\",\n password=\"admin\",\n email=\"admin@test.com\",\n role=\"MANAGER\",\n )\n ),\n content_type=\"application/json\",\n )\n\n assert response.status_code == 200\n\n response = client.post(\n \"/user/signup/\",\n data=json.dumps(\n dict(\n username=\"user1\",\n password=\"passwd1\",\n email=\"user1@test.com\",\n role=\"CUSTOMER\",\n )\n ),\n content_type=\"application/json\",\n )\n\n assert response.status_code == 200\n\n # The manager should be able to update the CUSTOMER\n headers = {\n \"Authorization\": \"Basic %s\"\n % b64encode(b\"admin@test.com:admin\").decode(\"ascii\")\n }\n response_update = client.put(\n \"/user/update/\",\n data=json.dumps(dict(email=\"user1@test.com\", data={\"role\": \"STAFF\"})),\n headers=headers,\n content_type=\"application/json\",\n )\n\n assert response_update.status_code == 200\n assert response_update.get_data().decode(\"utf-8\") == \"<h2>updated the data</h2>\"", "def test_updateall():\n url = baseUrl + userurl + emailId\n payload = {'firstName': new_firstName, 'lastName': new_lastName, 'emailId': new_emailId}\n logging.info(\"Update a user's firstName to: %s, lastName to: %s and emailId to: %s\" % (new_firstName, new_lastName, new_emailId))\n r = requests.put(url, data=json.dumps(payload), headers=header)\n assert r.status_code == 200\n resp = r.json()\n assert resp[\"userName\"] == emailId and resp[\"lastName\"] == new_lastName and resp[\"firstName\"] == new_firstName \\\n and resp[\"licenseType\"] == licensetype and resp[\"subscriptionIds\"][0] == subscriptionid and \\\n resp[\"isActive\"] is True and resp[\"source\"] == \"publicapi\" and resp[\"emailId\"] == new_emailId\n global user_id\n user_id = resp[\"id\"]\n assert user_id is not None", "def test_replace_user(self):\n pass", "def test_update_profile_valid_put(self):\n update_user = {\n 'email': 'new@email.com',\n 'password': 'NewPassword!',\n }\n res = self.client.put(ME_URL, update_user)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n\n self.user.refresh_from_db()\n\n self.assertTrue(self.user.check_password(update_user['password']))\n self.assertEquals(self.user.email, update_user['email'])\n self.assertTrue(self.user.name)", "def test_update_useruser_uuid_put(self):\n pass", "def test_can_update_user_profile(self):\n self.update_user()\n self.assertEqual(self.user.first_name, self.updated_data['first_name'])\n self.assertEqual(self.user.last_name, self.updated_data['last_name'])\n self.assertEqual(self.user.email, self.updated_data['email'])", "def test_update(sqlite_db):\n updated_pass = \"TheUpdatedPassword\"\n site = \"www.example.com\"\n response = smm.update_passwd(site, updated_pass)\n assert response\n assert smm.read_passwd(site) == updated_pass\n bad_response = smm.update_passwd(\"NotASite\", updated_pass)\n assert not bad_response", "def test_update_self_fail(self):\n new_user = self.create_user('1')\n url = '/0/chefs/' + str(new_user.pk)\n\n headers = self.login()\n resp = self.client.put(url, **headers)\n self.assertInvalidCredentials(resp)", "def test_update_author_logged(self):\n self.client.force_authenticate(user=self.user)\n\n data = {'name': 'Ken Thompson'}\n\n request = self.client.patch(self.epoint, data)\n\n self.assertEqual(request.status_code, status.HTTP_200_OK)", "def test_patch_user(self):\n new_user = 
self.make_user('new_user')\n url = reverse(\n 'projectroles:api_role_update',\n kwargs={'roleassignment': self.update_as.sodar_uuid},\n )\n patch_data = {'user': str(new_user.sodar_uuid)}\n response = self.request_knox(url, method='PATCH', data=patch_data)\n self.assertEqual(response.status_code, 400, msg=response.content)", "def test_modify_user(self):\n print('(' + self.test_modify_user.__name__+')',\n self.test_modify_user.__doc__)\n # modify the user with provided user dict\n modify_resp = self.connection.modify_user(\n PATIENT_USERNAME, MODIFIED_PATIENT['public_profile'],\n MODIFIED_PATIENT['restricted_profile'])\n self.assertEqual(modify_resp, PATIENT_USERNAME)\n # check each value in the profile with the modified one, see if modification successful\n # get the get_user response\n get_resp = self.connection.get_user(PATIENT_USERNAME)\n resp_r_profile = get_resp['restricted_profile']\n r_profile = MODIFIED_PATIENT['restricted_profile']\n self.assertEqual(\n r_profile['user_id'], resp_r_profile['user_id'])\n self.assertEqual(r_profile['firstname'], resp_r_profile['firstname'])\n self.assertEqual(r_profile['lastname'], resp_r_profile['lastname'])\n self.assertEqual(r_profile['work_address'],\n resp_r_profile['work_address'])\n self.assertEqual(r_profile['gender'], resp_r_profile['gender'])\n self.assertEqual(r_profile['age'], resp_r_profile['age'])\n self.assertEqual(r_profile['email'], resp_r_profile['email'])\n self.assertDictContainsSubset(get_resp, MODIFIED_PATIENT)", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_users_update(mocker):\r\n mocker.patch('subprocess.call')\r\n users.update(user_dict)\r\n subprocess.call.assert_called_with([\r\n 'usermod',\r\n '-p',\r\n password,\r\n '-G',\r\n 'wheel,dev',\r\n 'kevin',\r\n ])", "def test_update_profile_valid_patch(self):\n update_fields = {\n 'password': 'NewPassword!',\n 'name': 'Mona Lisa'\n }\n res = self.client.patch(ME_URL, update_fields)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n\n self.user.refresh_from_db()\n\n self.assertTrue(self.user.check_password(update_fields['password']))\n self.assertEquals(self.user.name, update_fields['name'])", "def test_put_change_user(self):\n new_user = self.make_user('new_user')\n url = reverse(\n 'projectroles:api_role_update',\n kwargs={'roleassignment': self.update_as.sodar_uuid},\n )\n put_data = {\n 'role': PROJECT_ROLE_GUEST,\n 'user': str(new_user.sodar_uuid),\n }\n response = self.request_knox(url, method='PUT', data=put_data)\n self.assertEqual(response.status_code, 400, msg=response.content)", "def test_update_case(self):\n pass", "def test_users_update(mocker):\n mocker.patch('subprocess.call')\n users.update(user_dict)\n subprocess.call.assert_called_with([\n 'usermod',\n '-p',\n password,\n '-G',\n 'wheel,dev',\n 'kevin',\n ])", "def test_post_update_regular_user(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n user_url = reverse('user-detail', kwargs={'pk': self.superuser.id})\n self.client.force_authenticate(user=self.user)\n title = 'Random New Title'\n body = 'Random New Body'\n response = self.client.put(url, {'title': title, 'body': body, 'user': user_url}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_update(client):\n rv = update(client, 'Michael')\n assert json.loads(rv.data.decode())['code'] == 0\n assert json.loads(rv.data.decode())['owner'] == 'Michael'", "def test_updating_user_information_including_password(self):\n 
user_password = self.user.password\n user_update_data = {\"username\": \"bobby\", \"email\": \"bobby@email.com\",\n \"password\": \"otherpassword\"}\n updated_user = self.serializer.update(self.user, user_update_data)\n self.assertEqual(updated_user.username, \"bobby\")\n self.assertEqual(updated_user.email, \"bobby@email.com\")\n self.assertNotEqual(updated_user.password, user_password)", "def test_user_update(self):\n update_data = {\n \"username\": \"testnotUser\",\n \"email\": \"testnotuser@gmail.com\",\n \"first_name\": \"Test\",\n \"last_name\": \"User\",\n \"profile\": {\n \"user\": 1,\n \"contact_number\": \"9860476499\",\n \"address\": \"kapan\",\n \"education\": self.education,\n },\n }\n # files = {'media': open('accounts/tests/1.png', 'rb')}\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.get_token())\n response = self.client.put(reverse(\"account:user-update\"), update_data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['username'], \"testnotUser\")\n self.assertNotEqual(response.data['username'], \"testUser\")", "def test_update_community_works_for_group_admins(self):\n get_response = lambda: self.client.put(self.url, self.update_payload)\n\n self.assert_authentication_required(get_response)\n self.assert_membership_required(get_response)\n self.assert_group_admin_rights_required(get_response)\n\n self.assertNotEqual(Community.objects.get(name=self.GROUP).password, self.new_password)\n\n # bob is group admin, he can update the data:\n self.login_as(\"bob\")\n with self.assertNumQueries(6): \n # (3) is admin check (4) select obj (5) new obj unique check (6) update \n response = get_response()\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(list(response.data.keys()), self.expected_keys)\n self.assertEqual(response.data[\"password\"], self.new_password)\n\n self.assertEqual(Community.objects.get(name=self.GROUP).password, self.new_password)", "def test_update_user_endpoint(self, **kwargs):\n print(\"Create a new user\")\n kwargs['return_response_obj'] = True\n response = self.test_create_user_endpoint(**kwargs)\n response = json.loads(response.text)\n\n print(\"Capture Authorization token\")\n token_type = response[\"data\"][\"token\"][\"token_type\"]\n access_token = response[\"data\"][\"token\"][\"access_token\"]\n headers = {\"Content-Type\": \"application/json\", \"Authorization\": \"{0} {1}\".format(token_type, access_token)}\n kwargs['headers'] = headers\n\n print(\"Update the User\")\n custom_data = Workflows.update_user_details(test_args=self.test_args, **kwargs)\n kwargs[\"data\"] = {\"user\": custom_data}\n\n restapi = Rest(base_uri=self.global_config[\"base_url\"])\n response = restapi.put(relative_url=self.test_args[\"relative_url\"], **kwargs)\n\n if kwargs.get(\"return_response_obj\", False):\n return response\n\n print(\"Verify Response body\")\n assert json.loads(response.text)[\"message\"] == self.test_args[\"expected_result\"], \"Test Failed\"\n return None", "def test_valid_update_user_password(self):\n\n data = {\n 'password': 'pedro123456',\n 'new_password': 'pedro123456789',\n 'confirm_password': 'pedro123456789'\n }\n response = self.client.put(self.url, data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_login_update_ok(testapp):\n testapp.post('/login',\n params={'Username': 'amos',\n 'Password': 'password'})\n resp = testapp.get('/journal/1/edit-entry')\n assert resp.status_code == 200", "def 
test_set_user_status(self):\n pass", "def test_tenant_user_aesthetic_update(sample_identity):\n access_token, tenant, tenant_user, tc = sample_identity\n headers = {\"Authorization\": \"Bearer \" + access_token}\n new_email = f\"{uuid.uuid4()}@c1.com\"\n new_first_name = str(uuid.uuid4())\n new_last_name = str(uuid.uuid4())\n updated_tenant_user = {\"first_name\": new_first_name, \"last_name\": new_last_name}\n update_request = tc.put(\n f\"api/v1/identity/tenant-user/{tenant_user.id}\",\n json=updated_tenant_user,\n headers=headers,\n )\n assert update_request.status_code == 200, \"Update Failed with non 200 error code\"\n assert update_request.json[\"data\"][\"first_name\"] == new_first_name\n assert update_request.json[\"data\"][\"last_name\"] == new_last_name", "def update(self, user: U) -> None:\n ...", "def test_api_user_put(self):\n pass", "def test_mod_combined(self, mapp, existing_user_id, url_of_liveserver):\n mapp.logoff()\n mapp.login(user=existing_user_id, password=\"1234\")\n email_address = existing_user_id + '_' + str(id(self)) + \"@devpi.net\"\n mapp.modify_user(user=existing_user_id, password=id(self),\n email=email_address)\n\n # Verify that the email was changed.\n json = mapp.getjson(url_of_liveserver)\n assert json['result'][existing_user_id]['email'] == email_address\n\n # Verify that the password was indeed changed.\n mapp.logoff()\n mapp.login(user=existing_user_id, password=\"1234\", code=401)\n mapp.login(user=existing_user_id, password=id(self))", "def test_update_other_fields(auth_client):\n account_ids = prep_database(auth_client.sqla)\n\n # For each of the accounts, grab the current value of the \"other\" fields.\n expected_by_id = {}\n for account_id in account_ids:\n current_account = auth_client.sqla.query(Account).filter_by(id=account_id).first()\n expected_by_id[account_id] = {\n 'username': current_account.username,\n 'active': current_account.active\n }\n\n for account_id in account_ids:\n payload = {}\n\n if flip():\n # Randomly update the username.\n new_username = username_factory()\n expected_by_id[account_id]['username'] = new_username\n payload['username'] = new_username\n if flip():\n # Randomly update the active flag.\n new_active = flip()\n expected_by_id[account_id]['active'] = new_active\n payload['active'] = new_active\n\n # At this point, we'll have constructed a payload that might have zero of more\n # of the fields. 
This lets us test various combinations of update requests.\n # The expected_by_id dictionary stores the values we expect to see in the database,\n # whether the original value retrieve earlier or the newly updated on just\n # created.\n\n # It's possible that none of the fields will have been selected for update,\n # which doesn't make much sense, but we'll still test for that possibility.\n\n resp = auth_client.patch(url_for('people.update_account', account_id=account_id), json=payload)\n assert resp.status_code == 200\n\n for account_id in account_ids:\n updated_account = auth_client.sqla.query(Account).filter_by(id=account_id).first()\n assert updated_account is not None\n assert updated_account.username == expected_by_id[account_id]['username']\n assert updated_account.active == expected_by_id[account_id]['active']", "def test_update(self):\n tz = pytz.timezone(settings.TIME_ZONE)\n self.assertFalse(self.user1.o365_licence)\n url = '/api/users/{}/'.format(self.user1.ad_guid)\n data = {\n 'Surname': 'Lebowski',\n 'title': 'Bean Counter',\n 'o365_licence': True,\n\n 'email' : 'l@example.com' ,\n 'name' : 'Mike' ,\n 'username' : 'MikeLebowski' ,\n 'ad_guid' : '123',\n 'expiry_date' : '2019-03-12',\n 'given_name' : 'Mike',\n #'Enabled' :'True',\n 'active' : True,\n 'deleted' : False,\n\n\n\n }\n response = self.client.put(url, json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 202)\n user = DepartmentUser.objects.get(pk=self.user1.pk) # Refresh from db\n self.assertEqual(user.surname, data['Surname'])\n self.assertEqual(user.title, data['title'])\n\n self.assertEqual(user.name , data['name'])\n self.assertEqual(user.email, data['email'])\n self.assertEqual(user.username, data['username'])\n\n #self.assertEqual(user.expiry_date, data['expiry_date'])\n\n self.assertEqual(user.ad_guid, data['ad_guid'])\n\n self.assertEqual(user.expiry_date, tz.localize(parse(data['expiry_date'])))\n\n self.assertEqual(user.given_name, data['given_name'])\n #self.assertEqual(user.active, data['Enabled'])\n self.assertEqual(user.active, data['active'])\n self.assertEqual(user.ad_deleted, data['deleted'])\n\n self.assertTrue(user.o365_licence)\n self.assertTrue(user.in_sync)", "def test_mod_email(self, mapp, existing_user_id, url_of_liveserver):\n mapp.logoff()\n mapp.login(user=existing_user_id, password=\"1234\")\n email_address = existing_user_id + '_' + str(id(self)) + \"@devpi.net\"\n mapp.modify_user(user=existing_user_id, email=email_address)\n # Verify that the email was indeed changed.\n json = mapp.getjson(url_of_liveserver)\n assert json['result'][existing_user_id]['email'] == email_address", "def test_update(self):\n self.client.force_authenticate(user=self.admin)\n\n data = {\n 'retreat': reverse(\n 'retreat:retreat-detail', args=[self.retreat.id]\n ),\n 'user': reverse('user-detail', args=[self.user2.id]),\n }\n\n response = self.client.put(\n reverse(\n 'retreat:waitqueuenotification-detail',\n kwargs={'pk': 1},\n ),\n data,\n format='json',\n )\n\n self.assertEqual(\n response.status_code,\n status.HTTP_405_METHOD_NOT_ALLOWED\n )", "def test_update_client_registration():\n\n client_request.user.username = \"test123456\"\n client.update_client_registration(client_request, presenter)\n\n new_client = presenter.get_client_view_model()\n\n assert new_client.user.username == \"test123456\"", "def test_05_update_user_profile(self):\r\n\r\n\r\n # Create an account and log in\r\n self.register()\r\n url = \"/account/fake/update\"\r\n res = self.app.get(url, 
follow_redirects=True)\r\n assert res.status_code == 404, res.status_code\r\n\r\n # Update profile with new data\r\n res = self.update_profile(method=\"GET\")\r\n msg = \"Update your profile: %s\" % self.user.fullname\r\n assert self.html_title(msg) in res.data, res.data\r\n msg = 'input id=\"id\" name=\"id\" type=\"hidden\" value=\"1\"'\r\n assert msg in res.data, res\r\n assert self.user.fullname in res.data, res\r\n assert \"Save the changes\" in res.data, res\r\n msg = '<a href=\"/account/johndoe/update\" class=\"btn\">Cancel</a>'\r\n assert msg in res.data, res.data\r\n\r\n res = self.update_profile(fullname=\"John Doe 2\",\r\n email_addr=\"johndoe2@example\",\r\n locale=\"en\")\r\n assert \"Please correct the errors\" in res.data, res.data\r\n\r\n\r\n res = self.update_profile(fullname=\"John Doe 2\",\r\n email_addr=\"johndoe2@example.com\",\r\n locale=\"en\")\r\n title = \"Update your profile: John Doe 2\"\r\n assert self.html_title(title) in res.data, res.data\r\n assert \"Your profile has been updated!\" in res.data, res.data\r\n assert \"John Doe 2\" in res.data, res\r\n assert \"johndoe\" in res.data, res\r\n assert \"johndoe2@example.com\" in res.data, res\r\n\r\n # Updating the username field forces the user to re-log in\r\n res = self.update_profile(fullname=\"John Doe 2\",\r\n email_addr=\"johndoe2@example.com\",\r\n locale=\"en\",\r\n new_name=\"johndoe2\")\r\n assert \"Your profile has been updated!\" in res.data, res\r\n assert \"Please sign in\" in res.data, res.data\r\n\r\n res = self.signin(method=\"POST\", email=\"johndoe2@example.com\",\r\n password=\"p4ssw0rd\",\r\n next=\"%2Faccount%2Fprofile\")\r\n assert \"Welcome back John Doe 2\" in res.data, res.data\r\n assert \"John Doe 2\" in res.data, res\r\n assert \"johndoe2\" in res.data, res\r\n assert \"johndoe2@example.com\" in res.data, res\r\n\r\n res = self.signout()\r\n assert self.html_title() in res.data, res\r\n assert \"You are now signed out\" in res.data, res\r\n\r\n # A user must be signed in to access the update page, the page\r\n # the title will be the redirection to log in\r\n res = self.update_profile(method=\"GET\")\r\n assert self.html_title(\"Sign in\") in res.data, res\r\n assert \"Please sign in to access this page.\" in res.data, res\r\n\r\n # A user must be signed in to access the update page, the page\r\n # the title will be the redirection to log in\r\n res = self.update_profile()\r\n assert self.html_title(\"Sign in\") in res.data, res\r\n assert \"Please sign in to access this page.\" in res.data, res\r\n\r\n self.register(fullname=\"new\", name=\"new\")\r\n url = \"/account/johndoe2/update\"\r\n res = self.app.get(url)\r\n assert res.status_code == 403", "def test_resource_user_resource_change_user_patch(self):\n pass", "def test_update_virtual_account_by_id(self):\n pass", "def test_client_update(self):\n pass", "def update_user(id):\n pass", "def test_update_person(self):\n user = User.objects.create(username='test_user')\n user.set_password('test123')\n user.save()\n self.client.login(username='test_user', password='test123')\n\n data = {'first_name': 'Daenerys'}\n response = self.client.patch(self.url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(Person.objects.count(), 1)\n self.assertEqual(Person.objects.first().first_name, 'Daenerys')", "def test_update(self):\n # this is tested graphically, as it is UI\n pass", "def test_update(self):\n url_register = reverse('auth_register')\n resp = self.client.post(url_register, {\n 
\"username\": \"user\",\n \"password\": \"lol1lol1\",\n \"password2\": \"lol1lol1\",\n \"email\": \"lol@gmail.com\",\n \"first_name\": \"\",\n \"last_name\": \"\",\n \"bio\": \"\"\n })\n print(resp.headers[\"Location\"])\n self.assertEqual(resp.status_code, status.HTTP_201_CREATED)\n self.assertEqual(User.objects.count(), 1)\n url_auth = reverse('token_obtain_pair')\n resp = self.client.post(url_auth, {'username':'user', 'password':'lol1lol1'}, format='json')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n token = resp.data['access']\n\n url_upd = reverse('auth_update_profile', kwargs={'pk': 2})\n\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION='Bearer ' + token)\n resp = client.patch(url_upd, {\n \"username\": \"user3\",\n \"email\": \"lol@gmail.com\",\n \"first_name\": \"\",\n \"last_name\": \"\",\n \"image\": \"\",\n \"bio\": \"\",\n \"city\": \"\",\n \"phone\": \"\"\n })\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n self.assertEqual(User.objects.get().username, 'user3')", "def test_update_one(self):\n pass", "def test_write(self):\n userEdited = self.env['res.users'].browse(\n self.user.id).write({'user_profile_id': self.user_profile2.id})\n self.assertEqual(userEdited, True)", "def test_create_account(self):\n response = self.client.post(\"http://localhost:8000/api/signup/\",\n data={\"username\": \"tests\", \"password\": \"TestTest\"})\n first_user = MyUser.objects.get()\n self.assertEqual(response.status_code, HTTP_201_CREATED)\n self.assertEqual(first_user.username, 'tests')\n response = self.client.post(\"http://localhost:8000/api/signup/\",\n data={\"username\": \"tests2\", \"password\": \"TestTest\"})\n self.assertEqual(response.status_code, HTTP_201_CREATED)\n self.assertTrue(MyUser.objects.filter(username=\"tests2\").exists())\n user = MyUser.objects.get(username=\"tests2\")\n response = self.client.put(f\"http://localhost:8000/api/users/{user.pk}/\", data={\"email\": \"tst@test.te\"})\n # Not logged shouldnt change anything\n self.assertEqual(response.status_code, HTTP_401_UNAUTHORIZED)\n user.set_password(\"TestTest\")\n user.save()\n self.assertTrue(self.client.login(username=\"tests2\", password=\"TestTest\"))\n response = self.client.patch(f\"http://localhost:8000/api/users/{user.pk}/\", data={\"email\": \"tst@test.te\"})\n # Logged, should change\n self.assertEqual(response.status_code, HTTP_200_OK)\n self.assertEqual(MyUser.objects.get(username=\"tests2\").email, \"tst@test.te\")\n # Dont update others users\n response = self.client.patch(f\"http://localhost:8000/api/users/{first_user.pk}/\", data={\"email\": \"tst@test.te\"})\n self.assertEqual(response.status_code, HTTP_400_BAD_REQUEST)", "def test_account_update(self):\r\n params = {\r\n 'name': u'Test Admin'\r\n }\r\n res = self.testapp.post(\r\n str(u\"/api/v1/admin/account?api_key=\" + str(API_KEY)),\r\n content_type='application/json',\r\n params=json.dumps(params),\r\n status=200)\r\n\r\n # make sure we can decode the body\r\n user = json.loads(res.body)\r\n\r\n self.assertEqual(\r\n user['username'], 'admin',\r\n \"Should have a username of admin {0}\".format(user))\r\n self.assertEqual(\r\n user['name'], 'Test Admin',\r\n \"Should have a new name of Test Admin {0}\".format(user))\r\n\r\n self.assertTrue(\r\n 'password' not in user,\r\n \"Should not have a field password {0}\".format(user))\r\n self.assertTrue(\r\n '_password' not in user,\r\n \"Should not have a field password {0}\".format(user))\r\n self.assertTrue(\r\n 'api_key' not in user,\r\n \"Should not have a 
field password {0}\".format(user))\r\n self._check_cors_headers(res)", "def test_anonymous_user_update(self):\r\n with self.flask_app.test_request_context('/'):\r\n for token in self.auth_providers:\r\n assert_raises(Unauthorized,\r\n getattr(require, 'token').update,\r\n token)", "def test_updating_user_information_excluding_password(self):\n user_data = {\"username\": \"robert\", \"email\": \"robert@email.com\"}\n user_password = self.user.password\n updated_user = self.serializer.update(self.user, user_data)\n self.assertEqual(updated_user.username, \"robert\")\n self.assertEqual(updated_user.email, \"robert@email.com\")\n self.assertEqual(updated_user.password, user_password)", "def test_update_privileges_fails(self):\n user = User.create(name='foo', email='foo@bar.com', user_type='user',\n owned_teams=[], owned_organizations=[])\n user.put()\n\n # You get a 200, but the changes you requested don't happen.\n response = self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'user_type': 'super_admin', 'owned_teams': ['Team_foo'],\n 'owned_organizations': ['Organization_foo']},\n headers=self.login_headers(user),\n )\n user_dict = json.loads(response.body)\n self.assertEqual(user.user_type, user_dict['user_type'])\n self.assertEqual(user.owned_teams, user_dict['owned_teams'])\n self.assertEqual(user.owned_organizations,\n user_dict['owned_organizations'])\n\n # Also not changed in the db.\n fetched_user = User.get_by_id(user.uid)\n self.assertEqual(user.user_type, fetched_user.user_type)\n self.assertEqual(user.owned_teams, fetched_user.owned_teams)\n self.assertEqual(user.owned_organizations,\n fetched_user.owned_organizations)", "def test_calendar_user_view_update(self):\n request = self.factory.post('/module/calendar_user/4/', {\n \"caller_name\": \"test\",\n \"survey\": \"1\",\n }, follow=True)\n request.user = self.user\n request.session = {}\n #response = calendar_user_change(request, 3)\n #self.assertEqual(response.status_code, 200)\n\n request = self.factory.post('/module/calendar_user/3/', {'delete': True}, follow=True)\n request.user = self.user\n request.session = {}\n #response = calendar_user_change(request, 3)\n #self.assertEqual(response.status_code, 302)", "def test_update_client(self):\n pass", "def test_user_modification(self):\n self.user.save(update_fields=['first_name'])\n self.assertTrue(AuditTrail.objects.count() >= 2)\n self.assertEqual(\n AuditTrail.objects.last().level, AuditTrail.LEVEL_INFO)", "def test_user_info_updated(self):\n self.login_test_user()\n self.fill_session_cart()\n response = self.client.get(self.CHECKOUT_URL)\n form_fields_w_values = response.context['form'].initial\n for field in form_fields_w_values:\n form_fields_w_values[field] += \"1\"\n\n self.client.post(self.CHECKOUT_URL, form_fields_w_values, follow=True)\n user = USER_MODEL.objects.get()\n for field, value in form_fields_w_values.items():\n user_value = getattr(user, field)\n self.assertEqual(user_value, value)", "def test_update_virt_realm(self):\n pass", "def testUpdateUser(self):\n UserAPI().create([(u'test', u'secret', u'name', u'name@example.com')])\n user = getUser(u'test')\n passwordHash = user.passwordHash\n self.store.commit()\n info = TUserUpdate(u'test', u'password', u'new-name',\n u'new-name@example.com')\n with login(u'fluiddb', self.admin.objectID, self.transact) as session:\n yield self.facade.updateUser(session, info)\n\n self.store.rollback()\n self.assertEqual(u'test', user.username)\n self.assertNotEqual(passwordHash, user.passwordHash)\n 
self.assertEqual(u'new-name', user.fullname)\n self.assertEqual(u'new-name@example.com', user.email)", "def testIMembraneUserManagement(self):\n from Products.membrane.interfaces import IMembraneUserManagement\n from Products.membrane.at.interfaces import IUserAuthentication\n \n user = IMembraneUserManagement(self.person);\n auth = IUserAuthentication(self.person);\n \n #test setting password directly, verify that verifyCredentials works as expected\n fsd_tool = getToolByName(self.portal, TOOLNAME)\n self.person.setPassword('secret1')\n if fsd_tool.getUseInternalPassword():\n self.failUnless(auth.verifyCredentials({'login':'abc123','password':'secret1'}), \"failed to verify correct login and password, setting password directly\")\n else:\n self.failIf(auth.verifyCredentials({'login':'abc123','password':'secret1'}), \"internal password not used, method should return none, setting password directly. Value returned: %s\" % returnval)\n \n # now set password using the userChanger method and verify that it worked\n user.doChangeUser('abc123', 'secret2')\n fsd_tool = getToolByName(self.portal, TOOLNAME)\n if fsd_tool.getUseInternalPassword():\n self.failUnless(auth.verifyCredentials({'login':'abc123','password':'secret2'}), \"failed to verify correct login and password, testing doChangeUser()\")\n else:\n self.failIf(auth.verifyCredentials({'login':'abc123','password':'secret2'}), \"internal password not used, method should return none, testing doChangeUser(). Value returned: %s\" % returnval)\n \n # set password and some other value with doChangeUser, using keywords\n self.failIf(self.person.getEmail(), \"email already set, and it shouldn't be: %s\" % self.person.getEmail())\n user.doChangeUser('abc123','secret', email='joebob@hotmail.com')\n self.failUnlessEqual(self.person.getEmail(), 'joebob@hotmail.com', msg=\"failed to update email via doChangeUser(): %s\" % self.person.getEmail())\n \n # now try to delete the user\n self.failUnless(hasattr(self.directory,'abc123'), \"directory does not have person\")\n user.doDeleteUser('abc123')\n self.failIf(hasattr(self.directory,'abc123'), \"directory still contains person\")\n \n # we should not be able to log in as this person anymore\n self.logout()\n try:\n self.login('abc123')\n except AttributeError:\n pass\n else:\n self.fail(\"still able to login: %s\" % self.portal.portal_membership.getAuthenticatedMember().id)", "def test_007_update_user(self, mock_db_query, mock_db_add, mock_db_commit):\n mock_db_query.get.side_effect = [\n seller1,\n seller1.address\n ]\n\n standard_dict_update = standard_dict\n standard_dict_update['identity'] = 1\n standard_dict_update['first_name'] = \"Sally\"\n response = self.app.put('/v1/users/' + str(seller1.identity), data=json.dumps(standard_dict_update),\n headers={'accept': 'application/json', 'content-type': 'application/json'})\n\n print(response.get_data().decode())\n\n self.assertEqual(response.status_code, 200)\n # Check we call the correct two database methods\n self.assertTrue(mock_db_add.called)\n self.assertTrue(mock_db_commit.called)", "def testEditPassword(self):\n self._login_user('eschoppik','secret')\n response = self.client.post('/users/1/edit_password?_method=PATCH',\n data=dict(new_password='newpass', confirm_password='newpass',\n old_password='secret'), follow_redirects=True)\n user = User.query.filter_by(username='eschoppik').first()\n self.assertEqual(response.status_code, 200)\n self.assertEqual(bcrypt.check_password_hash(user.password, 'newpass'),True)", "def 
test_login_after_password_change(self):\n old_password = self.user['password1']\n self.change_password()\n response = self.client.post(\n reverse('users:login'), {'username': self.user['username'], 'password': old_password}\n )\n self.assertEqual(response.status_code, 200)", "def test_user_auth(self):\n self.new_user.save_login()\n test_user=User(\"trinity\",\"trinity@gmail.com\",\"123\")\n test_user.save_login()\n self.assertTrue(self.new_user.users_auth(\"trinity\",\"123\"))", "def test_authenticated_user_update(self):\r\n with self.flask_app.test_request_context('/'):\r\n for token in self.auth_providers:\r\n assert_raises(Forbidden,\r\n getattr(require, 'token').update,\r\n token)", "def test_user_changepassword(self):\n\n changepassword_data = {\n \"old_password\": \"1234\",\n \"new_password\": \"123456\"\n }\n updatedlogin_data = {\n \"email\": \"testuser@gmail.com\",\n \"password\": \"123456\"\n }\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.get_token())\n response = self.client.patch(\n reverse(\"account:change-password\"),\n changepassword_data,\n format=\"json\"\n )\n\n # test if the response status after patch is ok or not\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n # test if user can login with new password\n response = self.client.post(self.login_url, updatedlogin_data, format=\"json\")\n token = response.data.get('token')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(Token.objects.count(), 1)\n self.assertEqual(Token.objects.get().key, token)\n # test if user can login with old password or not\n response = self.client.post(self.login_url, self.login_data, format=\"json\")\n self.assertNotEqual(response.status_code, status.HTTP_200_OK)", "def test_replace_user(self):\n\n replacement_data = dict(\n username='test_new_username',\n first_name='test_new_first_name',\n last_name='test_new_last_name',\n password='test_new_password',\n email='test_new_email',\n role='test_new_role',\n position='test_new_position',\n display_name='test_new_display_name'\n )\n\n # Replace non-existing user will insert a new user in Database\n replaced = self.user_api.replace_user(MAGEN_USER['user_uuid'], replacement_data)\n self.assertTrue(replaced.success)\n\n # Verify that user was inserted\n selected = self.user_api.get_user(MAGEN_USER['user_uuid'])\n self.assertTrue(selected.success)\n self.assertEqual(selected.documents['username'], 'test_new_username')\n\n # Replace existing use with MAGEN_USER data\n replaced = self.user_api.replace_user(MAGEN_USER['user_uuid'], MAGEN_USER)\n self.assertTrue(replaced.success)\n # Verify username has changed\n self.assertEqual(replaced.documents['username'], 'test_username')\n # Verify that registration timestamp was not changed\n # if registration timestamp is in replacement data the old one gets replaced\n self.assertEqual(selected.documents['registered_on'], replaced.documents['registered_on'])", "def test_post_update_admin(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n user_url = reverse('user-detail', kwargs={'pk': self.superuser.id})\n self.client.force_authenticate(user=self.superuser)\n title = 'Random New Title'\n body = 'Random New Body'\n response = self.client.put(url, {'title': title, 'body': body, 'user': user_url}, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertIn(title, response.content)\n self.assertIn(body, response.content)\n self.assertIn(user_url, response.content)", "def test_42_password_link(self):\r\n 
self.register()\r\n res = self.app.get('/account/johndoe/update')\r\n assert \"Change your Password\" in res.data\r\n user = User.query.get(1)\r\n user.twitter_user_id = 1234\r\n db.session.add(user)\r\n db.session.commit()\r\n res = self.app.get('/account/johndoe/update')\r\n assert \"Change your Password\" not in res.data, res.data", "def test_update_record(self):\n pass", "def test_user(self):\n return True" ]
[ "0.845656", "0.79875004", "0.7690915", "0.76854897", "0.76540387", "0.7620719", "0.7569771", "0.75459176", "0.7516532", "0.7494765", "0.74924505", "0.74789107", "0.7471998", "0.7468943", "0.7455768", "0.7437972", "0.7426278", "0.7419078", "0.74119693", "0.74057174", "0.73813", "0.73484755", "0.7343224", "0.73139817", "0.72807837", "0.7269854", "0.7266302", "0.72561073", "0.7234163", "0.7209438", "0.7203359", "0.71962386", "0.71940005", "0.7172065", "0.7158803", "0.7154984", "0.7150016", "0.71189517", "0.708653", "0.70651716", "0.7060841", "0.70479316", "0.70479316", "0.70479316", "0.70335305", "0.7011957", "0.69937176", "0.698945", "0.6963713", "0.6958484", "0.69414073", "0.6929833", "0.69289684", "0.69278365", "0.6914269", "0.69131374", "0.68725777", "0.68714696", "0.6863836", "0.68446904", "0.6844175", "0.6842865", "0.68319875", "0.6814456", "0.68115616", "0.68055576", "0.67967504", "0.67924726", "0.678682", "0.6778096", "0.6765179", "0.6756295", "0.6751911", "0.6748321", "0.67475593", "0.6744224", "0.673171", "0.6731395", "0.6727498", "0.67232966", "0.6715231", "0.67041016", "0.6702493", "0.6691296", "0.66758233", "0.6675427", "0.6675044", "0.66697633", "0.6659857", "0.66580826", "0.6654367", "0.6652366", "0.66485226", "0.66449815", "0.6639565", "0.66385686", "0.6625265", "0.661957", "0.6610015", "0.6607858" ]
0.74192697
17
Log control data at each step during evaluation.
def _log_control_data(self, action, global_reward):
    action_r = ','.join(['%d' % a for a in action])
    cur_control = {'episode': self.cur_episode,
                   'step': self.t,
                   'action': action_r,
                   'reward': global_reward}
    self.control_data.append(cur_control)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _log(self, data):\n if self.log_data is not None:\n self.log_data(data)", "def on_eval_batch_begin(self, step, logs=None):", "def on_eval_begin(self, logs=None):", "def log_eval(self, epoch, dataset_name):\n pass", "def record(self, step):", "def on_log(self):\n monitors = self.monitors\n if self.monitors is None:\n monitors = self.trainer.metrics.keys()\n\n\n hparams = self.hparams\n if self.hparams is None:\n hparams = self.trainer.hparams.keys()\n\n metrics = {name: format_metric(self.trainer.metrics[name])\n for name in monitors\n if name in self.trainer.metrics}\n hparams = {name: format_metric(self.trainer.hparams[name])\n for name in hparams\n if name in self.trainer.hparams}\n\n\n step_bar = self.step_bars[-1]\n step_bar.set_description(\"Epoch {}\".format(self.trainer.epoch+1))\n step_bar.set_postfix(**metrics, **hparams)\n step_bar.update(self.trainer.steps_trained - self.last_step)\n self.last_step = self.trainer.steps_trained", "def logStep(self):\n n = self.mirror.cv['dp']\n self.r_Vm[n] = self.cv['Vm']\n self.r_Va[n] = self.cv['Va']", "def log(self, step, data=''):\n if self.debug:\n print '++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++'\n for k in range(0, len(step), 68):\n print '+{:^68.68}+'.format(step[k:k + 68])\n for k in range(0, len(data), 68):\n print '+{:^68.68}+'.format(data[k:k + 68])\n print '++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++'\n print", "def on_x(self):\r\n self.log()", "def log_all(self):\n self.save_raw()\n self.log()", "def _print_log(self, step, data=None):\n \n # Set mode to append to log file\n mode = 'a'\n\n if self.logfile is None:\n # Increment log counter for the class. Each instance of the class generates a new log.\n self.__class__.log_no += 1\n\n # Create a log file for the instance\n # Logs will be stored in ..\\logs\\SKLearn Log <n>.txt\n self.logfile = os.path.join(os.getcwd(), 'logs', 'SKLearn Log {}.txt'.format(self.log_no))\n \n if step == 1:\n # Output log header\n output = \"\\nSKLearnForQlik Log: {0} \\n\\n\".format(time.ctime(time.time()))\n # Set mode to write new log file\n mode = 'w'\n \n elif step == 2:\n # Output the parameters\n output = \"Model Name: {0}\\n\\n\".format(self.model.name)\n output += \"Execution arguments: {0}\\n\\n\".format(self.exec_params)\n \n try:\n output += \"Scaler: {0}, missing: {1}, scale_hashed: {2}, scale_vectors: {3}\\n\".format(\\\n self.model.scaler, self.model.missing,self.model.scale_hashed, self.model.scale_vectors)\n output += \"Scaler kwargs: {0}\\n\\n\".format(self.model.scaler_kwargs)\n except AttributeError:\n output += \"scale_hashed: {0}, scale_vectors: {1}\\n\".format(self.model.scale_hashed, self.model.scale_vectors)\n\n try:\n if self.model.dim_reduction:\n output += \"Reduction: {0}\\nReduction kwargs: {1}\\n\\n\".format(self.model.reduction, self.model.dim_reduction_args)\n except AttributeError:\n pass\n \n output += \"Estimator: {0}\\nEstimator kwargs: {1}\\n\\n\".format(self.model.estimator, self.model.estimator_kwargs)\n \n elif step == 3: \n # Output the request dataframe\n output = \"REQUEST: {0} rows x cols\\nSample Data:\\n\\n\".format(self.request_df.shape)\n output += \"{0}\\n...\\n{1}\\n\\n\".format(self.request_df.head().to_string(), self.request_df.tail().to_string())\n \n elif step == 4:\n # Output the response dataframe/series\n output = \"RESPONSE: {0} rows x cols\\nSample Data:\\n\\n\".format(self.response.shape)\n output += \"{0}\\n...\\n{1}\\n\\n\".format(self.response.head().to_string(), 
self.response.tail().to_string())\n \n elif step == 5:\n # Print the table description if the call was made from the load script\n output = \"\\nTABLE DESCRIPTION SENT TO QLIK:\\n\\n{0} \\n\\n\".format(self.table)\n \n elif step == 6:\n # Message when model is loaded from cache\n output = \"\\nModel {0} loaded from cache.\\n\\n\".format(self.model.name)\n \n elif step == 7:\n # Message when model is loaded from disk\n output = \"\\nModel {0} loaded from disk.\\n\\n\".format(self.model.name)\n \n elif step == 8:\n # Message when cache is updated\n output = \"\\nCache updated. Models in cache:\\n{0}\\n\\n\".format([k for k,v in self.__class__.model_cache.items()])\n \n elif step == 9:\n # Output when a parameter grid is set up\n output = \"Model Name: {0}, Estimator: {1}\\n\\nGrid Search Arguments: {2}\\n\\nParameter Grid: {3}\\n\\n\".\\\n format(self.model.name, self.model.estimator, self.model.grid_search_args, self.model.param_grid)\n \n elif step == 10:\n # self.model.estimator_kwargs['architecture']\n output = \"\\nKeras architecture added to Model {0}:\\n\\n{1}\\n\\n\".format(self.model.name,\\\n self.model.architecture.to_string())\n\n elif step == 11:\n # Output after adding lag observations to input data\n output = \"Lag observations added ({0} per sample). New input shape of X is {1}.\\n\\n\".format(self.model.lags, data.shape)\n output += \"Feature Definitions:\\n{0}\\n\\n\".format(self.model.features_df.to_string())\n output += \"Sample Data:\\n{0}\\n...\\n{1}\\n\\n\".format(data.head(5).to_string(), data.tail(5).to_string())\n \n sys.stdout.write(output)\n with open(self.logfile, mode, encoding='utf-8') as f:\n f.write(output)", "def log(self):\n\n\t\t# Only every 1/10 second (or so) to avoid flooding networktables\n\t\tif not self.log_timer.running or not self.log_timer.hasPeriodPassed(self.log_timer_delay):\n\t\t\treturn\n\n\t\twpilib.SmartDashboard.putString('Pressure', '{0:.2f}'.format(self.get_pressure()))\n\t\twpilib.SmartDashboard.putBoolean(\"Garbo?\", self.is_pbot)\n\n\t\tself.drive.log()\n\t\tself.elevator.log()\n\t\tself.intake.log()", "def log_trainable_variables(self):\n var_names = list(self.trainable_variables.keys())\n self.logger.log_trainable_variables(var_names)", "def simulation_step(self):\n if self.data_valid.get():\n print(\"Output pin %s writing %s\" % (self.name, self.debug_data.get()))", "def log(self, step):\n # log mean\n tf.summary.scalar(self.name, self.result(), step=step)\n # call log method of each child\n for child in self.children_real_fake:\n child[0].log(step)\n child[1].log(step)", "def _on_step(self) -> None:\n self._n_calls += 1\n # Account for multiple environments\n # each call to step() corresponds to n_envs transitions\n if self._n_calls % max(self.target_update_interval // self.n_envs, 1) == 0:\n polyak_update(self.q_net.parameters(), self.q_net_target.parameters(), self.tau)\n # Copy running stats, see GH issue #996\n polyak_update(self.batch_norm_stats, self.batch_norm_stats_target, 1.0)\n\n self.exploration_rate = self.exploration_schedule(self._current_progress_remaining)\n self.logger.record(\"rollout/exploration_rate\", self.exploration_rate)", "def logStuff(self, i, epoch, numEpochs, trainData):\r\n step = i + epoch*self.numBatchesPerEpoch\r\n numSteps = numEpochs*self.numBatchesPerEpoch\r\n if step%2000==0:\r\n self.metricLog['G_loss'] = self.genLoss(*trainData).cpu().data[0]\r\n self.metricLog['D_loss'] = self.discLoss(*trainData).cpu().data[0]\r\n if len(self.lab_train):\r\n xy_lab = self.getLabeledXYonly(trainData)\r\n 
self.metricLog['Train_Acc(Batch)'] = self.batchAccuracy(*xy_lab)\r\n self.metricLog['Val_acc'] = self.getDevsetAccuracy()\r\n #TODO: add Inception and FID\r\n self.writer.add_scalars('metrics', self.metricLog, step)\r\n prettyPrintLog(self.metricLog, epoch, numEpochs, step, numSteps)\r\n\r\n self.scheduleLog['lr'] = self.lr_scheduler.get_lr()[0]\r\n self.writer.add_scalars('schedules', self.scheduleLog, step)\r\n\r\n fakeImages = self.G(self.fixed_z).cpu().data\r\n self.writer.add_image('fake_samples', \r\n vutils.make_grid(fakeImages, normalize=True), step)", "def on_eval_end(self, logs=None):", "def log(self, report, epoch):\n train_return_values = np.asarray([trajectory['reward'].sum()\n for trajectory in report['training_trajectories']])\n trajectories_infos = [trajectory['info'] for trajectory in report.pop('training_trajectories')]\n sum_costs = np.asarray([sum(list(map(lambda info: info.get('cost', 0.0), trajectory)))\n for trajectory in trajectories_infos])\n report.update(dict(\n training_rl_objective=train_return_values.mean(),\n sum_rewards_stddev=train_return_values.std(),\n mean_sum_costs=sum_costs.mean()\n ))\n training_step = report.pop('total_training_steps')\n for key, value in report.items():\n self.training_logger.log_scalar(value, key, training_step)\n self.training_logger.flush()", "def _logging(self):\n msgs = []\n # patch to log stdout spawned processes of dataloader\n logger = init_logger()\n for ds_name, ds_count in self._counts.items():\n msgs.append(f\"\\t\\t\\t* {ds_name}: {ds_count}\")\n logger.info(\"Weighted corpora loaded so far:\\n\" + \"\\n\".join(msgs))", "def on_eval_batch_end(self, step, logs=None):", "def log_training_results(engine: Engine):\n train_evaluator.run(self.train_dl)\n metrics: Dict[str, float] = train_evaluator.state.metrics\n avg_accuracy: float = metrics['accuracy']\n avg_bce: float = metrics['bce']\n pbar.log_message(\n f'Training Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.4f} Avg loss: {avg_bce:.4f}')", "def _log(self, action: types.NestedArray) -> None:\n if self._logger is None:\n return\n self._logger.info('{}, {}, {}, {}, {}, {}, {}'.format(\n self._last_timestep.observation['STAGE'],\n self._last_timestep.observation['CHIPS'],\n self._last_timestep.observation['PLAYER_TOTAL'],\n self._last_timestep.observation['PLAYER_ACES'],\n self._last_timestep.observation['DEALER_TOTAL'],\n action,\n self._deck_distribution))", "def callback(_locals, _globals):\n global n_steps\n # Print stats every 20 calls\n if (n_steps + 1) % 1 == 0:\n # Evaluate policy training performance\n episode_rewards, episode_lengths = evaluate_policy(_locals['self'], eval_real_env,\n n_eval_episodes=n_eval_episodes,\n render=False,\n deterministic=False,\n return_episode_rewards=False)\n print(\"Last mean reward per episode at target: {:.2f}\".format(episode_rewards))\n\n episode_rewards_grnd, episode_lengths_grnd = evaluate_policy(_locals['self'], eval_grnd_env,\n n_eval_episodes=n_eval_episodes,\n render=False,\n deterministic=False,\n return_episode_rewards=False)\n print(\"Last mean reward per episode at grounded environment: {:.2f}\".format(episode_rewards_grnd))\n\n with open(os.path.join(log_dir, 'eval_at_target.txt'), 'a') as f:\n f.write(\"{}, {}, {}\\n\".format(n_steps, episode_rewards, episode_lengths/n_eval_episodes))\n f.close()\n with open(os.path.join(log_dir, 'eval_at_grnd.txt'), 'a') as f:\n f.write(\"{}, {}, {}\\n\".format(n_steps, episode_rewards_grnd, episode_lengths_grnd/n_eval_episodes))\n f.close()\n n_steps += 1\n 
return True", "def log_tensorboard(self, value_dict, step):\n for key, value in value_dict.items():\n summary = tf.Summary(value=[tf.Summary.Value(tag=key, simple_value=value)])\n self.writer.add_summary(summary, step)", "def on_L3(self):\r\n self.log()", "def compute_debug(self):", "def simulate(self):\n #loop to perform additional steps until the current temperature is no longer greater than the ending_temperature\n while self.current_T >= self.end_temp: \n self.step(self.current_T)\n \n #log various parameters that changed in the MCMCSampler object after a single step\n self.temperature.append(self.current_T)\n self.iteration.append(self.current_iteration)\n self.energy.append(self.current_energy)\n #return a pandas dataframe that will hold all of the information requested above\n log_table = pd.DataFrame(list(zip(self.iteration, self.energy, self.temperature)), columns =['iteration', 'energy', 'temperature']) \n return(log_table)", "def log(self):\n self.logger = logging.getLogger(self.log_name)\n self.logger.info(f\"Name: {self.name}\")\n self.logger.info(f\"Grid points: {self.gp}\")\n self.logger.info(f\"Nadir points: {self.nadir_p}\")\n self.logger.info(f\"Penalty weight: {self.eps}\")\n self.logger.info(f\"Early exit: {self.early_exit}\")\n self.logger.info(f\"Bypass coefficient: {self.bypass}\")\n self.logger.info(f\"Flag array: {self.flag}\")\n self.logger.info(f\"CPU Count: {self.cpu_count}\")\n self.logger.info(f\"Redivide work: {self.redivide_work}\")\n self.logger.info(f\"Shared flag array: {self.shared_flag}\")\n self.logger.info(Helper.separator())", "def internal_event (self):\n self.clock_time += 1\n self.log()", "def log_metric(self, name, val, step):\n raise NotImplementedError", "def log_evaluation(tester, name, description):\r\n\tfor dataset, output in tester.preds.items():\r\n\t\tresults = pandas.DataFrame.from_dict(output)\r\n\t\tpath = os.path.join(\r\n\t\t\tEXPERIMENT_PATH, tester.config[\"name\"] + '-' + dataset)\r\n\t\twith open(path + \".csv\", \"w\") as f:\r\n\t\t\tresults.to_csv(f, sep=\"\\t\", encoding='utf-8',\r\n\t\t\t\tfloat_format='%.3f', index=False)", "def do(self, callback_name, *args):\n logger.info(\"Monitoring on auxiliary data started\")\n value_dict = self._evaluator.evaluate(self.data_stream)\n self.add_records(self.main_loop.log, value_dict.items())\n logger.info(\"Monitoring on auxiliary data finished\")", "def do(self, callback_name, *args):\n logger.info(\"Monitoring on auxiliary data started\")\n value_dict = self._evaluator.evaluate(self.data_stream)\n self.add_records(self.main_loop.log, value_dict.items())\n logger.info(\"Monitoring on auxiliary data finished\")", "def on_L1(self):\r\n self.log()", "def log_validation_results(engine):\n validation_evaluator.run(self.val_dl)\n metrics: Dict[str, float] = validation_evaluator.state.metrics\n avg_accuracy: float = metrics['accuracy']\n avg_bce: float = metrics['bce']\n pbar.log_message(\n f'Validation Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.4f} Avg loss: {avg_bce:.4f}')\n pbar.n = pbar.last_print_n = 0", "def _log_results(self, first_time=False):\n\n if not first_time:\n print(self.READINGS_PRINT_TEMPLATE % self.get_sensors_data())\n\n self._log_timer = self._start_timer(Config.LOG_INTERVAL, self._log_results)", "def _log_progress(self, t):\n\n # Run the update only 2 step before the actual logging happens in order to\n # make sure that the most recent possible values will be stored in\n # self.summary. 
This is a hacky workaround in order to support OffPolicyAgent\n # which runs 2 threads without coordination\n if (t+2) % self.log_freq == 0 and self.learn_started:\n episode_rewards = self.env_monitor.get_episode_rewards()\n self.episode_rewards = np.asarray(episode_rewards)\n if self.episode_rewards.size > 0:\n self.mean_ep_rew = np.mean(episode_rewards[-self.stats_n:])\n self.best_mean_ep_rew = max(self.best_mean_ep_rew, self.mean_ep_rew)\n\n if t % self.log_freq == 0 and self.learn_started:\n stats_logger.info(\"\")\n for s, lambda_v in self.log_info:\n stats_logger.info(s.format(lambda_v(t)))\n stats_logger.info(\"\")\n\n if self.summary:\n # Log with TensorBoard\n self.tb_writer.add_summary(self.summary, global_step=t)", "def on_a(self):\r\n self.log()", "def do(self, callback_name, *args):\n logger.info(\"Monitoring on auxiliary data started\")\n value_dict = self._evaluator.evaluate(self.data_stream)\n for key in value_dict.keys():\n value_dict[key] *= self.coverage\n value_dict['coverage'] = self.coverage\n logging.info(\"coverage:{0}\".format(self.coverage))\n for key, value in value_dict.items():\n logging.info(\"{0}:{1}\".format(key,value))\n self.add_records(self.main_loop.log, value_dict.items())\n self.check_stop(value_dict)\n logger.info(\"Monitoring on auxiliary data finished\")", "def log_batch(self, measurements):\n for m in measurements:\n logger.info(m)\n self.log(metric=m.metric, value=m.value, source=m.source, timestamp=m.timestamp)", "def on_R1(self):\r\n self.log()", "def log_output_data(self):\r\n with tf.name_scope('model_output'):\r\n for i in range(self.action_handler.get_number_actions()):\r\n variable_name = str(self.action_handler.action_list_names[i])\r\n tf.summary.histogram(variable_name + '_output', self.actor_last_row_layer[i])", "def _save(self, data: MetricsDict) -> None:\n client = MlflowClient()\n try:\n run_id = self.run_id\n except DataSetError:\n # If run_id can't be found log_metric would create new run.\n run_id = None\n\n log_metric = (\n partial(client.log_metric, run_id)\n if run_id is not None\n else mlflow.log_metric\n )\n metrics = (\n self._build_args_list_from_metric_item(k, v) for k, v in data.items()\n )\n\n if self._logging_activated:\n for k, v, i in chain.from_iterable(metrics):\n log_metric(k, v, step=i)", "def do(self, callback_name, *args):\n logger.info(\"Monitoring on auxiliary data started\")\n value_dict = self._evaluator.evaluate(self.data_stream)\n self.add_records(self.main_loop.log, value_dict.items())\n self.check_stop(value_dict)\n logger.info(\"Monitoring on auxiliary data finished\")", "def logging(self, function):\n avg_nms_time_per_step = sum(self.nms_times)/len(self.nms_times)\n avg_total_time_per_step = sum(self.total_times)/len(self.total_times)\n\n avg_min_latency = [x[0] for x in self.inference_times]\n avg_max_latency = [x[1] for x in self.inference_times]\n avg_latency = [x[2] for x in self.inference_times]\n\n function(\"Inference stats: image size {}x{}, batches per step {}, batch size {}, {} steps\".format(\n self.cfg.model.image_size, self.cfg.model.image_size, self.cfg.ipuopts.batches_per_step, self.cfg.model.micro_batch_size, len(self.total_times)\n ))\n function(\"--------------------------------------------------\")\n function(\"Inference\")\n function(\"Average Min Latency per Batch: {:.3f} ms\".format(1000 * sum(avg_min_latency)/len(self.inference_times)))\n function(\"Average Max Latency per Batch: {:.3f} ms\".format(1000 * sum(avg_max_latency)/len(self.inference_times)))\n function(\"Average 
Latency per Batch: {:.3f} ms\".format(1000 * sum(avg_latency)/len(self.inference_times)))\n function(\"Average Inference Throughput: {:.3f} img/s\".format(sum(self.inference_throughputs)/len(self.inference_throughputs)))\n function(\"--------------------------------------------------\")\n # TODO remove the NMS and end-to-end time report once NMS is on device\n function(\"End-to-end\")\n function(\"Average NMS Latency per Batch: {:.3f} ms\".format(1000 * avg_nms_time_per_step/self.cfg.ipuopts.batches_per_step))\n function(\"Average End-to-end Latency per Batch: {:.3f} ms\".format(1000 * avg_total_time_per_step/self.cfg.ipuopts.batches_per_step))\n function(\"End-to-end Throughput: {:.3f} img/s\".format(sum(self.total_throughputs)/len(self.total_throughputs)))\n function(\"==================================================\")\n\n if self.cfg.eval.metrics:\n self.compute_and_print_eval_metrics()", "def test_case(self):\n log.e('error日志')\n log.d('debug日志')\n log.i('info日志')\n log.w('warning日志')", "def on_train_begin(self, logs={}):\n self.losses = []\n self.accuracies = []", "def log_update(self, policy_loss, entropy, kl_divergence, clipping_fraction,\n value_loss, explained_variance, steps):\n\n # Diagnostics\n self.writer.add_scalar(\"Diagnostics/Policy/PolicyLoss\",\n policy_loss,\n self.update_counter)\n self.writer.add_scalar(\"Diagnostics/Policy/Entropy\",\n entropy,\n self.update_counter)\n self.writer.add_scalar(\"Diagnostics/Policy/KLDivergence\",\n kl_divergence,\n self.update_counter)\n self.writer.add_scalar(\"Diagnostics/Policy/ClipFraction\",\n clipping_fraction,\n self.update_counter)\n self.writer.add_scalar(\"Diagnostics/Value/ValueLoss\",\n value_loss,\n self.update_counter)\n self.writer.add_scalar(\"Diagnostics/Value/ValueEstimate\",\n np.mean(self.buffer.values),\n self.update_counter)\n self.writer.add_scalar(\"Diagnostics/Value/ExplainedVariance\",\n explained_variance,\n self.update_counter)\n self.writer.add_scalar(\"Diagnostics/Info/LearningRate\",\n self.lr_pi,\n self.update_counter)\n self.writer.add_scalar(\"Diagnostics/Info/TotalTimesteps\",\n self.update_counter * self.batch_size,\n self.update_counter)\n self.writer.add_scalar(\"Diagnostics/Info/KLDivCoef\",\n self.kl_coef,\n self.update_counter)\n # Training Episodes\n self.writer.add_scalar(\"Training/Episodes/PolicyGradientSteps\",\n steps,\n self.update_counter)\n mean_frames = np.mean(self.buffer.episode_lengths)\n std_frames = np.std(self.buffer.episode_lengths)\n self.writer.add_scalar(\"Training/Episodes/Mean_Frames\",\n mean_frames,\n self.update_counter)\n self.writer.add_scalar(\"Training/Episodes/Std_Frames\",\n std_frames,\n self.update_counter)\n self.writer.add_histogram(\"Training/Episodes/Frames\",\n np.array(self.buffer.episode_lengths),\n self.update_counter)\n mean_reward = np.mean(self.buffer.episode_rewards)\n std_reward = np.std(self.buffer.episode_rewards)\n rews_per_frame = np.array(self.buffer.episode_rewards) / \\\n np.array(self.buffer.episode_lengths, dtype=np.float)\n self.writer.add_scalar(\"Training/Episodes/Mean_Reward\",\n mean_reward,\n self.update_counter)\n self.writer.add_scalar(\"Training/Episodes/Std_Reward\",\n std_reward,\n self.update_counter)\n self.writer.add_scalar(\"Training/Episodes/Reward_per_Frame_Mean\",\n np.mean(rews_per_frame),\n self.update_counter)\n self.writer.add_scalar(\"Training/Episodes/Reward_per_Frame_Std\",\n np.std(rews_per_frame),\n self.update_counter)\n self.writer.add_histogram(\"Training/Episodes/Rewards\",\n 
np.array(self.buffer.episode_rewards),\n self.update_counter)\n self.writer.add_histogram(\"Training/Episodes/Rewards_per_Frame\",\n rews_per_frame,\n self.update_counter)\n actions = np.array(self.buffer.actions)\n self.writer.add_histogram(\"Training/Action/DeltaVel\",\n actions[:, 0],\n self.update_counter)\n self.writer.add_histogram(\"Training/Action/DeltaLat\",\n actions[:, 1],\n self.update_counter)\n self.writer.add_histogram(\"Training/Values\",\n np.array(self.buffer.values),\n self.update_counter)\n self.writer.add_histogram(\"Training/Avantages\",\n np.array(self.buffer.advantages),\n self.update_counter)\n self.writer.add_histogram(\"Training/GradNorms\",\n np.array(self.grad_norms),\n self.update_counter)\n self.writer.add_histogram(\"Training/Ratio/Ratio\",\n np.array(self.ratios).flatten(),\n self.update_counter)\n self.writer.add_histogram(\"Training/Ratio/ClippedRatio\",\n np.array(self.clipped_ratios).flatten(),\n self.update_counter)\n\n self.writer.flush()\n\n print(\"-\" * 30)\n print(\"PPO Optimization\")\n print(\"Policy_Loss: {}\\t\\t\".format(policy_loss))\n print(\"Value_Loss: {}\\t\\t\".format(value_loss))\n print(\"Entropy: {}\\t\\t\".format(entropy))\n print(\"Lr_pi: {}\\t\\t\".format(self.lr_pi))\n print(\"Lr_vf: {}\\t\\t\".format(self.lr_vf))\n print(\"KL_Divergence: {}\\t\\t\".format(kl_divergence))\n print(\"Clip_Fraction: {}\\t\\t\".format(clipping_fraction))\n print(\"Exp_Variance: {}\\t\\t\".format(explained_variance))\n print(\"Mean_Reward: {}\\t\\t\".format(mean_reward))\n print(\"Std_Reward: {}\\t\\t\".format(std_reward))\n print(\"Mean_Frames: {}\\t\\t\".format(mean_frames))\n print(\"Std_Frames: {}\\t\\t\".format(std_frames))\n print(\"Mean_Reward_per_frame: {}\\t\\t\".format(np.mean(rews_per_frame)))\n print(\"Std_Reward_per_frame: {}\\t\\t\".format(np.std(rews_per_frame)))\n print(\"Optimization steps: {}\\t\\t\". format(self.update_counter))\n print(\"-\" * 30)", "def log(data):\n return _make.log(data)", "def on_R3(self):\r\n self.log()", "def log_training(self, batch, total_batches, result):\n metrics = [\"loss\", \"accuracy\"]\n for metric in metrics:\n if metric not in self.logs:\n self.logs[metric] = []\n self.logs[metric].append(result[metric])\n if batch % self.log_frequency == 0 or batch + 1 == total_batches:\n print(\"Batch {} / {} = {:.2f} %\".format(batch, total_batches, 100 * batch / total_batches))\n print(\"{:20}: {}\".format(\"Global step\", result[\"global_step\"]))\n print(\"{:20}: {:.4e}\".format(\"Learning rate\", result[\"learning_rate\"]))\n for metric in metrics:\n metric_logs = self.logs[metric]\n average = sum(metric_logs) / len(metric_logs)\n print(\"{:20}: {:.4}\".format(\"Training \" + metric, average))\n self.logs[metric] = []\n val_metrics = self.evaluate(self.batches_valid)\n for k, v in val_metrics.items():\n print(\"{:20}: {:.4}\".format(\"Validation \" + k, v))", "def test_logging(train_logger, valid_logger):\n\n global_step = 0\n acc_list = []\n acc_val_list = []\n #acc_list = np.array([])\n #acc_val_list = np.array([])\n # This is a strongly simplified training loop\n for epoch in range(10):\n torch.manual_seed(epoch)\n for iteration in range(20):\n dummy_train_loss = 0.9**(epoch+iteration/20.)\n dummy_train_accuracy = epoch/10. 
+ torch.randn(10)\n \n #log the training loss\n train_logger.add_scalar('loss', dummy_train_loss, global_step = global_step)\n global_step += 1\n\n #append the training accuracy to a list\n acc_list.append(dummy_train_accuracy)\n \n #take the average of the training accuract \n #average = torch.mean(torch.stack(acc_list))\n #log the taining accuracy\n acc_new = [x.cpu().detach().numpy() for x in acc_list]\n train_logger.add_scalar('accuracy', np.mean(acc_new), global_step = global_step)\n \n torch.manual_seed(epoch)\n for iteration in range(10):\n dummy_validation_accuracy = epoch / 10. + torch.randn(10)\n \n #append the accuracy to a list\n acc_val_list.append(dummy_validation_accuracy)\n \n #take the average and log the accuracy\n averageValid = torch.mean(torch.stack(acc_val_list))\n valid_logger.add_scalar('accuracy', averageValid, global_step = global_step)", "def on_train_begin(self, logs):\n self.train_start = timeit.default_timer()\n self.metrics_names = self.model.metrics_names\n print('Training for {} steps ...'.format(self.params['nb_steps']))", "def log(self, log_directly = True):\n stats = self.get_stats()\n logging_dict = dict(advantage_mean = _seq_mean(stats[\"advantage_mean\"]),\n critic_grad_norm = _seq_mean(stats[\"critic_grad_norm\"]),\n critic_loss =_seq_mean(stats[\"critic_loss\"]),\n policy_grad_norm = _seq_mean(stats[\"policy_grad_norm\"]),\n policy_loss = _seq_mean(stats[\"policy_loss\"]),\n target_critic_mean = _seq_mean(stats[\"target_critic_mean\"]),\n T_critic=self.T_critic,\n T_policy=self.T_policy\n )\n logging_str = \"T_policy={:g}, T_critic={:g}, \".format(logging_dict[\"T_policy\"], logging_dict[\"T_critic\"])\n logging_str += _make_logging_str(_copy_remove_keys(logging_dict, [\"T_policy\", \"T_critic\"]))\n\n if log_directly:\n self.logging_struct.py_logger.info(\"{} LEARNER INFO: {}\".format(self.args.learner.upper(), logging_str))\n\n return logging_str, logging_dict", "def log_parameters(self, parameters, step=None):\n self.experiment.log_parameters(parameters, step=step)", "def tbx_logger(self, log_dict, training_i):\n for tag, value in log_dict.items():\n self.tbx_writer.add_scalar(tag, value, training_i)", "def log_test_step(self, test_log: dict, step: Union[int,None] = None) -> None:\n if self.log_mlflow:\n mlflow.log_metrics(test_log, step=self.e)", "def on_train_begin(self, logs={}):\n self.losses = []\n self.val_losses = []", "def logging_loop(self, num_gpus):\n # Launch the test worker to get performance metrics\n self.test_worker = self_play.SelfPlay.options(\n num_cpus=0, num_gpus=num_gpus,\n ).remote(\n self.checkpoint,\n self.Game,\n self.config,\n self.config.seed + self.config.num_workers,\n )\n self.test_worker.continuous_self_play.remote(\n self.shared_storage_worker, None, True\n )\n\n # Write everything in TensorBoard\n writer = SummaryWriter(self.config.results_path)\n\n print(\n \"\\nTraining...\\nRun tensorboard --logdir ./results and go to http://localhost:6006/ to see in real time the training performance.\\n\"\n )\n\n # Save hyperparameters to TensorBoard\n hp_table = [\n f\"| {key} | {value} |\" for key, value in self.config.__dict__.items()\n ]\n writer.add_text(\n \"Hyperparameters\",\n \"| Parameter | Value |\\n|-------|-------|\\n\" + \"\\n\".join(hp_table),\n )\n # Save model representation\n writer.add_text(\n \"Model summary\", self.summary,\n )\n # Loop for updating the training performance\n counter = 0\n keys = [\n \"total_reward\",\n \"wormzero_reward\",\n \"opponent_reward\",\n \"episode_length\",\n \"mean_value\",\n 
\"training_step\",\n \"lr\",\n \"total_loss\",\n \"value_loss\",\n \"policy_loss\",\n \"num_played_games\",\n \"num_played_steps\",\n \"num_reanalysed_games\",\n ]\n info = ray.get(self.shared_storage_worker.get_info.remote(keys))\n try:\n while info[\"training_step\"] < self.config.training_steps:\n info = ray.get(self.shared_storage_worker.get_info.remote(keys))\n writer.add_scalar(\n \"1.Total_reward/1.Total_reward\", info[\"total_reward\"], counter,\n )\n writer.add_scalar(\n \"1.Total_reward/2.Mean_value\", info[\"mean_value\"], counter,\n )\n writer.add_scalar(\n \"1.Total_reward/3.Episode_length\", info[\"episode_length\"], counter,\n )\n writer.add_scalar(\n \"1.Total_reward/4.WormZero_reward\", info[\"wormzero_reward\"], counter,\n )\n writer.add_scalar(\n \"1.Total_reward/5.Opponent_reward\",\n info[\"opponent_reward\"],\n counter,\n )\n writer.add_scalar(\n \"2.Workers/1.Self_played_games\", info[\"num_played_games\"], counter,\n )\n writer.add_scalar(\n \"2.Workers/2.Training_steps\", info[\"training_step\"], counter\n )\n writer.add_scalar(\n \"2.Workers/3.Self_played_steps\", info[\"num_played_steps\"], counter\n )\n writer.add_scalar(\n \"2.Workers/4.Reanalysed_games\",\n info[\"num_reanalysed_games\"],\n counter,\n )\n writer.add_scalar(\n \"2.Workers/5.Training_steps_per_self_played_step_ratio\",\n info[\"training_step\"] / max(1, info[\"num_played_steps\"]),\n counter,\n )\n writer.add_scalar(\"2.Workers/6.Learning_rate\", info[\"lr\"], counter)\n writer.add_scalar(\n \"3.Loss/1.Total_weighted_loss\", info[\"total_loss\"], counter\n )\n writer.add_scalar(\"3.Loss/Value_loss\", info[\"value_loss\"], counter)\n writer.add_scalar(\"3.Loss/Policy_loss\", info[\"policy_loss\"], counter)\n print(\n f'Last test reward: {info[\"total_reward\"]:.2f}. Training step: {info[\"training_step\"]}/{self.config.training_steps}. Played games: {info[\"num_played_games\"]}. 
Loss: {info[\"total_loss\"]:.2f}',\n end=\"\\r\",\n )\n counter += 1\n time.sleep(0.5)\n except KeyboardInterrupt:\n pass\n\n self.terminate_workers()\n\n if self.config.save_model:\n # Persist replay buffer to disk\n print(\"\\n\\nPersisting replay buffer games to disk...\")\n pickle.dump(\n {\n \"buffer\": self.replay_buffer,\n \"num_played_games\": self.checkpoint[\"num_played_games\"],\n \"num_played_steps\": self.checkpoint[\"num_played_steps\"],\n \"num_reanalysed_games\": self.checkpoint[\"num_reanalysed_games\"],\n },\n open(os.path.join(self.config.results_path, \"replay_buffer.pkl\"), \"wb\"),\n )", "def dump_to_tensorboard(self, log_path: str) -> None:\n LOGGER.info(f'Log evaluations in tensorboard.')\n writer = SummaryWriter(log_dir=log_path)\n for key, value in self.eval_dict:\n writer.add_scalar(key, value)", "def log_val_step(self, val_log: dict, step: Union[int,None] = None) -> None:\n if self.log_mlflow:\n mlflow.log_metrics(val_log, step=self.e)", "def log_results(self, filename=None):\n\n self.ad_log['train_auc'] = self.diag['train']['auc'][-1]\n self.ad_log['train_accuracy'] = self.diag['train']['acc'][-1]\n self.ad_log['train_time'] = self.train_time\n\n self.ad_log['test_auc'] = self.diag['test']['auc'][-1]\n self.ad_log['test_accuracy'] = self.diag['test']['acc'][-1]\n self.ad_log['test_time'] = self.test_time\n\n self.ad_log.save_to_file(filename=filename)", "def log_step(\n metric_dict={},\n mode='train',\n writer=None,\n global_step=0,\n elapsed_eta=None,\n training_speed=None\n):\n log_msg = '[{mode}] step: {step}'\n log_msg = log_msg.format(\n mode=mode,\n step=global_step,\n )\n for key, value in metric_dict.items():\n log_msg += ' - {}: {}'.format(key, round(value, 4))\n\n # Write to tensorboard\n if writer is not None:\n for key, value in metric_dict.items():\n writer.add_scalar(key, value, global_step=global_step)\n\n if elapsed_eta is not None:\n log_msg += ' - elapsed: {} - eta: {}'.format(\n datetime.timedelta(seconds=int(elapsed_eta[0])),\n datetime.timedelta(seconds=int(elapsed_eta[1]))\n )\n if writer is not None:\n writer.add_scalar('eta', elapsed_eta[1], global_step=global_step)\n\n if training_speed is not None:\n log_msg += ' - step/sec: {:.4f}'.format(training_speed)\n if writer is not None:\n writer.add_scalar(\n 'step/sec', training_speed, global_step=global_step)\n\n logger.info(log_msg)", "def logStarted(build, step, log):", "def enter_state(self):\r\n self.__log__(logging.debug)\r\n return", "def on_L2(self):\r\n self.log()", "def log_metrics(self, metrics, step=None, epoch=None, prefix=None):\n self.experiment.log_metrics(metrics, step=step, epoch=epoch, prefix=prefix)", "def on_train_end(self, logs=None):", "def on_train_end(self, logs=None):", "def trace(self, out):\n if self.step == 0:\n out.write(\"# %5s %16s %8s %8s %7s\\n\" \\\n % ('Step', 'Current energy', 'Av shift',\n 'Mx shift', 'Funcs'))\n log = \"%7d %16.5f %8.4f %8.4f %7d\\n\" \\\n % (self.step, self.current_e, self.shiftavr,\n self.shiftmax, self.funcs)\n out.write(log)", "def record(self, var_keys, value=None):\n\n for var_key in make_list(var_keys):\n\n # Create empty lists\n if 't' not in self.log:\n self.log['t'] = []\n if var_key not in self.log:\n self.log[var_key] = [None] * len(self.log['t'])\n\n if self.model.t not in self.log['t']:\n\n # Create empty slot for new documented time step\n for v in self.log.values():\n v.append(None)\n\n # Store time step\n self.log['t'][-1] = self.model.t\n\n if value is None:\n v = getattr(self, var_key)\n else:\n v = value\n\n 
self.log[var_key][-1] = v", "def test(self):\n self.info(\"LOGGING: Testing log messages\")\n self.debug(\"This is a debugging message\")\n self.info(\"This is an informational message\")\n self.warning(\"This is a warning message\")\n self.error(\"This is an error message\")\n self.critical(\"This is a critical message\")\n self.info(\"LOGGING: Testing log messages COMPLETE\")\n return", "def log_data(self):\n\n assert self.tello is not None\n self.tello.subscribe(self.tello.EVENT_LOG_DATA, self.log_handler)\n self.tello.subscribe(self.tello.EVENT_FLIGHT_DATA, self.log_handler)\n self.tello.subscribe(self.tello.EVENT_FILE_RECEIVED, self.log_handler)", "def _add_log_data(self, data):\n self.solver._notify_new_log(data)\n if self.log_enabled:\n if self.log_print:\n write_checking_unicode_errors(self.log_output, data)\n self.log_output.flush()\n if self.log_data is not None:\n self.log_data.append(data)\n # Update statistics\n self.process_infos.incr(CpoProcessInfos.TOTAL_LOG_DATA_SIZE, len(data))", "def record_lr(self, optimizer, step):\n for idx, group in enumerate(optimizer.param_groups):\n updated_lr = group[\"lr\"]\n self.writer_.add_scalar(tag=f\"train_step/group{idx}\", scalar_value=updated_lr, global_step=step)", "def on_b(self):\r\n self.log()", "def log_state(self):\n\n log('-' * 50)\n log('.level=%d' % self.level)\n log('.view_llon=%.3f, .view_rlon=%.3f'\n % (self.view_llon, self.view_rlon))\n log('.view_tlat=%.3f, .view_blat=%.3f'\n % (self.view_tlat, self.view_blat))\n log('.ppd_x=%.2f, .ppd_y=%.2f' % (self.ppd_x, self.ppd_y))\n log('.view_offset_x=%d, .view_offset_y=%d'\n % (self.view_offset_x, self.view_offset_y))\n log('.view_width=%d, .view_height=%d'\n % (self.view_width, self.view_height))\n log('-' * 50)\n log('')", "def step(self):\n #1. Time progresses\n self.time_operator.step()\n \n #2. Form and dissolve relationships\"\n self.relationship_operator.step()\n\n #3. HIV transmission\n self.infection_operator.step()", "def log_saved_for_later( cls, logger ):\n for arg_set in cls.__log_later :\n print( f\"log_saved_for_later {arg_set[ 0 ]} {arg_set[ 1 ]}\" )\n logger.log( arg_set[ 0 ], arg_set[ 1 ] )", "def _log_event(event):\n if event.WhichOneof(\"what\") == \"summary\":\n summary = event.summary\n for v in summary.value:\n if v.HasField(\"simple_value\"):\n # NB: Most TensorFlow APIs use one-indexing for epochs, while tf.Keras\n # uses zero-indexing. 
Accordingly, the modular arithmetic used here is slightly\n # different from the arithmetic used in `__MLflowTfKeras2Callback.on_epoch_end`,\n # which provides metric logging hooks for tf.Keras\n if (event.step - 1) % _LOG_EVERY_N_STEPS == 0:\n _add_to_queue(\n key=v.tag,\n value=v.simple_value,\n step=event.step,\n time=int(time.time() * 1000),\n run_id=mlflow.active_run().info.run_id,\n )", "def record_vars(context, data):\n pass", "def after_val_iter(self,\n runner,\n batch_idx: int,\n data_batch: DATA_BATCH = None,\n outputs: Optional[Sequence] = None) -> None:\n if self.every_n_inner_iters(batch_idx, self.interval):\n _, log_str = runner.log_processor.get_log_after_iter(\n runner, batch_idx, 'val')\n runner.logger.info(log_str)", "def __log_data_handler(self, event, sender, data):\n pos_x = -data.mvo.pos_x\n pos_y = -data.mvo.pos_y\n pos_z = -data.mvo.pos_z\n # First time we have meaningful values, we store them as reference\n if abs(pos_x) + abs(pos_y) + abs(pos_z) > 0.07:\n if self.ref_pos_x == -1:\n self.ref_pos_x = pos_x\n self.ref_pos_y = pos_y\n self.ref_pos_z = pos_z\n else:\n self.pos_x = pos_x - self.ref_pos_x\n self.pos_y = pos_y - self.ref_pos_y\n self.pos_z = pos_z - self.ref_pos_z\n\n qx = data.imu.q1\n qy = data.imu.q2\n qz = data.imu.q3\n qw = data.imu.q0\n\n degree = 0.01745\n siny = 2 * (qw * qz + qx * qy)\n cosy = 1 - 2 * (qy * qy + qz * qz)\n self.yaw = int(atan2(siny, cosy) / degree)\n\n if self.write_header:\n self.log_file.write(f\"{data.format_cvs_header()}\\n\")\n self.write_header = False\n self.log_file.write(f\"{data.format_cvs()}\\n\")", "def log_example(var):\n\n log.info('example code started')\n log.debug('calling settings')\n test_settings()\n log2.error('there is no error this is example ')\n log2.info('finished')", "def log_train_step(self, train_log: dict, step: Union[int,None] = None) -> None:\n if self.log_mlflow:\n mlflow.log_metrics(train_log, step=step)", "def do(self, callback_name, *args):\n value_dict = self._evaluator.evaluate(self.data_stream)\n print(\"Train test coverage:{0}\".format(self.coverage))\n for key, value in value_dict.items():\n print(\"{0}:{1}\".format(key, value * self.coverage))", "def _log_results(self, results):\n log.new_entry(results)\n self.new_entry = 2", "def report(LOGDIR, epoch, e_dict, saver, sess, fh_log):\n # print loss\n print (\"Epoch: %i; Loss: %f; KLd: %f; CE %f\" % (epoch, e_dict[\"loss\"][-1], e_dict[\"KLd\"][-1], e_dict[\"CE\"][-1]))\n fh_log.write(\"%i\\t%0.5e\\t%0.5e\\t%0.5e\\n\" % (epoch, e_dict[\"loss\"][-1], e_dict[\"KLd\"][-1], e_dict[\"CE\"][-1]))", "def log(self, reward, action):\n self.logs.append([reward, action])", "def magic_logstate(self,parameter_s=''):\n\n self.logstate()", "def report(self):\r\n print(\"\".join(self.memory), self.error, self.steps)", "def log(self, key: str, val: Any, iteration: int = None) -> None:\n assert key is not None and val is not None, \"Please set key and val\"\n\n if self._tb_writer is not None:\n assert (\n iteration is not None\n ), \"Must specify iteration when logging to tensorboard\"\n self._tb_writer.add_scalar(key, val, iteration)\n if self._tqdm_bar is not None:\n # update tqdm bar\n self._tqdm_data[key] = val\n self._tqdm_bar.set_postfix(self._tqdm_data, refresh=True)", "def _print(self):\n self.i += 1\n np.set_printoptions(precision=3, suppress=True)\n if self.i%40 == 0:\n self.i = 0\n print self.ekf.current_state_estimate[4:7]", "def test_print_each(self):\n logger = Logger(each=3, nb_epochs=100)\n logger.__print_function__ = mock.Mock()\n 
logger.write = mock.Mock()\n for i in range(100):\n i += 1\n logger.epoch(i, lambda: {\"train_lemma\": (0+i, 1+i, 2+i)})\n\n self.assertEqual(logger.__print_function__.called, True, \"Calling to print should have been done\")\n self.assertEqual(logger.write.called, False, \"File has not been set and should not be called\")\n self.assertEqual(len(logger.__print_function__.call_args_list), 35*4, \"There should be 35 time the printing\")\n expected = [\n mock.call(\"::: Train Scores (lemma) :::\"),\n mock.call('+\\tall acc:', 1),\n mock.call('+\\tkno acc:', 2),\n mock.call('+\\tunk acc:', 3)\n ] + [\n call\n for i in range(1, 34)\n for call in [\n mock.call(\"::: Train Scores (lemma) :::\"),\n mock.call('+\\tall acc:', 0+i*3),\n mock.call('+\\tkno acc:', 1+i*3),\n mock.call('+\\tunk acc:', 2+i*3)\n ]\n ] + [\n mock.call(\"::: Train Scores (lemma) :::\"),\n mock.call('+\\tall acc:', 100),\n mock.call('+\\tkno acc:', 101),\n mock.call('+\\tunk acc:', 102)\n ]\n self.assertEqual(\n logger.__print_function__.call_args_list, expected,\n \"It should print the first, each third (except the first) log and the last one\"\n )", "def on_train_begin(self, logs={}):\n self.val_kappas = []", "def start_evaluation(self, tb_path: str = None, log_path: str = None) -> None:\n LOGGER.info(f'Start Evaluation')\n self.model.eval() #evaluation mode\n mean_dict = {metric.__name__: [] for metric in self.metrics}\n\n for bootstrap_step in range(self.num_bootstraps):\n self._current_bootstrap_step = bootstrap_step\n self._bootstrap_step(mean_dict)\n self._compute_mean_variance(mean_dict)\n self._evaluation_done = True\n if tb_path is not None:\n self.dump_to_tensorboard(tb_path)\n if log_path is not None:\n self.dump_evaluation(log_path)", "def __log_trial__(self, trial_data):\n from klibs.KLDatabase import EntryTemplate\n\n trial_template = EntryTemplate('trials')\n trial_template.log(P.id_field_name, P.participant_id)\n for attr in trial_data:\n trial_template.log(attr, trial_data[attr])\n\n return self.database.insert(trial_template)", "def on_train_begin(self, logs={}):\n self._beta = []", "def print_data(self):\n for chain, gen in self.generations.items():\n print('Generations for chain %s: %d' % (chain, gen))\n print('Log likelihood effective size: %d' % self.loglik_effsize)\n print('Log likelihood relative difference: %f' % self.loglik_rel_diff)\n print('Max diff: %f' % self.max_diff)" ]
[ "0.6422396", "0.6307403", "0.6306143", "0.6290289", "0.6281441", "0.6219549", "0.61849844", "0.61498964", "0.6084978", "0.6076392", "0.60656595", "0.60503274", "0.5975851", "0.597538", "0.59376127", "0.5935147", "0.5904615", "0.58918566", "0.5876711", "0.5858225", "0.58474666", "0.5846372", "0.58054787", "0.57902116", "0.5784475", "0.5778492", "0.57668", "0.5735868", "0.5714438", "0.57084805", "0.5699252", "0.56946373", "0.5694122", "0.5694122", "0.56927085", "0.5692503", "0.56875896", "0.56772083", "0.5673383", "0.5672568", "0.5656803", "0.5646829", "0.5644982", "0.5639137", "0.5635797", "0.5634765", "0.56039524", "0.5601109", "0.55945915", "0.55860317", "0.5585943", "0.55847496", "0.5572207", "0.55642354", "0.5559731", "0.55521035", "0.5531135", "0.55217576", "0.5521756", "0.55128", "0.5510964", "0.55064285", "0.5502006", "0.54925627", "0.54918104", "0.54878324", "0.5487501", "0.5486364", "0.5485709", "0.5485709", "0.5477827", "0.5461813", "0.5448802", "0.5440158", "0.5438903", "0.543809", "0.5436081", "0.54352045", "0.5430252", "0.5412385", "0.5411353", "0.5401044", "0.5400685", "0.5397233", "0.5388681", "0.53748685", "0.5374832", "0.53692645", "0.53680444", "0.5365697", "0.5362714", "0.5360613", "0.53568166", "0.53566134", "0.53530335", "0.53456616", "0.5334657", "0.5333595", "0.53278714", "0.532472" ]
0.65032554
0
Returns agents' fingerprints (policies).
def get_fingerprint(self):
    return self.fp
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _gpg_fingerprints(self) -> List[str]:\n return self._gpg_keys.fingerprints", "def get_fingerprints(self, jid: JID) -> List[str]:\n return []", "def get_hostfingerprint_list(self):\n return self.hostfingerprint", "def get_policies():\r\n policy = policies.values()\r\n return policy", "def policy(agent):", "def probes(self):\r\n return probes.Probes(self)", "def getProbes(self):\n probes = \"\"\n try:\n p = self.fetchProbes()\n for probe in p:\n probes += \"{id};{ip};{status};\\n\".format(id = probe.getId(), ip = probe.getIp(), status = probe.getStatus())\n except ProbeConnectionFailed:\n probes = self.error\n finally:\n return probes", "def getAgents(issuer=False, dbn='core', env=None):\n global gDbEnv\n\n if env is None:\n env = gDbEnv\n\n if env is None:\n raise DatabaseError(\"Database environment not set up\")\n\n entries = []\n subDb = gDbEnv.open_db(dbn.encode(\"utf-8\"), dupsort=True) # open named sub db named dbn within env\n with gDbEnv.begin(db=subDb) as txn: # txn is a Transaction object\n with txn.cursor() as cursor:\n if cursor.first(): # first key in database\n while True:\n key = cursor.key().decode()\n if len(key) == DID_LENGTH and \"/\" not in key:\n value = cursor.value().decode()\n ser, sep, sig = value.partition(SEPARATOR)\n try:\n dat = json.loads(ser, object_pairs_hook=ODict)\n except ValueError as ex:\n if cursor.next():\n continue\n else:\n break\n try:\n did, index = dat[\"signer\"].rsplit(\"#\", maxsplit=1)\n except (AttributeError, ValueError) as ex:\n if cursor.next():\n continue\n else:\n break\n\n if did == key: # self signed so agent\n if issuer:\n if \"issuants\" in dat:\n entries.append(key)\n else:\n entries.append(key)\n if not cursor.next(): # next key in database if any\n break\n return entries", "def get_protection_policies(cohesity_client):\n policy_list = cohesity_client.protection_policies.get_protection_policies()\n policy_list = policy_list if policy_list else []\n for policy in policy_list:\n exported_res_dict[\"Protection Policies\"].append(policy.name)\n return policy_list", "def getAllProbes():\n\tprobes = set()\n\tfor log in ProbeLog.objects.all():\n\t\tprobes.add(log.probe)\n\treturn probes", "def aslist(self):\n return sorted(list(self.fingerprints))", "def hs_signers(self):\n return [{'name': u.get_full_name(), 'email': u.email} for u in [self.workspace.lawyer, self.user]]", "def get_agent_keys(logger=None):\n paramiko_agent = paramiko.Agent()\n agent_keys = paramiko_agent.get_keys()\n if logger:\n logger.info('{0} keys loaded from agent'.format(len(agent_keys)))\n return list(agent_keys)", "def _get_electronic_signatures(self, report=False):\n certificates = []\n\n return certificates", "def get_usable_guards(client_as, fp_to_as, pfi):\r\n\r\n # dict mapping all guard fps to bool usability\r\n guard_to_usability = make_guard_usability_dict(client_as, \r\n fp_to_as, \r\n pfi)\r\n\r\n\r\n # filter\r\n guard_to_usability = {fp:guard_to_usability[fp] for fp in fp_to_as}\r\n\r\n safe_guard_fps = list(filter(lambda x: guard_to_usability[x],\r\n guard_to_usability.keys()))\r\n\r\n return safe_guard_fps", "def fingerprint(self, algorithm):", "def capabilities(self):\n return []", "def get_probes(self):\n # TODO: might create a cached list of objects\n # TODO: instead of returning the simple json dict\n for probe in self.probes.values():\n yield probe", "def get(self):\n return {\n 'imLeader': arearesilience.imLeader(),\n 'imBackup': arearesilience.imBackup(),\n 'imCloud' : agentstart.imCloud\n }, 200", "def get_critics(self):\n actors = 
[ddpg_agent.critic for ddpg_agent in self.maddpg_agent]\n return actors", "def getFingerprint(self):\r\n if self.getNumCerts() == 0:\r\n raise AssertionError()\r\n return self.x509List[0].getFingerprint()", "def _get_p_agent(self):\n context = self._context or {}\n res = {}.fromkeys(self.ids, self._get_partner_agent(\n ))\n return res", "def consolidated_risks(self):\n privilege_escalation_results = {}\n resource_exposure_results = []\n data_exfiltration_results = []\n\n # Get it from each inline policy\n if self.inline_policies:\n for inline_policy in self.inline_policies:\n # Privilege Escalation\n if inline_policy.policy_document.allows_privilege_escalation:\n for entry in inline_policy.policy_document.allows_privilege_escalation:\n if entry[\"type\"] not in privilege_escalation_results.keys():\n privilege_escalation_results[entry[\"type\"]] = entry[\"actions\"]\n # Resource Exposure\n if inline_policy.policy_document.permissions_management_without_constraints:\n for action in inline_policy.policy_document.permissions_management_without_constraints:\n if action not in resource_exposure_results:\n resource_exposure_results.append(action)\n # Data Exfiltration\n if inline_policy.policy_document.allows_data_exfiltration_actions:\n for action in inline_policy.policy_document.allows_data_exfiltration_actions:\n if action not in data_exfiltration_results:\n data_exfiltration_results.append(action)\n\n if self.attached_managed_policies:\n for managed_policy in self.attached_managed_policies:\n # Privilege Escalation\n if managed_policy.policy_document.allows_privilege_escalation:\n for entry in managed_policy.policy_document.allows_privilege_escalation:\n if entry[\"type\"] not in privilege_escalation_results.keys():\n privilege_escalation_results[entry[\"type\"]] = entry[\"actions\"]\n # Resource Exposure\n if managed_policy.policy_document.permissions_management_without_constraints:\n for action in managed_policy.policy_document.permissions_management_without_constraints:\n if action not in resource_exposure_results:\n resource_exposure_results.append(action)\n # Data Exfiltration\n if managed_policy.policy_document.allows_data_exfiltration_actions:\n for action in managed_policy.policy_document.allows_data_exfiltration_actions:\n if action not in data_exfiltration_results:\n data_exfiltration_results.append(action)\n\n # turn it into a list because we want to be able to count the number of results\n these_privilege_escalation_results = []\n\n for key in privilege_escalation_results:\n result = {\n \"type\": key,\n \"actions\": privilege_escalation_results[key]\n }\n these_privilege_escalation_results.append(result)\n\n resource_exposure_results.sort()\n data_exfiltration_results.sort()\n\n results = {\n \"PrivilegeEscalation\": these_privilege_escalation_results,\n \"ResourceExposure\": resource_exposure_results,\n \"DataExfiltration\": data_exfiltration_results,\n }\n return results", "def test_get_bios_policy_list(self):\n pass", "def create_agents() -> List[InsuranceAgent]:\n agents = []\n for consumer in range(AGENTS_COUNT):\n insurance_agent = InsuranceAgent(\n personal_info={\n AGE: FAKE.random_int(min=0, max=120),\n STATE: FAKE.state(),\n KIDS_COUNT: FAKE.random_int(min=0, max=12),\n CARS_COUNT: FAKE.random_int(min=0, max=10),\n INSURANCE_OPERATION: random.choice((RENT, BUY)),\n INCOME: FAKE.random_int(min=0, max=1000000),\n PHONE_NUMBER: FAKE.phone_number(),\n AVAILABLE: True,\n },\n call_acceptance_criteria=[\n {\n \"person_attribute\": AGE,\n \"comparison_operator\": 
random.choice((\"<\", \">\")),\n \"value\": FAKE.random_int(\n min=0,\n max=120,\n ),\n },\n {\n \"person_attribute\": INCOME,\n \"comparison_operator\": random.choice((\"<\", \">\")),\n \"value\": FAKE.random_int(\n min=0,\n max=1000000,\n ),\n },\n {\n \"person_attribute\": KIDS_COUNT,\n \"comparison_operator\": random.choice((\"<\", \">\")),\n \"value\": FAKE.random_int(\n min=0,\n max=12,\n ),\n },\n {\n \"person_attribute\": CARS_COUNT,\n \"comparison_operator\": random.choice((\"<\", \">\")),\n \"value\": FAKE.random_int(\n min=0,\n max=12,\n ),\n },\n {\n \"person_attribute\": INSURANCE_OPERATION,\n \"comparison_operator\": random.choice((\"<\", \">\")),\n \"value\": random.choice((RENT, BUY)),\n },\n ],\n )\n agents.append(insurance_agent)\n return agents", "def get_ies_profiles():\n\n # XXX: Read dynamically from rp installation\n profiles = [\n 'area_light.ies', 'bollard.ies', 'cylinder_narrow.ies',\n 'cylinder_wide.ies', 'defined_diffuse.ies', 'defined_diffuse_spot.ies',\n 'defined_spot.ies', 'display.ies', 'jelly_fish.ies', 'medium_scatter.ies', 'overhead.ies',\n 'parallel_beam.ies', 'pear.ies', 'scatter_light.ies', 'soft_arrow.ies',\n 'soft_display.ies', 'star_focused.ies', 'three_lobe_umbrella.ies', 'three_lobe_vee.ies',\n 'tight_focused.ies', 'top_post.ies', 'trapezoid.ies', 'umbrella.ies', 'vee.ies',\n 'x_arrow.ies', 'x_arrow_diffuse.ies', 'x_arrow_soft.ies'\n ]\n\n options = [(\"none\", \"None\", \"None\")]\n for profile_id in profiles:\n name = profile_id.replace(\".ies\", \"\").title().replace(\"_\", \" \")\n options.append((profile_id, name, name))\n\n return options", "def _get_wh_agent(self):\n context = self._context or {}\n res = {}.fromkeys(self.ids, self._get_uid_wh_agent(\n ))\n return res", "def claims(self):\n return self._itempage.claims", "def do_capabilities(cs, args):\n caps = cs.capabilities.list()\n fields = [\"scheme\", \"location\", \"term\", \"title\"]\n\n schemes = {i[\"scheme\"] for i in caps}\n\n print schemes\n for scheme in schemes:\n aux = [i for i in caps if scheme == i[\"scheme\"]]\n utils.print_list(aux, fields)", "def get_agent_terms(self):\n return # osid.authentication.AgentQueryInspector", "def _show_fingerprints(self, jid: JID) -> None:\n fprs = self.get_fingerprints(jid)\n if len(fprs) == 1:\n self.api.information(\n 'Fingerprint for %s: %s' % (jid, fprs[0]),\n 'Info',\n )\n elif fprs:\n self.api.information(\n 'Fingerprints for %s:\\n\\t%s' % (jid, '\\n\\t'.join(fprs)),\n 'Info',\n )\n else:\n self.api.information(\n 'No fingerprints to display',\n 'Info',\n )", "def _get_identities_from_provisioning_profile(mpf):\n for identity in mpf[\"DeveloperCertificates\"]:\n if not isinstance(identity, bytes):\n # Old versions of plistlib return the deprecated plistlib.Data type\n # instead of bytes.\n identity = identity.data\n yield _certificate_fingerprint(identity)", "def getAllAgents(self):\n agent_dict ={}\n for member in self.membership.listMembers():\n if member.has_role('Agent'):\n agent_id = member.getUserName()\n agent_dict[agent_id]={}\n agent_dict[agent_id]['email'] = member.getProperty('email')\n agent_dict[agent_id]['areas'] = self.__wrapAreas(member.getProperty('areas'))\n agent_dict[agent_id]['fullname'] = member.getProperty('fullname')\n \n return agent_dict", "def all_known_cars():\n return list(_FINGERPRINTS.keys())", "def getPurchasableGenerators(self) -> list:\n pass", "def test_get_dispatch_policy_list(self):\n pass", "def list_known_phylogenetic_metrics():\r\n result = []\r\n for name in dir(qiime.beta_metrics):\r\n if 
name.startswith('dist_'):\r\n result.append(name[5:])\r\n result.sort()\r\n return result", "def gene_descriptors(civic_gid19):\n return [civic_gid19]", "def keys(self):\n if self.policies is None:\n return set([])\n return self.policies.keys()", "def probe(self) -> dict:\n result = {}\n for name, func in self._probes.items():\n result[name] = func()\n return result", "def routes():\n routeList = []\n for profile in globalProfile():\n routeList.append(profile.route)\n return routeList", "def getReportersForSittingVocab(self):\n rota_tool = getToolByName(self, 'portal_rotatool')\n members = rota_tool.getAvailableReporters()\n return DisplayList([(m.UID(), m.Title()) for m in members])", "def notifiers(self):\n return self.registry.keys()", "def get_public_keys(self):\n return self.control_connection.call('get_agents_publickeys')", "def get_spawning_profile_list(intersection):\n return intersection.get_spawning_profile_list()", "def list(cls):\n\n forges = cls.forges()\n\n return {\"forges\": [{\"id\": id, \"description\": forges[id]} for id in sorted(forges.keys())]}", "def forges():\n\n forges = {}\n\n for forge_path in sorted(glob.glob(\"/opt/service/forge/*.yaml\")):\n if forge_path.split(\"/\")[-1] not in [\"fields.yaml\", \"values.yaml\"]:\n with open(forge_path, \"r\") as forge_file:\n forges[forge_path.split(\"/\")[-1].split(\".\")[0]] = yaml.safe_load(forge_file)[\"description\"]\n\n return forges", "def relations(self):\n\t\treturn [(self.factions[k][0], self._faction_affinity.get(k, 50)) for k in self.factions.keys()]", "def available_policies(self):\n return tuple(self._policies.keys())", "def compute_agents(self):\n path = '/os-agents'\n res = self.compute.call(path, 'GET', data='', \n token=self.manager.identity.token)\n self.logger.debug('Get openstack compute agents: %s' % truncate(res))\n return res[0]['agents']", "def get_session_algorithms(self): # real signature unknown; restored from __doc__\n return \"\"", "def get(self):\n return {\"claims\": g.claims}, 200", "def fingerprint(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"fingerprint\")", "def policies(self):\n return self._policies", "def get_registries(self):\n raise NotImplementedError(\"get_registries method is not implemented.\")", "def list_caps():\n global _CAPABILITIES_MAP\n\n try:\n return tuple(sorted(_CAPABILITIES_MAP.keys()))\n\n except NameError:\n pass # We can remedy this.\n\n loop = get_loop()\n\n controller_connection = CioRoot(loop)\n\n _CAPABILITIES_MAP = {}\n\n for capability_id in controller_connection.init():\n _CAPABILITIES_MAP[capability_id] = {\n 'acquire': controller_connection.acquire,\n 'release': controller_connection.release,\n }\n\n return tuple(sorted(_CAPABILITIES_MAP.keys()))", "def generate_statistics(self, timestep):\n total_friends = 0\n total_enemies = 0\n affinity_entries = 0\n num_online = len(self.online_agents)\n for agent in self.online_agents:\n total_friends += len(agent.friends)\n total_enemies += len(agent.enemies)\n affinity_entries += len(agent.affinity_map)\n self.logger.log(3, \"round %d: %d agents, each average of %d friend(s), %d unfriend(s), %d people known\" %\n (timestep, num_online, total_friends / num_online, total_enemies / num_online,\n affinity_entries / num_online))\n self.logger.log(3, \"Relationship between online agents 0 and 1 (degrees of separation): %r\" %\n (find_degrees_of_separation(self.online_agents[0], self.online_agents[1])))\n\n # Randomly pick a couple pairs of agents and check to see how many degrees of separation there are 
between\n # those two agents.\n num_users_to_average_separation = int(len(self.online_agents) / 200)\n deg_sep = 0\n unknowns = 0\n for x in range(num_users_to_average_separation):\n a1 = random.randint(0, len(self.online_agents)-1)\n a2 = a1\n while a2 == a1:\n a2 = random.randint(0, len(self.online_agents)-1)\n sep = find_degrees_of_separation(self.online_agents[a1], self.online_agents[a2])\n if sep is not None:\n deg_sep += sep\n else:\n unknowns += 1\n\n if num_users_to_average_separation != unknowns:\n deg_sep = int(deg_sep / (num_users_to_average_separation - unknowns))\n\n self.logger.log(3, \"%d random user pairs whom have a chain of connection, the average length of\"\n \" that chain is %d. %d had no path to other agent.\" %\n (num_users_to_average_separation, deg_sep, unknowns))\n\n self.logger.log(3, \"There were %d messages sent and %d messages received this round.\" %\n (self.messages_sent, self.messages_received))\n\n self.logger.log(3, \"------------\")\n self.total_messages_received += self.messages_received\n self.total_messages_sent += self.messages_sent\n self.messages_sent = 0\n self.messages_received = 0", "def get_selected_policies(actor):\n dike_model, _ = get_model_for_problem_formulation(actor)\n levers = [lever.name for lever in dike_model.levers]\n policies_df = pd.read_csv('simulation/selected/selected_policies_' + actor + '.csv')\n policies_df = policies_df.loc[:, levers]\n policies = []\n\n for i, row in policies_df.iterrows():\n policy = Policy(f'Policy {i}', **row.to_dict())\n policies.append(policy)\n\n return policies", "def fingerprint(self):\n\n tree = self.xml()\n self.fingerprint = \"[\"\n # start with dummy tier\n self.analyze_tier(\n {\"id\": self.path, \"constraint\": \"root\", \"ltype\": \"\"},\n 0,\n # lump=lump\n )\n self.fingerprint += \"]\"\n return self.fingerprint", "def _gpg_keys(self) -> ListKeys:\n return self.gpg.list_keys()", "def init_results(self):\n results = {}\n for i in range(len(self.agents)):\n actor = self.agents[i]\n actor.set_name(\"Actor\") # the filename part of the name is set in actor using filename from init_agents\n results[actor.name] = 0\n return results", "def get_fingerprints(fp, format = None):\n\n spectrum, axis_freqs, axis_times = do_fft(*get_raw(fp, format))\n peaks = find_peaks(spectrum, axis_freqs, axis_times)\n fingerprints = calculate_fingerprints(peaks)\n\n return fingerprints", "def list_policies(policies, verbosity):\n print()\n if verbosity < 1:\n rows = []\n for p in sorted_by_name(policies):\n rows.append((p.name, p.generator, p.length, p.frequency))\n print_table(('NAME', 'GEN', 'LEN', 'FREQ'), rows)\n else:\n for policy in sorted_by_name(policies):\n chars = NONE\n if policy.disallowed_characters:\n chars = ''.join(sorted(policy.disallowed_characters))\n print_detail(\n policy.name, (\n ('description', nullable(policy.description)),\n ('specs', get_policy_specs(policy)),\n ('∅ chars', chars),\n ),\n )\n print()", "def filter_func(self, agents):\n return [\n agent for agent in agents\n if agent.energy < self.model.energy_threshold and not agent.pack\n ]", "def reset(self):\n agent_info = []\n\n for a in self.agents:\n agent_info.append(a.reset())\n print('agent_info', agent_info)\n return agent_info", "def get_capsules(method=\"\"):\n return _get(\"capsules\", method)", "def fingerprint(self):\n return self.gpg.list_keys()[0]['fingerprint']", "def generate_random_agent_keys():\n\n new_random_agent = list(all_waypoints)\n random.shuffle(new_random_agent)\n\n converted_set = np.array(new_random_agent)\n 
i = 0\n for key in new_random_agent:\n converted_set[i] = names_lookup[key]\n i+= 1\n\n #print converted_set\n return converted_set", "def verifiers(self):\n # type: () -> Dict[str, PublicKey]\n return self._verifiers", "def list_trusted_issuers():\n\n # Query the blockchain and manage exceptions\n try:\n trusted_issuers = tf.dump_trusted_identities()\n except Exception as e:\n detail=str(e)\n log.error(detail)\n raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=detail)\n\n return {\"payload\": trusted_issuers}", "def get_signatories(account_id):\n query = iroha.query(\"GetSignatories\", account_id=account_id)\n ic.sign_query(query, user_private_key)\n response = net.send_query(query)\n data = MessageToDict(response)\n pprint(data, indent=2)", "def getFuList(self):\n if self._fupardict:\n return self._fupardict.keys()\n buf = self._parent.Xeprbuf(10000)\n self.aqGetExpFuList(buf, 10000)\n return buf.get_unicode_str().split(',')", "def compute_fingerprint(self):\n\t\t# initialize tensorflow session\n\t\tsess = tf.InteractiveSession()\n\t\tclf_image = wb_clf(config.clf_image_filename, reshape_size=None, input_dim=(100,200), \n\t\t\t\t\t\t\tname=\"image\", ALEXNET=True)\n\t\tclf_blot = wb_clf(config.clf_blot_filename, input_dim=(15,30), reshape_size=4*8*64, name=\"blot\")\n\t\tsess.run(tf.global_variables_initializer())\n\t\t# create western blot fingerprinting object\n\t\tWB = WesternBlot(clf_image=clf_image, clf_blot=clf_blot)\n\t\tWB.figure = self.figure\n\t\tWB.figure_gray = cv2.cvtColor(self.figure, cv2.COLOR_BGR2GRAY)\n\t\t# compute fingerprint\n\t\tWB.westernBlotExtractor(VISUALIZE=False)\n\t\tself.local_database = WB.Fingerprint", "def sample(self):\n return [agent_observation_space.sample() for agent_observation_space in self._agents_observation_space]", "def loaded_keys(self):\n\n keys = {}\n\n cmd = ['ssh-add', '-l']\n p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)\n (stdout, stderr) = [str(x, 'utf-8') for x in p.communicate()]\n\n if p.returncode == 1:\n line = stdout.split('\\n')[0].strip()\n if line == 'The agent has no identities.':\n return keys\n\n if p.returncode != 0:\n raise SSHKeyError('Error listing loaded SSH agent keys')\n\n for line in [line.rstrip() for line in stdout.split('\\n')[:-1]]:\n data = parse_public_key_line_pattern(line)\n if not data:\n raise SSHKeyError('Error parsing agent key list line {}'.format(line))\n keys[data['fingerprint']] = data\n\n return keys", "def get_all_scopes(self):\n return dict(ProtectedCapability.objects.values_list('slug', 'title'))", "def get_agents(self):\n ret = []\n for i in self.all_instances:\n if i.instance_type == InstanceType.AGENT:\n ret.append(i)\n return ret", "def analyze(website, scanners):\n attributes = {}\n for scanner in scanners:\n attribute = scanner(website)\n attributes[attribute.key] = attribute\n return attributes", "def capabilities(self):\n pass", "def get(self):\n\n return self.get_request_handler(request.headers).get_all_genders()", "def list_policies(self):\n client = self.connect(VAULT_TOKEN)\n return client.list_policies()", "def search_a_for_genetic(env:RailEnv,randomized):\r\n schedules = []\r\n occupancy_map=[[] for i in range(len(env.agents))]\r\n\r\n n_timesteps = np.array([])\r\n state_schedule =[]\r\n conv = StateConverter(env)\r\n # Compute the transition and valid action table\r\n model = convert_to_transition(env, conv)\r\n # Calculate the shortest dist from one state to another state\r\n shortest = all_pairs_shortest_paths(conv.num_states, model[0])\r\n 
random_order_agent = randomized\r\n print(random_order_agent)\r\n\r\n for i in random_order_agent:\r\n # Compute occupancy map\r\n occupancy_map[i] = compute_map(i, random_order_agent, n_timesteps, state_schedule, conv)\r\n\r\n # Compute schedule for each agent based on the occupancy map\r\n each_schedule = a_star_search(SearchEnv(env,conv,model,shortest,i).get_root_node(),occupancy_map[i])\r\n #print(each_schedule)\r\n schedules.append(each_schedule[0])\r\n state_schedule.append(each_schedule[1])\r\n n_timesteps = np.append(n_timesteps, [len(each_schedule[1])])\r\n\r\n # Combine separate actions into a list\r\n actions = combine(schedules,random_order_agent,int(np.max(n_timesteps)))\r\n\r\n return actions", "def describe(self, urns, client_cert, credentials):\n se_manifest, se_slivers, last_slice = SERMv3ManifestFormatter(), [], \"\"\n\n result = []\n\n if self._verify_users:\n for urn in urns:\n logger.debug(\"describe: authenticate the user for %s\" % (urn))\n client_urn, client_uuid, client_email =\\\n self.auth(client_cert, credentials, urn, (\"sliverstatus\",))\n logger.info(\"Client urn=%s, uuid=%s, email=%s\" % (\n client_urn, client_uuid, client_email,))\n\n links_db, nodes, links = self.SESlices.get_link_db(urn)\n self.SESlices._create_manifest_from_req_n_and_l(se_manifest, nodes,links)\n\n result.append( \n { \n \"geni_sliver_urn\": links_db['geni_sliver_urn'],\n \"geni_expires\": links_db['geni_expires'],\n \"geni_allocation_status\": links_db[\"geni_allocation_status\"],\n \"geni_operational_status\" : \"Not yet implemented\"\n }\n )\n\n\n logger.debug(\"SE-ManifestFormatter=%s\" % (se_manifest,))\n # logger.debug(\"SE-Slivers(%d)=%s\" % (len(links_db), links_db,))\n\n\n return {\"geni_rspec\": \"%s\" % se_manifest,\n \"geni_urn\": urns,\n \"geni_slivers\": result}", "def claimlist(self):\n return list(\n set(\n list(self.caller.player_ob.db.claimed_scenelist or [])\n + list(self.requested_validation)\n )\n )", "def get_drivers():\n return [str(d) for d in drivers.values()]", "def dump_trusted_identities():\n\n node_name = \"ala\"\n\n numberSubnodes = ens.numberSubnodes(node_name)\n id_list = []\n\n # Iterate for each node\n for i in range(numberSubnodes):\n\n # Get the subnode (in name_hash format)\n subnode_hash = ens.subnode(node_name, i)\n\n # Get the data for the subnode\n DID, name, DIDDocument, active = resolver.AlaDIDPublicEntity(\n node_hash=subnode_hash)\n\n identity = {\n \"DID\": DID,\n \"name\": name,\n \"node_hash\": subnode_hash.hex()\n }\n id_list.append(identity)\n \n return id_list", "def mechanisms(self):\n return list(self)", "def describe_agents(agentIds=None, filters=None, maxResults=None, nextToken=None):\n pass", "def __qualitaetsListeProteins(self):\n rv = []\n pam30_sortierbar = {}\n for key in pam30.keys():\n pam30_sortierbar[str(pam30[key]) + \";\" + ''.join(key)] = pam30[key]\n if key[0] != key[1]:\n pam30_sortierbar[\n str(pam30[key]) + \";\" + ''.join((key[1], key[0]))\n ] = pam30[key]\n sorted_keys = list(pam30_sortierbar.keys())\n sorted_keys.sort(key=lambda k: int(k.split(\";\")[0]), reverse=True)\n # debugging kept for historical reasons\n # for key in iter(sorted_keys):\n # print(key.split(\";\")[1] + \" has score \" + str(pam30_sortierbar[key]))\n for key in iter(sorted_keys):\n rv.append(key.split(\";\")[1])\n return(rv)", "def get_agency_terms(self):\n return # osid.authentication.AgencyQueryInspector", "def list_profiles(self):\n return self._get(\"posture\", box=BoxList)", "def get_interactions_fingerprint(self, fingerprint: str) -> 
List[IgnoreFingerprintRecord]:\n\n with self.session.begin() as session:\n interactions = (\n session.query(IgnoreFingerprintRecord).filter(IgnoreFingerprintRecord.fingerprint == fingerprint).all()\n )\n return [\n {\n \"id\": t.id,\n \"fingerprint\": t.fingerprint,\n \"ignore_type\": t.ignore_type,\n \"reported_at\": t.reported_at,\n \"expires_at\": t.expires_at,\n }\n for t in interactions\n ]", "def get_patient_dict():\r\n return common.get_dict_all(get_patient_filename(), None)", "def get(self):\n rest_params = common.get_restful_params(self.request.uri)\n if rest_params is None:\n common.echo_json_response(self, 405, \"Not Implemented: Use /agents/ interface\")\n return\n\n if \"agents\" not in rest_params:\n common.echo_json_response(self, 400, \"uri not supported\")\n logger.warning('GET returning 400 response. uri not supported: ' + self.request.path)\n return\n\n agent_id = rest_params[\"agents\"]\n\n if agent_id is not None:\n agent = self.db.get_agent(agent_id)\n if agent is not None:\n response = cloud_verifier_common.process_get_status(agent)\n common.echo_json_response(self, 200, \"Success\", response)\n #logger.info('GET returning 200 response for agent_id: ' + agent_id)\n\n else:\n #logger.info('GET returning 404 response. agent id: ' + agent_id + ' not found.')\n common.echo_json_response(self, 404, \"agent id not found\")\n else:\n # return the available keys in the DB\n json_response = self.db.get_agent_ids()\n common.echo_json_response(self, 200, \"Success\", {'uuids':json_response})\n logger.info('GET returning 200 response for agent_id list')", "def execution_graph_mechs(self):\n return list(mech_tuple[0] for mech_tuple in self.executionGraph)", "def m_dump_all_identities():\n\n m_dump_identities(\"ala\")", "def list(self):\n path = 'orgProvisioning/ipGreTunnelInfo'\n return self._session.get(path)", "def get_driver_list():\n return list(object_store.ObjectStorageDriver.registry.keys())", "def choose(self):\n # pick agent A\n keys = list(self._agents.keys())\n keyA = random.choice(keys)\n agentA = self.model.schedule.agents[keyA]\n\n # pick pick agent B\n keyB = random.choice(agentA.neighbors)\n agentB = self.model.schedule.agents[keyB]\n\n return agentA, agentB", "def make_recommendation_ga(playlist):\n tracklist = []\n\n # tracknames = list(playlist['name'])\n print(playlist.head())\n\n track_features = playlist[['danceability', 'energy']]\n # 'speechiness', 'acousticness',\n # 'instrumentalness', 'liveness', 'valence']]\n\n track_features_matrix = track_features.values\n\n path, fitness = ga.genetic_algorithm(track_features_matrix, plot=False)\n\n visualization.plot_path(\n track_features,\n path,\n fitness,\n mode=\"none\",\n keep=True\n )\n\n return tracklist", "def _hydro_metrics(self) -> list:\n\n return self._minimal() + [\n 'fdc_flv', 'fdc_fhv',\n 'kge', 'kge_np', 'kge_mod', 'kge_bound', 'kgeprime_c2m', 'kgenp_bound',\n 'nse', 'nse_alpha', 'nse_beta', 'nse_mod', 'nse_bound']" ]
[ "0.59994614", "0.588699", "0.5650054", "0.5606896", "0.55353206", "0.5501572", "0.5438777", "0.5225652", "0.52099985", "0.51455563", "0.5142792", "0.51344025", "0.5094674", "0.50501364", "0.50284547", "0.49927717", "0.49804366", "0.49701825", "0.49695376", "0.49271792", "0.49101743", "0.49095923", "0.49038154", "0.4899712", "0.4866054", "0.4842499", "0.48377603", "0.48178318", "0.48075372", "0.4775204", "0.47734976", "0.47646365", "0.47573856", "0.4751456", "0.47296003", "0.4717857", "0.4716225", "0.47104505", "0.4704746", "0.46937025", "0.46928886", "0.46791327", "0.46772933", "0.46737918", "0.46695697", "0.46690494", "0.46585196", "0.46515307", "0.46329442", "0.46309224", "0.46174067", "0.46049854", "0.46028763", "0.4601013", "0.45949647", "0.45905748", "0.45883167", "0.4583611", "0.45697325", "0.45671538", "0.45662", "0.45654145", "0.45452154", "0.45420137", "0.4538979", "0.4534148", "0.4528782", "0.45276582", "0.45175958", "0.45168957", "0.45110866", "0.45097706", "0.45045674", "0.4500355", "0.4491484", "0.4489395", "0.44842023", "0.44744894", "0.44707277", "0.44705752", "0.44673494", "0.4466627", "0.4462427", "0.44596305", "0.44488356", "0.44401354", "0.44392222", "0.44368848", "0.4430049", "0.44282794", "0.44282582", "0.4411629", "0.44088173", "0.44035354", "0.44005808", "0.4398491", "0.43949547", "0.439475", "0.43922523", "0.4385501", "0.43826213" ]
0.0
-1
Get actions of each agent's neighbour in the graph.
def get_neighbor_action(self, action):
    naction = []
    for i in range(self.n_agent):
        naction.append(action[self.neighbor_mask[i] == 1])
    return naction
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def actions(self) -> list:\n if self.debug: print(f\"StateNode.actions()\")\n if not self._examined:\n if self.debug: print(f\"\\tExamining...\")\n self._edges = self.state.actions()\n for e in self._edges:\n e: Action\n e.source = self\n e.cost = self.get_cost(e)\n self._examined = True\n return self._edges", "def actions(self, agent_state):\n raise NotImplementedError(\"Don't know what actions are available\")", "def act(self, states: np.ndarray, eps: float = 0.0) -> List[np.ndarray]:\n actions = [\n agent.act(state.reshape(-1, 1).T, eps)\n for agent, state in zip(self.agents, states)\n ]\n return actions", "def get_all_valid_actions(self):\r\n\r\n # Select, for each agent, the valid actions based on its position (state).\r\n agent_actions = self.searchenv.valid_actions[self.searchstate.positions[0]]\r\n\r\n #print(\"Agent Action: \",agent_actions)\r\n\r\n # Mask the rail transition actions for idle agents.\r\n if self.searchstate.actives == 0:\r\n agent_actions = [0, 0, 1, 0, 1] # STOP_MOVING, or MOVE_FORWARD.\r\n\r\n # Mask the rail transition actions for done agents.\r\n if self.agents_at_goal() == True:\r\n agent_actions = [1, 0, 0, 0, 0] # DO_NOTHING only.\r\n\r\n # Identify for each agent the IDs of the valid actions (i.e., [0, 1, 1, 0, 0] --> [1, 2])\r\n agent_action_list =[]\r\n for i in range(len(agent_actions)):\r\n if agent_actions[i] == 1:\r\n agent_action_list.append(i)\r\n\r\n # Return list containing for each agent, the IDs of the actions available to it.\r\n return agent_action_list", "def get_actions(self):\n return self.agent.get_actions()", "def get_possible_actions(self, world_state,agent_host):\n action_list = []\n possibilities = {'movenorth 1': -3,'movesouth 1': 3,'moveeast 1': 1,'movewest 1': -1}\n #check walls to see whether can move left,right,back,forward\n #check floor beneath to see whether should do anything at all, or just nothing and sink\n obs_text = world_state.observations[-1].text\n obs = json.loads(obs_text)\n grid = load_grid(world_state)\n print 'GRID SIZE: ', len(grid)\n for k,v in possibilities.items():\n #with current grid, index 31 will always be our agent's current location\n #check walls to see whether can move left,right,back,forward\n if grid[31+v+9] == 'water' or grid[31+v+9] == 'wooden_door': #+9 because we want to check\n action_list.append(k) #where our feet are located\n #check if you can teleport down a level\n if grid[31-27] == 'water' or grid[31-27] == 'wooden_door':\n action_list.append(self.teleport(agent_host,False))\n #check if you can teleport up a level\n if grid[31+45] == 'water' or grid[31+45] == 'wooden_door':\n action_list.append(self.teleport(agent_host,True))\n\n print(\"ACTION LIST: {}\".format(action_list))\n return action_list", "def traverse(self, action_details: Dict):\n agent = action_details[\"agent_id\"]\n self.agents[agent-1].traversing = True\n # distanation node\n dest_node = action_details[\"to\"]\n\n # TODO add checks for from and to nodes\n\n node1, node2, distance = self.agents_location[agent]\n # people_collected = 0\n \n # If the agent is in node ( not on the edge ) check if the distination node is its neighbor\n if node1 == node2 and self.graph.is_neighbours(node1, dest_node) and not (node2,dest_node) in self.blocked_edges :\n # Get (node1,dest_node) edge weight\n\n edge_weight = self.graph.get_weight(node1, dest_node)\n\n # Move the agent into the edge (node1,dest_node)\n distance = edge_weight - 1\n self.agents_location[agent] = [node1, dest_node, distance]\n action_succeed = True\n\n # If the agent 
is already inside the edge , check whether destination node is correct\n elif node1 != node2 and node2 == dest_node:\n\n # Move the agent one step on the edge\n distance -= 1\n self.agents_location[agent][2] = distance\n\n action_succeed = True\n else:\n # If the destination node is wrong\n action_succeed = False\n # TODO write warning\n\n # If the agent arrived to some node , collect all the people there and change the location from [node1,node2,X]\n # to [dest_node,dest_node,0]\n if distance == 0 and action_succeed:\n self.agents_location[agent] = [dest_node, dest_node, 0]\n self.agents[agent-1].traversing = False\n self.agents[agent-1].location = dest_node\n action_succeed = True\n\n self.agents_last_action[agent] = action_succeed\n\n new_observation = self.get_observation({})\n\n return new_observation", "def process_actions(self, n_steps, actions):\n # Each row of actions is one time step,\n # row contains action indices for all agents\n # Convert to [time, agents, l_action]\n # so each agent gets its own 1-hot row vector\n actions_1hot = np.zeros([n_steps, self.n_agents, self.l_action], dtype=int)\n grid = np.indices((n_steps, self.n_agents))\n actions_1hot[grid[0], grid[1], actions] = 1\n # Convert to format [time*agents, agents-1, l_action]\n # so that the set of <n_agent> actions at each time step\n # is duplicated <n_agent> times, and each duplicate\n # now contains all <n_agent>-1 actions representing\n # the OTHER agents actions\n list_to_interleave = []\n for n in range(self.n_agents):\n # extract all actions except agent n's action\n list_to_interleave.append( actions_1hot[:, np.arange(self.n_agents)!=n, :] )\n # interleave\n actions_others_1hot = np.zeros([self.n_agents*n_steps, self.n_agents-1, self.l_action])\n for n in range(self.n_agents):\n actions_others_1hot[n::self.n_agents, :, :] = list_to_interleave[n]\n # In-place reshape of actions to [time*n_agents, l_action]\n actions_1hot.shape = (n_steps*self.n_agents, self.l_action)\n\n return actions_1hot, actions_others_1hot", "def _obtain_OtherAgentsActionsSummationTensor(self):\n dim = np.concatenate(([self.N], # agent i\n [self.N for _ in range(self.N-1)], # other agnt\n [self.M], # agent a of agent i\n [self.M for _ in range(self.N)], # all acts\n [self.M for _ in range(self.N-1)])) # other a's\n Omega = np.zeros(dim.astype(int), int)\n\n for index, _ in np.ndenumerate(Omega):\n I = index[0]\n notI = index[1:self.N]\n A = index[self.N]\n allA = index[self.N+1:2*self.N+1]\n notA = index[2*self.N+1:]\n\n if len(np.unique(np.concatenate(([I], notI)))) is self.N:\n # all agents indicides are different\n\n if A == allA[I]:\n # action of agent i equals some other action\n cd = allA[:I] + allA[I+1:] # other actionss\n areequal = [cd[k] == notA[k] for k in range(self.N-1)]\n if np.all(areequal):\n Omega[index] = 1\n\n return Omega", "def getNeighbors(self):\n targets = set()\n for arc in self._arcsFrom:\n targets.add(arc.getFinish())\n return [ node for node in sorted(targets) ]", "def act(self, obs_all_agents, noise=0.0):\n actions = [agent.act(obs, noise) for agent, obs in zip(self.maddpg_agent, obs_all_agents)]\n return actions", "def act(self, obs_all_agents, noise=0.0):\n actions = [agent.act(obs, noise) for agent, obs in zip(self.maddpg_agent, obs_all_agents)]\n return actions", "def actions(self):\n x, y = self._empty\n\n actions = []\n\n if x > 0: actions.append((x - 1, y))\n if y > 0: actions.append((x, y - 1))\n if x < self._size - 1: actions.append((x + 1, y))\n if y < self._size - 1: actions.append((x, y + 1))\n\n 
return actions", "def act(self, obs_all_agents, noise=0.0):\n actions_next = [agent.act(obs, noise) for agent, obs in zip(self.maddpg_agent, obs_all_agents)]\n return actions_next", "def get_possible_actions(self) -> [Action]:\r\n if self.fields[self.agent_x][self.agent_y] == Field.EMPTY or self.terminated:\r\n return [Action.NORTH, Action.EAST, Action.SOUTH, Action.WEST]\r\n else: # must be terminal\r\n return [Action.TERMINAL]", "def act(self, states, add_noise=True):\n actions = np.zeros([self.num_agents, self.action_size])\n for index, agent in enumerate(self.agents):\n actions[index, :] = agent.act(states[index], add_noise)\n return actions", "def actions(self):\n return {0, 1, 2, 3, 4, 5, 11, 12}", "def solution(self):\n return [node.action for node in self.path()[1:]]", "def solution(self):\n return [node.action for node in self.path()[1:]]", "def solution(self):\n return [node.action for node in self.path()[1:]]", "def solution(self):\n return [node.action for node in self.path()[1:]]", "def actions(self, state):\n\n\t\tpossibleActions = []\n\n\t\tflashlightLocation = state[0]\n\n\t\t\"\"\"\n\t\t\tIf a person is on the side of the flashlight, then they can cross the bridge by themselves or \n\t\t\tthey can cross with another person who is also on their side (the side of the flashlight).\n\t\t\t-\tSo we add an action for this person crossing by themselves, and also actions for them crossing\n\t\t\t\twith other people (each of these actions is them crossing with one of these other \n\t\t\t\tpeople, making 2 of them crossing the bridge)\n\t\t\t\t\n\t\t\tNote that person i and person j crossing the bridge is the same action as person j and person i crossing, \n\t\t\tand we only want to add this action once so when determining the people that person i can cross with \n\t\t\twe look at people who come after this person i (a person j where j > i) \n\t\t\"\"\"\n\n\t\tfor personI in range(1, self.n+1): # exclude the flashlight - only traverse the peoples' locations\n\t\t\tif state[personI] == flashlightLocation: #This person can cross the bridge\n\t\t\t\taction = [personI] # This person (person i) can cross bridge on their own (with the flashlight)\n\t\t\t\tpossibleActions.append(action)\n\t\t\t\tfor personJ in range(personI+1, self.n+1):\n\t\t\t\t\tif state[personJ] == flashlightLocation: # This person (person j) can cross the bridge\n\t\t\t\t\t\taction = [personI, personJ] # person i can cross the bridge with person j (and the flashlight)\n\t\t\t\t\t\tpossibleActions.append(action)\n\n\t\treturn possibleActions", "def solution(self):\n\t\treturn [node.action for node in self.path()[1:]]", "def actions(self, states, agent_indices):\n return NotImplementedError()", "def iteredges(self):\n for source, targets in self.successors.items():\n for target in targets:\n yield source, target", "def neighbours(self):\n return [x.node for x in self.edges]", "def neighbors(\n self, state: Grid2D.State\n ) -> Iterable[Tuple[Grid2D.Action, Grid2D.State]]:\n # pylint: disable=invalid-name\n for a, cell in self.adjacent_coordinates(cell=state.agent_position):\n if not self.is_wall(cell):\n yield (a, Grid2D.State(cell))", "def actions(self):\n return self._action_list", "def act(self, obs_all_agents, noise=0.0):\n actions = [agent.act(obs, noise) for agent, obs in zip(self.maddpg_agent, obs_all_agents)]\n return np.array(actions)", "def generate_actions(self):\n \n # For all state nodes\n action = 0\n \n for l in range(self.u0_n):\n for m in range(self.u1_n):\n \n u = np.array([ self.ud[0][l] , 
self.ud[1][m] ])\n \n # State and grid index based on node #\n self.actions_input[action,:] = u\n self.actions_index[action,:] = np.array([l,m])\n \n # Increment node number\n action = action + 1", "def neighbors(self):\n return [e.name for e in self.edges()]", "def all_actions(self):\n actions = self.actions.stream[:]\n for eq in self.equipment:\n actions.extend(eq.actions)\n return actions", "def actions(self) -> Sequence[_A_out]:\n return self._actions", "def target_act(self, obs_all_agents, noise=0.0):\n target_actions = [ddpg_agent.target_act(obs, noise) for ddpg_agent, obs in zip(self.maddpg_agent, obs_all_agents)]\n return target_actions", "def target_act(self, obs_all_agents, noise=0.0):\n target_actions = [ddpg_agent.target_act(obs, noise) for ddpg_agent, obs in zip(self.maddpg_agent, obs_all_agents)]\n return target_actions", "def target_act(self, obs_all_agents, noise=0.0):\n target_actions_next = [ddpg_agent.target_act(obs, noise) for ddpg_agent, obs in\n zip(self.maddpg_agent, obs_all_agents)]\n return target_actions_next", "def step(self, actions): # actions is a list,\n\n assert len(actions) == len(self.agents), \"Number of actions (\" + str(\n len(actions)) + \") does not match number of agents (\" + str(self.n_agents) + \")\"\n\n # Process movement based on real states (not belief)\n\n\n rewards = [0.] * self.n_agents\n\n reward = 0.\n\n\n nextcells = [None] * self.n_agents\n rand_nums = self.rng.uniform(size=self.n_agents)\n\n for i in range(self.n_agents):\n\n currcell = self.tocellcoord[self.agents[i].state]\n if isinstance(actions,int):\n act = actions\n else:\n act = actions[i]\n direction = self.directions[act]\n\n if rand_nums[i] > 1/3: # pick action as intended\n if self.occupancy[tuple(currcell + direction)] == 0:\n nextcells[i] = self.tocellnum[tuple(currcell+direction)]\n else:\n nextcells[i] = self.tocellnum[tuple(currcell)] # wall collision\n # rewards[i] += self.collision_penalty\n\n else: # pick random action, except one initially intended\n adj_cells = self.adjacent_to(currcell) # returns list of tuples\n adj_cells.remove(tuple(currcell+direction))\n\n index = self.rng.choice(range(len(adj_cells)))\n new_cell = adj_cells[i]\n\n if self.occupancy[new_cell] == 0:\n nextcells[i] = self.tocellnum[new_cell]\n else:\n nextcells[i] = self.tocellnum[tuple(currcell)] # wall collision\n # rewards[i] += self.collision_penalty\n\n\n # check for inter-agent collisions:\n collisions = [c for c, count in Counter(nextcells).items() if count > 1]\n while(len(collisions) != 0): # While loop needed to handle edge cases\n for i in range(len(nextcells)):\n if nextcells[i] in collisions:\n nextcells[i] = self.agents[i].state # agent collided with another, so no movement\n\n\n collisions = [c for c, count in Counter(nextcells).items() if count > 1]\n\n\n for i in range(self.n_agents):\n if nextcells[i] == self.agents[i].state: # A collision happened for this agent\n rewards[i] += self.collision_penalty\n else:\n s = nextcells[i] # movement is valid\n self.agents[i].state = s\n if s in self.goals and s not in self.discovered_goals:\n rewards[i] += self.goal_reward\n self.discovered_goals.append(s)\n #rewards[i] += broadcasts[i]*self.broadcast_penalty\n\n\n self.currstate = tuple(nextcells)\n\n\n\n reward = np.sum(rewards)\n\n self.step_count += 1\n\n\n # If all goals were discovered, end episode\n done = len(self.discovered_goals) == len(self.goals)\n\n \n return reward, self.currstate, done, None", "def getActions(self):\n actions = self.actions[:]\n return actions", "def 
generate_actions(self):\n \n # For all state nodes\n action = 0\n \n for k in range(self.u0_n):\n \n u = np.array([ self.ud[0][k] ])\n \n # State and grid index based on node #\n self.actions_input[action,:] = u\n self.actions_index[action,:] = k\n\n # Increment node number\n action = action + 1", "def get_direct_outputs(self):\n # TODO: brute force:\n result = []\n actions = self.root.get_all_nodes()\n for action in actions:\n if self in action.get_direct_inputs():\n result += [action]\n return result", "def get_list_of_actions(self):\n return self.actions", "def act(self, states, add_noise=True):\n \n actions = []\n for agent, state in zip(self.agents, states):\n action = agent.act(state, noise_weight=self.noise_weight, add_noise=self.enable_noise)\n actions.append(action)\n self.noise_weight *= self.noise_decay\n return np.array(actions).reshape(1, -1) # flatten", "def get_actions(self, vert):\n return self.adjacent_vert_dict[vert]", "def actions(self) -> list:\n if self.debug: print(f\"AState.actions()\")\n if not self._examined:\n if self.debug: print(f\"\\tExamining...\")\n self._actions = self._generate_actions()\n self._examined = True\n return self._actions", "def actions(self):\r\n return self.puzzle.actions", "def neighboring_consumers(self, position_list):\n agent_list = []\n #loop over all neighbors\n for position in position_list:\n agents_in_cell = self.model.grid.get_cell_list_contents(position)\n #loop over all agents in the cell to find if agent is present\n for agent in agents_in_cell:\n if type(agent).__name__ == \"Consumer\":\n agent_list.append(agent)\n \n return agent_list", "def actions(self):\n return self._actions", "def GetMoves(self):\n return [(source, target) for source in self.scores.keys() for target in self.fullGraph.neighbors_iter(source) if target not in self.pathes[source].nodes]", "def get_actions(g: Game):\n\n act_actions = []\n act = [Income, ForeignAid, Coup, Tax, Assassinate, Exchange, Steal]\n\n opponents = g.get_opponents()\n\n if g.players[g.action_player].coins >= 10:\n act = [Coup]\n\n for x in act:\n if g.players[g.action_player].coins >= x.cost:\n if x.attack_action:\n for p in range(len(opponents)):\n if g.players[opponents[p]].in_game:\n act_actions.append((x, p))\n else:\n act_actions.append((x, None))\n\n return act_actions", "def target_act(self, obs, noise=0.0):\n #return target_actions\n target_actions = torch.zeros(obs.shape[:2] + (self.action_size,), dtype=torch.float, device=device)\n for i in range(self.num_agents):\n target_actions[:, i, :] = self.maddpg_agent[i].target_act(obs[:, i])\n \n return target_actions", "def get_neighbours(self):\n return []", "def edges(self):\n return self.dovetails + self.containments + self.internals", "def actions(self):\n return np.array([m['actions'] for m in self.model_outs], dtype=np.int32)", "def iterate_edges(\n self, verbs=None, directions=None, nodes=None\n ) -> Iterable[Edge]:", "def actions(self):\n\n return self._actions.getSlice(0)", "def actions(self):\n\n return self._actions.getSlice(0)", "def get_legal_actions(self, index):\n actions = []\n agent = self.agent_states[index]\n for action in ACTIONS:\n pos = agent.pos[0] + action[0], agent.pos[1] + action[1]\n if MAP[pos[0]][pos[1]] not in WALL:\n actions.append(action)\n return actions", "def get_actions(self, hyper_s):\n hyper_actions = []\n for next_s in self._get_periphery(hyper_s):\n if not self._is_covered(hyper_actions, next_s):\n hyper_actions.append(Action(self._get_hyper_s(next_s)))\n\n return hyper_actions", "def 
neighbors(self) -> List['Node']:\r\n self._load_neighbors()\r\n return [edge.source if edge.source != self else edge.target\r\n for edge in self._neighbors.values()]", "def getSuccessors(self, state):\n\n successors = []\n for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:\n print('search2Agents state',state)\n state1,goals = state\n x,y = state1\n dx, dy = Actions.directionToVector(action)\n nextx, nexty = int(x + dx), int(y + dy)\n if not self.walls[nextx][nexty]:\n nextState = (nextx, nexty)\n cost = self.costFn(nextState)\n successors.append( ( nextState, action, cost) )\n\n\n # Bookkeeping for display purposes\n self._expanded += 1 # DO NOT CHANGE\n if state1 not in self._visited:\n self._visited[state1] = True\n self._visitedlist.append(state1)\n\n return successors", "def step(self, actions):\r\n # Run actions\r\n actions = [np.argmax((action_scores+.0001) * mask) for action_scores, mask in zip(actions, self.get_avail_actions())]\r\n reward, terminated, info = self.env.step(actions)\r\n\r\n # Get updated state\r\n self.state = self.env.get_state()\r\n\r\n # Return arrays for each agent\r\n reward_n = [reward / self.n for _ in range(self.n)]\r\n terminated_n = [terminated for _ in range(self.n)]\r\n info_n = [info for _ in range(self.n)]\r\n observation_n = self.env.get_obs()\r\n\r\n return observation_n, reward_n, terminated_n, info_n", "def getAllActions(self):\n decision_rules = self.getAllDecisionRules()\n return list(itertools.product(decision_rules, self.getAllRobotActions()))", "def _get_of_actions(of_flow_stats):\n # Add list of high-level actions\n # Filter action instructions\n apply_actions = InstructionType.OFPIT_APPLY_ACTIONS\n of_instructions = (ins for ins in of_flow_stats.instructions\n if ins.instruction_type == apply_actions)\n # Get actions from a list of actions\n return chain.from_iterable(ins.actions for ins in of_instructions)", "def Action(self, states, eps=0, isRandom=False):\n action = []\n # No gradient needed\n with T.no_grad():\n states = T.Tensor(states).float().to(device) # Send states to GPU\n [a.eval() for a in self.Actor] # Eval Mode\n # Loop Each Actor Model to get Action as np.array()\n for i in range(len(self.Actor)):\n action.append(self.Actor[i](states[i]).cpu().data.numpy())\n # If less Actor Models the Env. Agents: use Agent[0] to calc the rest\n if len(self.Actor)<len(states):\n act2 = self.Actor[0](states[len(self.Actor):]).cpu().data.numpy() # Get Action\n action = np.append(action, act2, axis=0) # Append to Action Array\n else: action = np.array(action) # Else convert List to np.array()\n [a.train() for a in self.Actor] # Back to Train Mode\n # Add Epsilon Greedy Noise\n action += self.Noise(1 if isRandom else eps, action.shape)\n return action", "def _advance_by_action(game, agents, action):\n getLogger(__name__).debug(\"Agent {} action {}\".format(game.current_agent_id, action))\n agent_id_for_action = game.current_agent_id\n\n game.take_action(action)\n for agent in agents:\n agent.take_action(action, agent.agent_id == agent_id_for_action)", "def getActions(self, state): \n util.raiseNotDefined()", "def build_actions(list_of_tuples):\n node_dict = build_verticies(list_of_tuples)\n ACTIONS = lambda path: node_dict[path.end]\n return ACTIONS", "def toposorted_actions(self) -> Iterable[Action]:\n # Here we execute two \"nanopasses\" (a term borrowed from compiler implementation)\n #\n # 1. Traverse a values-and-actions graph, reducing it to a dependency graph containing actions\n #\n # 2. 
Perform a toposort over actions (using Kahn's algorithm https://en.wikipedia.org/wiki/Topological_sorting)\n #\n # TODO: switch to graphlib from standard library\n #\n # TODO: Consider using Tarjan's strongly connected components algorithm\n # Rationale: Tarjan's SCC would find loops and produce a helpful diagnostic\n\n # 1. Dependency graph representation optimized for toposort\n o: dict[Action, set[Action]] = {} # for actions: action -> set of outgoing dependency edges\n i: dict[Action, set[Action]] = {} # for actions: action -> set of incoming dependency edges\n\n # set of nodes without incoming edges\n s: Set[Action] = set()\n\n # 1. Transform execution plan into dependency graph\n for action in self.actions:\n # if action does not depend on any other action, add it to set s\n if all(inp.producer() is None for inp in action.inputs()):\n s.add(action)\n # add outgoing edges to graph, if any\n for output in action.outputs():\n for depending_action in output.consumers():\n # add an edge action -> depending_action to the graph\n if action not in o:\n o[action] = set()\n if depending_action not in i:\n i[depending_action] = set()\n o[action].add(depending_action)\n i[depending_action].add(action)\n\n # 2. Now run Kahn's algorithm (could be separated from previous to improve abstraction)\n # resulting list\n l: list[Action] = []\n\n while len(s) > 0:\n n = s.pop()\n l.append(n)\n if n in o:\n o_n = o[n]\n del o[n]\n else:\n o_n = set()\n while len(o_n) > 0:\n # remove edge from the graph\n m = o_n.pop()\n i[m].remove(n)\n if len(i[m]) == 0:\n del i[m]\n s.add(m)\n\n if len(o) != 0 or len(i) != 0:\n for (node, edges) in o.items():\n print(\"Source: \" + str(node))\n for e in edges:\n print(\" Edge: \" + str(e))\n raise Exception(\"Dependency graph has at least one cycle\")\n else:\n return l", "def _get_actions(self):\n return self.__actions", "def _get_actions(self):\n return self.__actions", "def _get_actions(self):\n return self.__actions", "def get_actions(\n self, observations: Observations, action_space: gym.Space\n ) -> Actions:\n return super().get_actions(observations, action_space)", "def get_available_actions(self): \n actions = [] \n direction = [[1, 0], [0, 1]]\n for dir_ in direction:\n for point in self.points_generator(): \n dir_p = Point(*dir_)\n new_point = point + dir_p\n try:\n _ = self.game.board[new_point] \n actions.append((point, new_point))\n except OutOfBoardError:\n continue\n return actions", "def getNeighbors(self, current: MstarNode):\n neighbors = []\n options = []\n # Loop over all the agents\n for i in range(self.n_agents):\n node: Node = current.nodes[i]\n options_i = []\n if i in current.collision_set:\n # If the agent in the collision set we add the current node as well as all possible nodes\n options_i.append(node)\n (x, y) = node.position\n moves = {0: (x, y - 1), 90: (x + 1, y), 180: (x, y + 1), 270: (x - 1, y)}\n options_i.append(Node(node.position, node, node.rotation + 90, node.h))\n options_i.append(Node(node.position, node, node.rotation - 90, node.h))\n if self.grid[moves[node.rotation][1]][moves[node.rotation][0]] == 0:\n options_i.append(Node(moves[node.rotation], node, node.rotation,\n self.heuristic(i, moves[node.rotation], node.rotation)))\n else:\n # If the agent is not in the collision set we add only the optimal following node\n try:\n if (node, self.goal.nodes[i]) in self.policy:\n nextPos = self.policy[(node, self.goal.nodes[i])]\n else:\n nextPos = Astar(self.grid, node, self.goal.nodes[i]).solve()\n self.policy[(node, 
self.goal.nodes[i])] = nextPos\n except ValueError:\n print(f\"start: {node}, goal: {self.goal.nodes[i]}\")\n raise RuntimeError()\n options_i.append(Node(nextPos[0], node, nextPos[1], self.heuristic(i, nextPos[0], nextPos[1])))\n options.append(options_i)\n # Take the cartesian product to get all options\n for element in itertools.product(*options):\n neighbors.append(list(element))\n return neighbors", "def get_next_targets(self) -> List['RoutingTable']:\n targets = []\n for edge in self._get_out_edges(self.active_pod):\n new_graph = RoutingTable(self, copy=True)\n new_graph.active_pod = edge.pod\n targets.append((new_graph, edge.send_as_bind))\n return targets", "def actions_to_accel(self, actions_list): #动作处理函数\n a_container = [[] for _ in range(self.agent_num)]\n for agent_idx in range(self.agent_num):\n action = actions_list[agent_idx]\n if action is None:\n accel = [0, 0]\n else:\n if self.agent_list[agent_idx].is_fatigue: #if agent is out of energy, no driving force applies\n accel = [0,0]\n else:\n mass = self.agent_list[agent_idx].mass\n\n assert self.action_f[0] <= action[0] <= self.action_f[1], print('Continuous driving force needs '\n 'to be within the range [-100,200]')\n force = action[0] / mass\n\n assert self.action_theta[0] <= action[1] <= self.action_theta[1], print(\n 'Continuous turing angle needs to be within the range [-30deg, 30deg]')\n theta = action[1]\n\n theta_old = self.agent_theta[agent_idx][0]\n theta_new = theta_old + theta\n self.agent_theta[agent_idx][0] = theta_new\n\n accel_x = force * math.cos(theta_new / 180 * math.pi)\n accel_y = force * math.sin(theta_new / 180 * math.pi)\n accel = [accel_x, accel_y]\n #self.agent_accel[agent_idx] = accel # update the agent acceleration\n\n a_container[agent_idx] = accel\n return a_container", "def get_next_actions(self):\n return enumerate(self.actions[self.current_action_index + 1:], self.current_action_index + 1)", "def neighbors(self):\n return self.graph.neighbors(self.id)", "def execute_actions(self, actions):\n execute_actions(self.board, self.agent_locs, actions)", "def edges(self, irc, msg, args, channel):\n pie = self.instances[irc.network]\n irc.reply([str(x) for x in pie.graphs[channel].edges.values()])", "def getAction(self, gameState):\n\n # BEGIN_YOUR_CODE\n def G(gameState):\n return gameState.isWin() or gameState.isLose()\n\n def U(gameState):\n if gameState.isWin():\n return numpy.inf\n if gameState.isLose():\n return -numpy.inf\n\n def Turn(agent_index):\n if agent_index + 1 < gameState.getNumAgents():\n return agent_index + 1\n else:\n return 0\n\n # The heuristic evaluation function\n evalFumc = self.evaluationFunction\n\n def GetMinMaxActionAlphaBeta(gameState, agent_index, depth, alpha, beta):\n # we reached a win or a lose situation.\n if G(gameState):\n return (U(gameState), None, depth)\n # end of search depth.\n if depth == 0:\n return (evalFumc(gameState), None, depth)\n if agent_index == 0:\n # Pacmans turn\n CurrMax = -numpy.inf\n MaxAction = None\n maxDepth = -numpy.inf\n for move in gameState.getLegalActions(agent_index):\n # if there are no agents every call we should go one layer deeper.\n if gameState.getNumAgents() == 1:\n v = GetMinMaxActionAlphaBeta(gameState.generateSuccessor(agent_index, move), Turn(agent_index),\n depth - 1, alpha, beta)\n else:\n v = GetMinMaxActionAlphaBeta(gameState.generateSuccessor(agent_index, move), Turn(agent_index),\n depth - 1, alpha, beta)\n if CurrMax < v[0] or (CurrMax == v[0] and maxDepth < v[2]):\n CurrMax = v[0]\n MaxAction = move\n maxDepth = 
v[2]\n alpha = max(CurrMax, alpha)\n if CurrMax >= beta:\n return (numpy.inf, move, maxDepth)\n return (CurrMax, MaxAction, maxDepth)\n else:\n # Ghosts turn\n CurrMin = numpy.inf\n MinAction = None\n maxDepth = -numpy.inf\n for move in gameState.getLegalActions(agent_index):\n if Turn(agent_index) == 0:\n # the next turn will be pacmans so go one depth lower.\n v = GetMinMaxActionAlphaBeta(gameState.generateSuccessor(agent_index, move), Turn(agent_index),\n depth - 1, alpha, beta)\n else:\n # next turn is another ghost so stay in same depth.\n v = GetMinMaxActionAlphaBeta(gameState.generateSuccessor(agent_index, move), Turn(agent_index),\n depth, alpha, beta)\n if CurrMin > v[0] or (CurrMin == v[0] and maxDepth < v[2]):\n CurrMin = v[0]\n MinAction = move\n maxDepth = v[2]\n if Turn(agent_index) == 0:\n beta = min(CurrMin, beta)\n if CurrMin <= alpha:\n return (-numpy.inf, move, maxDepth)\n return (CurrMin, MinAction, maxDepth)\n\n return GetMinMaxActionAlphaBeta(gameState, 0, self.depth, -numpy.inf, numpy.inf)[1]\n # END_YOUR_CODE", "def neighbors(self):\n return self.mesh.neighbors()", "def apply_action(self, action):\n agent = action['action_details']['agent_id']\n current_node = self.agents_location[agent][0]\n people_collected = self.people_location.get(current_node, 0)\n self.people_location[current_node] = 0\n self.people_collected[agent] += people_collected\n self.agents[agent-1].score += people_collected\n self.agents[agent-1].location = current_node\n self.agents[agent-1].t += 1\n self.agents[agent%2].t += 1\n # self.agents_location[agent%2+1][2] = max(self.agents_location[agent%2+1][2]-1,0)\n # if self.agents_location[agent%2+1][2] == 0:\n # self.agents_location[agent%2+1][0] = self.agents_location[agent%2+1][1]\n # self.agents[agent%2].traversing = False\n # self.agents[agent%2].location = self.agents_location[agent%2+1][1]\n\n if 'expansions' in action[\"action_details\"]:\n self.agents_expansions[action[\"action_details\"]['agent_id']] += action[\"action_details\"]['expansions']\n resulting_observ = self.actions_reactions[action[\"action_tag\"]](action[\"action_details\"])\n resulting_observ['collected'] = people_collected\n return resulting_observ", "def gen_action(self, agent_list, observation, frame_idx, train, free_map=None):\n #TODO add multiple agent functionality with a for loop \n \n if train == True:\n epsilon = self.epsilon_by_frame(frame_idx)\n if random.random() > epsilon:\n state = observation\n state = torch.FloatTensor(np.float32(state))\n state = state.to(self.device).unsqueeze(0).unsqueeze(0)\n q_value = self.current_model.forward(state)\n max_q, action = q_value[0].max(0)\n max_q = float(max_q)\n action = int(action)\n \n else:\n action = random.randrange(self.num_actions)\n \n # for evaluation\n elif train == False:\n #TODO fix the CNN input dimensions\n state = observation.flatten()\n state = torch.FloatTensor(np.float32(state))\n state = state.to(self.device)\n \n q_value = self.current_model.forward(state)\n max_q, action = q_value.max(0)\n\n #TODO get all agent actions for one team here\n action_out = []\n action_out.append(action)\n return action_out", "def get_train_action(self, state, physic_mode_id, num_thrusters):\n actions = []\n names = [s['name'] for s in state]\n for agent in self.agents:\n actions.append(\n agent.get_train_action(state[names.index(agent.name)],\n physic_mode_id,\n num_thrusters))\n return actions", "def actions(self) -> List[str]:\n return list(self.__endpoints.keys())", "def expand(self):\n nodes = []\n\n for action in 
self.board.actions():\n # copy the current board\n board = copy.deepcopy(self.board)\n board.apply_action(action)\n\n nodes.append(Node(board, action, self.cost + 1, self))\n \n return nodes", "def getAllRobotActions(self):\n return self.robot.actions", "def find_routes_to_observations(self, agent):\n best_ends_for_observations = {i: None for i in range(len(\n self.observation_areas[\n agent]))} # We want to find best route to each observation area, given that we have found any\n best_costs = {i: np.inf for i in range(len(self.observation_areas[agent]))}\n while len(self.observations[agent]) < self.N_subtrees:\n self.add_node(agent)\n\n observations_made = {i: [] for i in range(\n len(self.observation_areas[agent]))} # {i : [observation_nodes]} for each observation area\n\n for node in self.all_nodes[agent]:\n if node.observed:\n area = node.observation_area\n for area_index in range(len(self.observation_areas[agent])):\n if area == self.observation_areas[agent][area_index]:\n observations_made[area_index].append(\n node) # Append node to observation area \"area_index\" to keep track of which nodes were observed where\n\n for i in observations_made.keys():\n all_obs_nodes = observations_made[i]\n for node_temp in all_obs_nodes:\n cost_temp = np.sum(node_temp.path_costs.copy() + node_temp.terminal_costs.copy())\n if cost_temp < best_costs[i]:\n best_costs[i] = cost_temp\n best_ends_for_observations[i] = node_temp\n\n return best_ends_for_observations", "def get_neighbours(self):\n return self.neighbours", "def edges(self):\n edge_list = []\n for node1 in self.node_dict:\n for node2 in self.node_dict[node1]:\n edge_list.append((node1,\n node2,\n self.node_dict[node1][node2]))\n return edge_list", "def getAction(self, gameState):\n\n # BEGIN_YOUR_CODE\n def G(gameState):\n return gameState.isWin() or gameState.isLose()\n\n def U(gameState):\n if gameState.isWin():\n return numpy.inf\n if gameState.isLose():\n return -numpy.inf\n\n def Turn(agent_index):\n if agent_index + 1 < gameState.getNumAgents():\n return agent_index + 1\n else:\n return 0\n\n def UniformProbability(gameState, agent_index):\n moves = gameState.getLegalActions(agent_index)\n states = [gameState.generateSuccessor(agent_index, move) for move in moves]\n return [(state, 1 / len(moves)) for state in states]\n\n # The heuristic evaluation function\n evalFumc = self.evaluationFunction\n\n def GetExpectimaxAction(gameState, agent_index, depth):\n # we reached a win or a lose situation.\n if G(gameState):\n return (U(gameState), None, depth)\n # end of search depth.\n if depth == 0:\n return (evalFumc(gameState), None, depth)\n if agent_index == 0:\n # Pacmans turn\n CurrMax = -numpy.inf\n MaxAction = None\n maxDepth = -numpy.inf\n for move in gameState.getLegalActions(agent_index):\n # if there are no agents every call we should go one layer deeper.\n if gameState.getNumAgents() == 1:\n v = GetExpectimaxAction(gameState.generateSuccessor(agent_index, move), Turn(agent_index),\n depth - 1)\n else:\n v = GetExpectimaxAction(gameState.generateSuccessor(agent_index, move), Turn(agent_index),\n depth)\n if CurrMax < v[0] or (CurrMax == v[0] and maxDepth < v[2]):\n CurrMax = v[0]\n MaxAction = move\n maxDepth = v[2]\n return (CurrMax, MaxAction, maxDepth)\n else:\n # Ghosts turn\n values = []\n depths = []\n for c, p in UniformProbability(gameState, agent_index):\n if Turn(agent_index) == 0:\n v = GetExpectimaxAction(c, Turn(agent_index), depth - 1)\n else:\n v = GetExpectimaxAction(c, Turn(agent_index), depth)\n values.append(p * 
v[0])\n depths.append(v[2])\n return (sum(values), None, max(depths))\n\n return GetExpectimaxAction(gameState, 0, self.depth)[1]\n # END_YOUR_CODE", "def get_actions(self):\n return []", "def edges(self) -> Iterable[Tuple[Node]]:\n edges = []\n for node in self.__graph_dict.keys():\n for neighbour in self.__graph_dict[node]:\n # Since all edges go both ways, we need only return one of them.\n if {neighbour, node} not in edges:\n edges.append({node, neighbour})\n yield (node, neighbour)", "def get_step_actions(self):\n return self.actor(tf.numpy_function(self.get_states, [], self.states[0].dtype))", "def get_all_edges(self):\n \n ans = []\n for node_id in self.neighbors:\n for edge_to_neighbor in self.neighbors[node_id]:\n ans.append(edge_to_neighbor)\n\n return ans", "def get_neighbors(self):\n \n return self.adjacent.keys()", "def sense(self, agents, agent_index, top_down_map=None):\n host_agent = agents[agent_index]\n other_agent_dists = {}\n sorted_pairs = sorted(other_agent_dists.items(),\n key=operator.itemgetter(1))\n\n sorting_criteria = []\n for i, other_agent in enumerate(agents):\n if other_agent.id == host_agent.id:\n continue\n # project other elements onto the new reference frame\n rel_pos_to_other_global_frame = other_agent.pos_global_frame - \\\n host_agent.pos_global_frame\n p_parallel_ego_frame = np.dot(rel_pos_to_other_global_frame, host_agent.ref_prll)\n p_orthog_ego_frame = np.dot(rel_pos_to_other_global_frame, host_agent.ref_orth)\n dist_between_agent_centers = vec2_l2_norm(rel_pos_to_other_global_frame)\n dist_2_other = dist_between_agent_centers - host_agent.radius - other_agent.radius\n combined_radius = host_agent.radius + other_agent.radius\n\n if dist_between_agent_centers > Config.SENSING_HORIZON:\n # print(\"Agent too far away\")\n continue\n\n if self.agent_sorting_method != \"time_to_impact\":\n time_to_impact = None\n else:\n time_to_impact = compute_time_to_impact(host_agent.pos_global_frame,\n other_agent.pos_global_frame,\n host_agent.vel_global_frame,\n other_agent.vel_global_frame,\n combined_radius)\n\n sorting_criteria.append([i, round(dist_2_other,2), p_orthog_ego_frame, time_to_impact])\n\n clipped_sorted_inds = self.get_clipped_sorted_inds(sorting_criteria)\n clipped_sorted_agents = [agents[i] for i in clipped_sorted_inds]\n\n other_agents_states = np.zeros((Config.MAX_NUM_OTHER_AGENTS_OBSERVED, 7))\n other_agent_count = 0\n for other_agent in clipped_sorted_agents:\n if other_agent.id == host_agent.id:\n continue\n # project other elements onto the new reference frame\n rel_pos_to_other_global_frame = other_agent.pos_global_frame - \\\n host_agent.pos_global_frame\n p_parallel_ego_frame = np.dot(rel_pos_to_other_global_frame,\n host_agent.ref_prll)\n p_orthog_ego_frame = np.dot(rel_pos_to_other_global_frame,\n host_agent.ref_orth)\n v_parallel_ego_frame = np.dot(other_agent.vel_global_frame,\n host_agent.ref_prll)\n v_orthog_ego_frame = np.dot(other_agent.vel_global_frame,\n host_agent.ref_orth)\n dist_2_other = np.linalg.norm(rel_pos_to_other_global_frame) - \\\n host_agent.radius - other_agent.radius\n combined_radius = host_agent.radius + other_agent.radius\n\n other_obs = np.array([p_parallel_ego_frame,\n p_orthog_ego_frame,\n v_parallel_ego_frame,\n v_orthog_ego_frame,\n other_agent.radius,\n combined_radius,\n dist_2_other])\n \n if other_agent_count == 0:\n host_agent.other_agent_states[:] = other_obs\n\n other_agents_states[other_agent_count,:] = other_obs\n other_agent_count += 1\n\n host_agent.num_other_agents_observed = 
other_agent_count\n\n return other_agents_states", "def edges(self):\n es = []\n for vertex1 in self.vertices():\n for vertex2 in self.out_vertices(vertex1):\n es.append(self[vertex1][vertex2])\n return es", "def getAction(self, gameState):\n\n # BEGIN_YOUR_CODE\n def G(gameState):\n return gameState.isWin() or gameState.isLose()\n\n def U(gameState):\n if gameState.isWin():\n return numpy.inf\n if gameState.isLose():\n return -numpy.inf\n\n def Turn(agent_index):\n if agent_index + 1 < gameState.getNumAgents():\n return agent_index + 1\n else:\n return 0\n\n # The heuristic evaluation function\n evalFumc = self.evaluationFunction\n\n def GetMinMaxAction(gameState, agent_index, depth):\n # we reached a win or a lose situation.\n if G(gameState):\n return (U(gameState), None, depth)\n # end of search depth.\n if depth == 0:\n return (evalFumc(gameState), None, depth)\n if agent_index == 0:\n # Pacmans turn\n CurrMax = -numpy.inf\n MaxAction = None\n maxDepth = -numpy.inf\n for move in gameState.getLegalActions(agent_index):\n # if there are no agents every call we should go one layer deeper.\n if gameState.getNumAgents() == 1:\n v = GetMinMaxAction(gameState.generateSuccessor(agent_index, move), Turn(agent_index),\n depth - 1)\n else:\n v = GetMinMaxAction(gameState.generateSuccessor(agent_index, move), Turn(agent_index), depth)\n if CurrMax < v[0] or (CurrMax == v[0] and maxDepth < v[2]):\n CurrMax = v[0]\n MaxAction = move\n maxDepth = v[2]\n return (CurrMax, MaxAction, maxDepth)\n else:\n # Ghosts turn\n CurrMin = numpy.inf\n MinAction = None\n maxDepth = -numpy.inf\n for move in gameState.getLegalActions(agent_index):\n if Turn(agent_index) == 0:\n # the next turn will be pacmans so go one depth lower.\n v = GetMinMaxAction(gameState.generateSuccessor(agent_index, move), Turn(agent_index),\n depth - 1)\n else:\n # next turn is another ghost so stay in same depth.\n v = GetMinMaxAction(gameState.generateSuccessor(agent_index, move), Turn(agent_index), depth)\n if CurrMin > v[0] or (CurrMin == v[0] and maxDepth < v[2]):\n CurrMin = v[0]\n MinAction = move\n maxDepth = v[2]\n return (CurrMin, MinAction, maxDepth)\n\n return GetMinMaxAction(gameState, 0, self.depth)[1]\n # END_YOUR_CODE" ]
[ "0.7049442", "0.6467031", "0.643074", "0.6362425", "0.62686896", "0.6207406", "0.6155371", "0.6118249", "0.6094078", "0.6003385", "0.59684646", "0.59684646", "0.59386194", "0.5927922", "0.5917781", "0.58938307", "0.58807933", "0.5880295", "0.5880295", "0.5880295", "0.5880295", "0.58579725", "0.5857972", "0.58551204", "0.5835745", "0.58263546", "0.5824062", "0.5822234", "0.57835925", "0.5774486", "0.5768538", "0.5744683", "0.571684", "0.5699443", "0.5699443", "0.5685284", "0.5669095", "0.5630727", "0.5629251", "0.56261635", "0.5621632", "0.56099206", "0.5607322", "0.5606693", "0.560533", "0.5601515", "0.5580712", "0.5572924", "0.5571755", "0.5563616", "0.5558105", "0.5548631", "0.5538839", "0.55010086", "0.5477597", "0.5477597", "0.54771745", "0.5476222", "0.5472331", "0.54706097", "0.54703957", "0.54668325", "0.5457648", "0.5436906", "0.5436681", "0.54156625", "0.5412429", "0.5407605", "0.5406411", "0.5406411", "0.5406411", "0.53993654", "0.5394521", "0.53855294", "0.5358917", "0.5346057", "0.5337443", "0.533404", "0.5332967", "0.5323547", "0.5313427", "0.5313257", "0.53106594", "0.5308836", "0.53066486", "0.5302075", "0.5295029", "0.5287335", "0.52848995", "0.52643067", "0.5263257", "0.5260826", "0.52605456", "0.52604806", "0.52574193", "0.52554935", "0.52554804", "0.52553225", "0.5254577", "0.5252881" ]
0.71311367
0
Return the appropriate observations to the agents depending on the type of algorithm being run. params
def _get_state(self, obs_env):
    state = []
    obs_env = obs_env.reshape(self.n_agent, 2)
    for i in range(self.n_agent):
        local_obs = obs_env[i]
        if self.agent.startswith('ia2c'):
            imgs = [local_obs]
            if not self.agent == 'ia2c_fp':  # ia2c
                for j in np.where(self.neighbor_mask[i] == 1)[0]:
                    imgs.append(obs_env[j])
                imgs = np.array(imgs, dtype=np.float32)
                fps = np.array([], dtype=np.float32)
            else:  # ia2c_fp
                fps = []
                for j in np.where(self.neighbor_mask[i] == 1)[0]:
                    imgs.append(obs_env[j])
                    fps.append(self.fp[j])
                imgs = np.array(imgs, dtype=np.float32)
                fps = np.concatenate(fps).astype(np.float32)
            agent_obs = [imgs, fps]
        else:  # ma2c
            agent_obs = local_obs.astype(np.float32)
        state.append(agent_obs)
    return state
    # return [[obs_env, np.array([], dtype=np.float32)] for _ in range(self.n_agent)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def collect_experiences(self):\n for i in range(self.num_frames_per_proc):\n # Do one agent-environment interaction\n\n preprocessed_obs0 = self.preprocess_obss(self.obs0, device=self.device)\n \n preprocessed_obs1 = self.preprocess_obss(self.obs1, device=self.device)\n \n with torch.no_grad():\n \n model_results0 = self.acmodel0(preprocessed_obs1, self.memory0 * self.mask0.unsqueeze(1)) ### NOTE\n \n dist0 = model_results0['dist'] ### NOTE\n value0 = model_results0['value']\n memory0 = model_results0['memory']\n msg0 = model_results0['message']\n dists_speaker0 = model_results0['dists_speaker']\n extra_predictions0 = model_results0['extra_predictions']\n #self.rng_states0[i] = model_results0['rng_states']\n #if torch.cuda.is_available():\n # self.cuda_rng_states0[i] = model_results0['cuda_rng_states']\n \n preprocessed_obs0.instr *= 0\n preprocessed_obs0.image *= 0\n model_results1 = self.acmodel1(preprocessed_obs0, self.memory1 * self.mask1.unsqueeze(1), msg=(msg0.transpose(0, 1) * self.mask1.unsqueeze(1).unsqueeze(2)).transpose(0, 1)) ### NOTE\n \n dist1 = model_results1['dist']\n value1 = model_results1['value']\n memory1 = model_results1['memory']\n msg1 = model_results1['message']\n dists_speaker1 = model_results1['dists_speaker']\n extra_predictions1 = model_results1['extra_predictions']\n #self.rng_states1[i] = model_results1['rng_states']\n #if torch.cuda.is_available():\n # self.cuda_rng_states1[i] = model_results1['cuda_rng_states']\n \n #state = torch.get_rng_state()\n action0 = dist0.sample()\n \n #torch.set_rng_state(state)\n action1 = dist1.sample()\n\n obs0, reward0, done0, env_info0 = self.env0.step(action0.cpu().numpy())\n \n obs1, reward1, done1, env_info1 = self.env1.step(action1.cpu().numpy())\n \n # mask any rewards based on (previous) been_done\n rewardos0 = [0] * self.num_procs\n rewardos1 = [0] * self.num_procs\n for j in range(self.num_procs):\n rewardos0[j] = reward0[j] * (1 - self.been_done0[j].item())\n rewardos1[j] = reward1[j] * (1 - self.been_done1[j].item())\n \n reward0 = tuple(rewardos0)\n reward1 = tuple(rewardos1)\n \n #reward0 = tuple(0.5*r0 + 0.5*r1 for r0, r1 in zip(reward0, reward1)) ### NOTE\n #reward1 = reward0\n \n # reward sender agent (0) equally for success of receiver agent (1) ### NOTE\n reward0 = reward1\n \n self.been_done0 = (1 - (1 - self.been_done0) * (1 - torch.tensor(done0, device=self.device, dtype=torch.float)))\n self.been_done1 = (1 - (1 - self.been_done1) * (1 - torch.tensor(done1, device=self.device, dtype=torch.float)))\n both_done = self.been_done0 * self.been_done1\n \n # reset if receiver agent (1) is done ### NOTE\n both_done = self.been_done1\n \n obs0 = self.env0.sync_reset(both_done, obs0)\n obs1 = self.env1.sync_reset(both_done, obs1)\n \n if self.aux_info:\n env_info0 = self.aux_info_collector0.process(env_info0)\n # env_info0 = self.process_aux_info0(env_info0)\n \n env_info1 = self.aux_info_collector1.process(env_info1)\n # env_info1 = self.process_aux_info1(env_info1)\n\n # Update experiences values\n\n self.obss0[i] = self.obs0\n self.obs0 = obs0\n \n self.obss1[i] = self.obs1\n self.obs1 = obs1\n\n self.memories0[i] = self.memory0\n self.memory0 = memory0\n \n self.memories1[i] = self.memory1\n self.memory1 = memory1\n \n self.msgs0[i] = self.msg0\n self.msg0 = msg0\n \n self.msgs1[i] = self.msg1\n self.msg1 = msg1\n \n self.msgs_out0[i] = msg0\n \n self.msgs_out1[i] = msg1\n\n self.masks0[i] = self.mask0\n #self.mask0 = 1 - torch.tensor(done0, device=self.device, dtype=torch.float)\n self.mask0 = 1 - 
both_done\n self.actions0[i] = action0\n self.values0[i] = value0\n if self.reshape_reward is not None:\n self.rewards0[i] = torch.tensor([\n self.reshape_reward(obs_, action_, reward_, done_)\n for obs_, action_, reward_, done_ in zip(obs0, action0, reward0, done0)\n ], device=self.device)\n else:\n self.rewards0[i] = torch.tensor(reward0, device=self.device)\n self.log_probs0[i] = dist0.log_prob(action0)\n self.speaker_log_probs0[i] = self.acmodel0.speaker_log_prob(dists_speaker0, msg0)\n \n self.masks1[i] = self.mask1\n #self.mask1 = 1 - torch.tensor(done1, device=self.device, dtype=torch.float)\n self.mask1 = 1 - both_done\n self.actions1[i] = action1\n self.values1[i] = value1\n if self.reshape_reward is not None:\n self.rewards1[i] = torch.tensor([\n self.reshape_reward(obs_, action_, reward_, done_)\n for obs_, action_, reward_, done_ in zip(obs1, action1, reward1, done1)\n ], device=self.device)\n else:\n self.rewards1[i] = torch.tensor(reward1, device=self.device)\n self.log_probs1[i] = dist1.log_prob(action1)\n self.speaker_log_probs1[i] = self.acmodel1.speaker_log_prob(dists_speaker1, msg1)\n\n if self.aux_info:\n self.aux_info_collector0.fill_dictionaries(i, env_info0, extra_predictions0)\n \n self.aux_info_collector1.fill_dictionaries(i, env_info1, extra_predictions1)\n\n # Update log values\n\n self.log_episode_return0 += torch.tensor(reward0, device=self.device, dtype=torch.float)\n self.log_episode_reshaped_return0 += self.rewards0[i]\n \n self.log_episode_return1 += torch.tensor(reward1, device=self.device, dtype=torch.float)\n self.log_episode_reshaped_return1 += self.rewards1[i]\n \n self.log_episode_num_frames0 += torch.ones(self.num_procs, device=self.device)\n self.log_episode_num_frames1 += torch.ones(self.num_procs, device=self.device)\n \n #for i, done_ in enumerate(done0):\n for i in range(self.num_procs):\n #if done_:\n if both_done[i]:\n self.log_done_counter0 += 1\n self.log_return0.append(self.log_episode_return0[i].item())\n self.log_reshaped_return0.append(self.log_episode_reshaped_return0[i].item())\n self.log_num_frames0.append(self.log_episode_num_frames0[i].item())\n \n #for i, done_ in enumerate(done1):\n #if done_:\n self.log_done_counter1 += 1\n self.log_return1.append(self.log_episode_return1[i].item())\n self.log_reshaped_return1.append(self.log_episode_reshaped_return1[i].item())\n self.log_num_frames1.append(self.log_episode_num_frames1[i].item())\n\n # if both are done, reset both to not done\n self.been_done0 *= (1 - both_done)\n self.been_done1 *= (1 - both_done)\n\n self.log_episode_return0 *= self.mask0\n self.log_episode_reshaped_return0 *= self.mask0\n self.log_episode_num_frames0 *= self.mask0\n\n self.log_episode_return1 *= self.mask1\n self.log_episode_reshaped_return1 *= self.mask1\n self.log_episode_num_frames1 *= self.mask1\n\n # Add advantage and return to experiences\n\n preprocessed_obs0 = self.preprocess_obss(self.obs0, device=self.device)\n preprocessed_obs1 = self.preprocess_obss(self.obs1, device=self.device)\n \n with torch.no_grad():\n tmp = self.acmodel0(preprocessed_obs1, self.memory0 * self.mask0.unsqueeze(1)) ### NOTE\n next_value0 = tmp['value']\n \n preprocessed_obs0.instr *= 0\n preprocessed_obs0.image *= 0\n next_value1 = self.acmodel1(preprocessed_obs0, self.memory1 * self.mask1.unsqueeze(1), msg=(tmp['message'].transpose(0, 1) * self.mask1.unsqueeze(1).unsqueeze(2)).transpose(0, 1))['value'] ### NOTE\n\n for i in reversed(range(self.num_frames_per_proc)):\n next_mask0 = self.masks0[i+1] if i < 
self.num_frames_per_proc - 1 else self.mask0\n next_value0 = self.values0[i+1] if i < self.num_frames_per_proc - 1 else next_value0\n next_advantage0 = self.advantages0[i+1] if i < self.num_frames_per_proc - 1 else 0\n \n next_mask1 = self.masks1[i+1] if i < self.num_frames_per_proc - 1 else self.mask1\n next_value1 = self.values1[i+1] if i < self.num_frames_per_proc - 1 else next_value1\n next_advantage1 = self.advantages1[i+1] if i < self.num_frames_per_proc - 1 else 0\n\n delta0 = self.rewards0[i] + self.discount * next_value0 * next_mask0 - self.values0[i]\n self.advantages0[i] = delta0 + self.discount * self.gae_lambda * next_advantage0 * next_mask0\n \n delta1 = self.rewards1[i] + self.discount * next_value1 * next_mask1 - self.values1[i]\n self.advantages1[i] = delta1 + self.discount * self.gae_lambda * next_advantage1 * next_mask1\n\n # Flatten the data correctly, making sure that\n # each episode's data is a continuous chunk\n\n exps0 = DictList()\n exps0.obs = [self.obss0[i][j]\n for j in range(self.num_procs)\n for i in range(self.num_frames_per_proc)]\n \n exps1 = DictList()\n exps1.obs = [self.obss1[i][j]\n for j in range(self.num_procs)\n for i in range(self.num_frames_per_proc)]\n \n # In commments below T is self.num_frames_per_proc, P is self.num_procs,\n # D is the dimensionality\n\n # T x P x D -> P x T x D -> (P * T) x D\n exps0.memory = self.memories0.transpose(0, 1).reshape(-1, *self.memories0.shape[2:])\n \n exps1.memory = self.memories1.transpose(0, 1).reshape(-1, *self.memories1.shape[2:])\n \n exps0.message = self.msgs0.transpose(1, 2).transpose(0, 1).reshape(-1, self.acmodel0.max_len_msg, self.acmodel0.num_symbols)\n \n exps1.message = self.msgs1.transpose(1, 2).transpose(0, 1).reshape(-1, self.acmodel1.max_len_msg, self.acmodel1.num_symbols)\n \n exps0.message_out = self.msgs_out0.transpose(1, 2).transpose(0, 1).reshape(-1, self.acmodel0.max_len_msg, self.acmodel0.num_symbols)\n \n exps1.message_out = self.msgs_out1.transpose(1, 2).transpose(0, 1).reshape(-1, self.acmodel1.max_len_msg, self.acmodel1.num_symbols)\n \n #exps0.rng_states = self.rng_states0.transpose(0, 1).reshape(-1, *self.rng_states0.shape[2:])\n #if torch.cuda.is_available():\n # exps0.cuda_rng_states = self.cuda_rng_states0.transpose(0, 1).reshape(-1, *self.cuda_rng_states0.shape[2:])\n \n #exps1.rng_states = self.rng_states1.transpose(0, 1).reshape(-1, *self.rng_states1.shape[2:])\n #if torch.cuda.is_available():\n # exps1.cuda_rng_states = self.cuda_rng_states1.transpose(0, 1).reshape(-1, *self.cuda_rng_states1.shape[2:])\n \n # T x P -> P x T -> (P * T) x 1\n exps0.mask = self.masks0.transpose(0, 1).reshape(-1).unsqueeze(1)\n \n exps1.mask = self.masks1.transpose(0, 1).reshape(-1).unsqueeze(1)\n\n # for all tensors below, T x P -> P x T -> P * T\n exps0.action = self.actions0.transpose(0, 1).reshape(-1)\n exps0.value = self.values0.transpose(0, 1).reshape(-1)\n exps0.reward = self.rewards0.transpose(0, 1).reshape(-1)\n exps0.advantage = self.advantages0.transpose(0, 1).reshape(-1)\n exps0.returnn = exps0.value + exps0.advantage\n exps0.log_prob = self.log_probs0.transpose(0, 1).reshape(-1)\n exps0.speaker_log_prob = self.speaker_log_probs0.transpose(0, 1).reshape(-1)\n \n exps1.action = self.actions1.transpose(0, 1).reshape(-1)\n exps1.value = self.values1.transpose(0, 1).reshape(-1)\n exps1.reward = self.rewards1.transpose(0, 1).reshape(-1)\n exps1.advantage = self.advantages1.transpose(0, 1).reshape(-1)\n exps1.returnn = exps1.value + exps1.advantage\n exps1.log_prob = 
self.log_probs1.transpose(0, 1).reshape(-1)\n exps1.speaker_log_prob = self.speaker_log_probs1.transpose(0, 1).reshape(-1)\n\n if self.aux_info:\n exps0 = self.aux_info_collector0.end_collection(exps0)\n \n exps1 = self.aux_info_collector1.end_collection(exps1)\n\n # Preprocess experiences\n\n exps0.obs = self.preprocess_obss(exps0.obs, device=self.device)\n\n exps1.obs = self.preprocess_obss(exps1.obs, device=self.device)\n\n # Log some values\n\n keep0 = max(self.log_done_counter0, self.num_procs)\n\n keep1 = max(self.log_done_counter1, self.num_procs)\n\n log0 = {\n \"return_per_episode\": self.log_return0[-keep0:],\n \"reshaped_return_per_episode\": self.log_reshaped_return0[-keep0:],\n \"num_frames_per_episode\": self.log_num_frames0[-keep0:],\n \"num_frames\": self.num_frames,\n \"episodes_done\": self.log_done_counter0,\n }\n\n log1 = {\n \"return_per_episode\": self.log_return1[-keep1:],\n \"reshaped_return_per_episode\": self.log_reshaped_return1[-keep1:],\n \"num_frames_per_episode\": self.log_num_frames1[-keep1:],\n \"num_frames\": self.num_frames,\n \"episodes_done\": self.log_done_counter1,\n }\n\n self.log_done_counter0 = 0\n self.log_return0 = self.log_return0[-self.num_procs:]\n self.log_reshaped_return0 = self.log_reshaped_return0[-self.num_procs:]\n self.log_num_frames0 = self.log_num_frames0[-self.num_procs:]\n\n self.log_done_counter1 = 0\n self.log_return1 = self.log_return1[-self.num_procs:]\n self.log_reshaped_return1 = self.log_reshaped_return1[-self.num_procs:]\n self.log_num_frames1 = self.log_num_frames1[-self.num_procs:]\n\n return exps0, log0, exps1, log1", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"rAfb[1.0,-5.0, 5.0]\");\n self.modelBuilder.doVar(\"rA0[1.0, -5.0, 5.0]\");\n self.modelBuilder.doSet(\"POI\",\"rAfb,rA0\")\n self.modelBuilder.factory_('expr::mAfb(\"@0*@1\",eAfb,rAfb)')\n self.modelBuilder.factory_('expr::mA0(\"(@0*@1)\",eA0,rA0)')\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def step(self):\n\n \"\"\" First updates the variables values of the current time form the environment \"\"\"\n self.update_crispval(self.env.context)\n\n \"\"\"\n here the decision making of the agent\n to determine which activity to suggest to the patient\n i apply the creative controller to the current context\n \"\"\"\n curr_input = sample_inputs(False, 0, self.curr_interaction, self.variables_default_val, self.action_var,\n self.fuzzysets_values, self.variables_universe)\n c_out, rules_activations, is_cc_exception = self.creative_controller.computeOutput(curr_input, False)\n\n \"\"\" i obtain a number of ouput crisp values.\n i determine which one achieves the max expected output w.r.t. 
the a-rules \"\"\"\n best_a = None\n best_a_val = -1000\n best_a_exphapp = 5\n if self.verbose > Constants.VERBOSE_BASIC:\n print(\"rules activations\")\n for a in rules_activations:\n if rules_activations[a] > 0:\n print(str(a) + \"\\n\\t\\t\\t-> \" + str(rules_activations[a]))\n for item in c_out.items(): # for each pair <activity, crisp output>\n if self.verbose > Constants.VERBOSE_BASIC:\n print(item)\n if not item[\n 0] in self.curr_iter_suggestions: # if i didn't suggest the same activity already in the same interaction\n inputs = dict(curr_input) # I create a copy fo the dict\n inputs[item[0]] = item[1]\n assessor_id = self.actions_to_ti[item[0]]\n self.assessors[assessor_id].feed_inputs(inputs)\n is_ac_exception = False\n assout = []\n try:\n a_out, a_rules_activations, is_ac_exception = self.assessors[assessor_id].compute(verbose=False)\n assout = [a_out[ao] for ao in a_out]\n except:\n is_ac_exception = True\n traceback.print_exc()\n # todo the following assumes that every assessor controller has same eval var\n for v in self.eval_var:\n assout.append(self.variables_default_val[v])\n if len(assout) == 0:\n for v in self.eval_var:\n assout.append(self.variables_default_val[v])\n w_ta = self.weights_therapeutic_interventions[self.actions_to_ti[item[0]]]\n\n avg_credit_rules_that_suggested_action = 1.0\n nr_rules_that_suggested_action = 0\n for r in rules_activations:\n if (rules_activations[r] > 0) and (str(item[0]) in str(r)):\n avg_credit_rules_that_suggested_action = avg_credit_rules_that_suggested_action + \\\n self.rules_credits[str(r)]\n nr_rules_that_suggested_action = nr_rules_that_suggested_action + 1\n if nr_rules_that_suggested_action > 0:\n avg_credit_rules_that_suggested_action = (\n avg_credit_rules_that_suggested_action - 1.0) / nr_rules_that_suggested_action\n repetition_cost = 1.0\n a_val = (mean(assout) * w_ta * avg_credit_rules_that_suggested_action) / repetition_cost\n if (a_val > best_a_val) and (\n item[1] >= (self.variables_default_val[item[0]] + self.range_step[item[0]])):\n best_a = item\n best_a_val = a_val\n best_a_exphapp = mean(assout)\n\n \"\"\"I suggest the activity with best expected outcome and store the information to populate the interactions \n memory \"\"\"\n self.proposeActivity(best_a)\n if not best_a is None:\n if (self.verbose > Constants.VERBOSE_FALSE) and (self.verbose <= Constants.VERBOSE_BASIC):\n print(\"proposing activity\" + str(best_a) + \" which has expected feedback: \" + str(\n best_a_exphapp) + \", which weighted is \" + str(best_a_val))\n self.curr_iter_suggestions.append(best_a[0])\n self.last_suggestion = best_a\n else:\n if (self.verbose > Constants.VERBOSE_FALSE) and (self.verbose <= Constants.VERBOSE_BASIC):\n print(\"the activity proposed is \" + str(\n best_a) + \" so I don't suggest anything. 
I will ask a question instead\")\n self.last_suggestion = []\n self.expected_feedback = best_a_exphapp\n self.last_context = self.env.context.copy()\n self.last_rules_activations = rules_activations", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"mAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"mA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"eAfb,mAfb\")\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"dAfb[0.,-0.75,0.75]\");\n self.modelBuilder.doVar(\"dA0[0.0, -1.0, 1.0]\");\n #self.modelBuilder.doSet(\"POI\",\"dAfb,dA0\")\n self.modelBuilder.doSet(\"POI\",\"dAfb\")\n self.modelBuilder.factory_('expr::mAfb(\"@0+@1\",eAfb,dAfb)')\n self.modelBuilder.factory_('expr::mA0(\"(@0+@1)\",eA0,dA0)')\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def test_mmp_active_inference(self):\n\n num_obs = [3, 2]\n num_states = [4, 3]\n num_control = [1, 3]\n A = random_A_matrix(num_obs, num_states)\n B = random_B_matrix(num_states, num_control)\n\n C = obj_array_zeros(num_obs)\n C[1][0] = 1.0 \n C[1][1] = -2.0 \n\n agent = Agent(A=A, B=B, C=C, control_fac_idx=[1], inference_algo=\"MMP\", policy_len=2, inference_horizon=3)\n\n T = 10\n\n for t in range(T):\n\n o = [np.random.randint(num_ob) for num_ob in num_obs] # just randomly generate observations at each timestep, no generative process\n qx = agent.infer_states(o)\n agent.infer_policies()\n action = agent.sample_action()\n \n print(agent.prev_actions)\n print(agent.prev_obs)", "def get_observations(self):\n joint_states = self.joints_state\n self.force = self.wrench_stamped.wrench.force\n self.torque = self.wrench_stamped.wrench.torque\n self.static_taxel = self.tactile_static.taxels\n# dynamic_taxel= tactile_dynamic\n\n# print(\"[force]\", self.force.x, self.force.y, self.force.z)\n# print(\"[torque]\", self.torque.x, self.torque.y, self.torque.z)\n shp_joint_ang = joint_states.position[0]\n shl_joint_ang = joint_states.position[1]\n elb_joint_ang = joint_states.position[2]\n 
wr1_joint_ang = joint_states.position[3]\n wr2_joint_ang = joint_states.position[4]\n wr3_joint_ang = joint_states.position[5]\n\n shp_joint_vel = joint_states.velocity[0]\n shl_joint_vel = joint_states.velocity[1]\n elb_joint_vel = joint_states.velocity[2]\n wr1_joint_vel = joint_states.velocity[3]\n wr2_joint_vel = joint_states.velocity[4]\n wr3_joint_vel = joint_states.velocity[5]\n\n q = [shp_joint_ang, shl_joint_ang, elb_joint_ang, wr1_joint_ang, wr2_joint_ang, wr3_joint_ang]\n# print(\"q(observation):\", q)\n eef_x, eef_y, eef_z = self.get_xyz(q)\n self.end_effector = self.get_xyz(q)\n eef_x_ini, eef_y_ini, eef_z_ini = self.get_xyz(self.init_joint_pose2) \n\n delta_image_r, delta_image_l = self.get_image()\n self.cnn_image_r = agent.update_cnn(delta_image_r)\n self.cnn_image_l = agent.update_cnn(delta_image_l)\n self.cnn_image_r_list = self.cnn_image_r.tolist()\n self.cnn_image_l_list = self.cnn_image_l.tolist()\n print(\"r_list\", self.cnn_image_r_list)\n print(\"l_list\", self.cnn_image_l_list)\n\n observation = []\n# rospy.logdebug(\"List of Observations==>\"+str(self.observations))\n for obs_name in self.observations:\n if obs_name == \"shp_joint_ang\":\n observation.append((shp_joint_ang - self.init_joint_pose2[0]) * self.joint_n)\n elif obs_name == \"shl_joint_ang\":\n observation.append((shl_joint_ang - self.init_joint_pose2[1]) * self.joint_n)\n elif obs_name == \"elb_joint_ang\":\n observation.append((elb_joint_ang - self.init_joint_pose2[2]) * self.joint_n)\n elif obs_name == \"wr1_joint_ang\":\n observation.append((wr1_joint_ang - self.init_joint_pose2[3]) * self.joint_n)\n elif obs_name == \"wr2_joint_ang\":\n observation.append((wr2_joint_ang - self.init_joint_pose2[4]) * self.joint_n)\n elif obs_name == \"wr3_joint_ang\":\n observation.append((wr3_joint_ang - self.init_joint_pose2[5]) * self.joint_n)\n elif obs_name == \"shp_joint_vel\":\n observation.append(shp_joint_vel)\n elif obs_name == \"shl_joint_vel\":\n observation.append(shl_joint_vel)\n elif obs_name == \"elb_joint_vel\":\n observation.append(elb_joint_vel)\n elif obs_name == \"wr1_joint_vel\":\n observation.append(wr1_joint_vel)\n elif obs_name == \"wr2_joint_vel\":\n observation.append(wr2_joint_vel)\n elif obs_name == \"wr3_joint_vel\":\n observation.append(wr3_joint_vel)\n elif obs_name == \"eef_x\":\n observation.append((eef_x - eef_x_ini) * self.eef_n)\n elif obs_name == \"eef_y\":\n observation.append((eef_y - eef_y_ini) * self.eef_n)\n elif obs_name == \"eef_z\":\n observation.append((eef_z - eef_z_ini) * self.eef_n)\n elif obs_name == \"force_x\":\n observation.append((self.force.x - self.force_ini.x) / self.force_limit1 * self.force_n)\n elif obs_name == \"force_y\":\n observation.append((self.force.y - self.force_ini.y) / self.force_limit1 * self.force_n)\n elif obs_name == \"force_z\":\n observation.append((self.force.z - self.force_ini.z) / self.force_limit1 * self.force_n)\n elif obs_name == \"torque_x\":\n observation.append((self.torque.x - self.torque_ini.x) / self.torque_limit1 * self.torque_n)\n elif obs_name == \"torque_y\":\n observation.append((self.torque.y - self.torque_ini.y) / self.torque_limit1 * self.torque_n)\n elif obs_name == \"torque_z\":\n observation.append((self.torque.z - self.torque_ini.z) / self.torque_limit1 * self.torque_n)\n elif obs_name == \"image_cnn\":\n for x in range(0, 10):\n observation.append(self.cnn_image_r_list[0][x])\n# print(\"r_list\", self.cnn_image_r_list[0][x])\n for x in range(0, 10):\n observation.append(self.cnn_image_l_list[0][x])\n# 
print(\"l_list\", self.cnn_image_l_list[0][x])\n elif obs_name == \"static_taxel\":\n for x in range(0, 28):\n observation.append((self.static_taxel[0].values[x] - self.static_taxel_ini[0].values[x]) * self.taxel_n)\n for x in range(0, 28):\n observation.append((self.static_taxel[1].values[x] - self.static_taxel_ini[1].values[x]) * self.taxel_n)\n# elif obs_name == \"dynamic_taxel\":\n# observation.append(dynamic_taxel[0].values) * self.taxel_n\n# observation.append(dynamic_taxel[1].values) * self.taxel_n\n else:\n raise NameError('Observation Asked does not exist=='+str(obs_name))\n\n print(\"observation\", list(map(round, observation, [3]*len(observation))))\n# print(\"observation\", observation)\n\n return observation", "def test_intent_classifier_get_params(self):\n pass", "def process_observation(self, observation):\n #print(\"start_process_obs\")\n processed_observation = np.zeros((NB_AGENTS, OBSERVATION_SIZE))\n\n goliath_type = getattr(env, 'Terran_Goliath')\n battlecruiser_type = getattr(env, 'Terran_Battlecruiser')\n '''\n goliath and battlecruiser type:\n hp_max: 125\n armor: 1\n cooldown_max: 22\n acceleration: 1\n top_speed: 4.57\n damage_amount: 12\n damage_factor: 1\n weapon_range: 192\n sight_range: 256\n seek_range: 160\n\n hp_max: 500\n energy_max: 200\n armor: 3\n cooldown_max: 30\n acceleration: 27\n top_speed: 2.5\n damage_amount: 25\n damage_factor: 1\n weapon_range: 192\n sight_range: 352\n '''\n #print(\"goliath and battlecruiser type:\")\n #print(goliath_type)\n #print(battlecruiser_type)\n\n for i, agent in enumerate(observation.my_unit):\n if agent.hp <= 0:\n continue\n my_x = agent.pos_x\n my_y = agent.pos_y\n my_type_str = agent.unit_type\n my_type = goliath_type if my_type_str == 'Terran_Goliath' else print(\"error in the my_type\")\n t1 = [agent.hp + agent.shield, agent.cooldown, math.atan2(agent.velocity_y, agent.velocity_x),\n math.sqrt((agent.velocity_x) ** 2 + (agent.velocity_y) ** 2), agent.angle,\n 1 if agent.accelerating else -1 if agent.braking else 0, agent.attacking, agent.is_attack_frame]\n t2 = [self.last_action[i] / (env.action_space[1] - 1)]\n t3 = [i.nearest_obstacle_dist for i in agent.pos_info]\n t4 = []\n t5 = []\n t4_max = []\n t5_max = []\n for idx, enemy in enumerate(observation.en_unit):\n en_type_str = enemy.unit_type\n if en_type_str == 'Terran_Battlecruiser':\n en_type = battlecruiser_type\n else:\n continue \n if enemy.hp <= 0:\n t4.extend([0,0,0,0,0,0,0,0,0,0])\n else:\n t4.extend([math.atan2(enemy.pos_y - my_y, enemy.pos_x - my_x), math.sqrt((enemy.pos_x - my_x) ** 2 + (enemy.pos_y - my_y) ** 2),\n math.atan2(enemy.velocity_y, enemy.velocity_x), math.sqrt((enemy.velocity_x) ** 2 + (enemy.velocity_y) ** 2),\n enemy.cooldown, enemy.hp + enemy.shield, enemy.angle, 1 if agent.accelerating else -1 if agent.braking else 0, agent.attacking, agent.is_attack_frame])\n t4_max.extend([math.pi, 320, math.pi, en_type.top_speed, en_type.cooldown_max, en_type.hp_max + en_type.shield_max, math.pi, 1, 1, 1])\n for idx, ally in enumerate(observation.my_unit):\n if i == idx:\n continue\n if ally.hp <= 0:\n t5.extend([0,0,0,0,0])\n else:\n t5.extend([math.atan2(ally.pos_y - my_y, ally.pos_x - my_x), math.sqrt((ally.pos_x - my_x) ** 2 + (ally.pos_y - my_y) ** 2),\n math.atan2(ally.velocity_y, ally.velocity_x), math.sqrt((ally.velocity_x) ** 2 + (ally.velocity_y) ** 2), ally.hp + ally.shield])\n ally_type = goliath_type\n t5_max.extend([math.pi, 320, math.pi, ally_type.top_speed, ally_type.hp_max + ally_type.shield_max])\n if my_type_str == 
'Terran_Goliath':\n t1_max = [my_type.hp_max + my_type.shield_max, 1, math.pi, my_type.top_speed, math.pi, 1, 1, 1]\n else:\n t1_max = [my_type.hp_max + my_type.shield_max, my_type.cooldown_max, math.pi, my_type.top_speed, math.pi, 1, 1, 1]\n #t4_max = [math.pi, 320, math.pi, en_type.top_speed, en_type.cooldown_max, en_type.hp_max + en_type.shield_max, math.pi, 1, 1, 1]\n #t5_max = [math.pi, 320, math.pi, ally_type.top_speed, ally_type.hp_max + ally_type.shield_max]\n\n #t5_max = [32, 32, type.hp_max + type.shield_max, type.cooldown_max,\n #32, 32, type.hp_max + type.shield_max, type.cooldown_max,\n #32, 32, type.hp_max + type.shield_max, type.cooldown_max,\n #32, 32, type.hp_max + type.shield_max, math.pi,\n #32, 32, type.hp_max + type.shield_max, math.pi,\n #32, 32, type.hp_max + type.shield_max, math.pi]\n\n t1 = np.divide(t1, t1_max) # runtime warning\n t2 = np.array(t2) / 320\n t3 = np.array(t3) / 320\n t4 = np.divide(t4, t4_max)\n t5 = np.divide(t5, t5_max)\n\n processed_observation[i] = np.concatenate([t1, t2, t3, t4, t5])\n\n self.last_my_unit_cnt.append(np.sum(np.array([u.hp+u.shield for u in observation.my_unit]) > 0))\n self.last_enemy_unit_cnt.append(np.sum(np.array([u.hp+u.shield for u in observation.en_unit]) > 0))\n self.last_enemy_unit_hp.append(sum([u.hp + u.shield for u in observation.en_unit]))\n self.accumulated_observation.append(processed_observation)\n\n\n return processed_observation", "def analyse_data(file_name, data_types, agent_types, types):\n metrics_data = pd.read_csv(file_name)\n\n for agent_type in agent_types:\n for data_type in data_types:\n csv_data = {}\n # Separating the data based on the winner type and extracting only what's important\n for element_type in types[agent_type]:\n csv_data[element_type] = list(metrics_data[data_type][metrics_data[agent_type] == element_type])\n\n visualise_data(csv_data, types[agent_type], data_type, agent_type)\n\n print(\"----------------------------------------------------------\")\n print(\"ANOVA test for '{1}' in terms of '{0}':\".format(agent_type, data_type))\n anova_test_data(csv_data, types[agent_type])\n print(\"----------------------------------------------------------\")", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Rdy[1.,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rbk[1.,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rqcd_emu[1,0.0,10.0]\");\n self.modelBuilder.doSet(\"POI\",\"Rbk,Rdy,Rqcd_emu\")", "def _get_observations(self):\n food = np.array(self.game.state.data.food.data)\n walls = np.array(self.game.state.data.layout.walls.data)\n map_shape = walls.shape\n capsules = self.game.state.data.capsules\n pacman_pos = self.game.state.data.agentStates[0].configuration.pos\n\n gosts_pos = list(map(lambda agent: agent.configuration.pos,\n self.game.state.data.agentStates[1:]))\n gosts_scared = list(\n map(lambda agent: agent.scaredTimer > 0, self.game.state.data.agentStates[1:]))\n\n \"\"\"\n 0: empty,\n 1: wall,\n 2: food,\n 3: capsules,\n 4: ghost,\n 5: scared ghost,\n 6: pacman\n \"\"\"\n\n view_slices = ((max(pacman_pos[0]-self.view_distance[0], 0), min(pacman_pos[0]+self.view_distance[0]+1, map_shape[0])),\n (max(pacman_pos[1]-self.view_distance[1], 0), min(pacman_pos[1]+self.view_distance[1]+1, map_shape[1])))\n\n def select(l):\n return l[view_slices[0][0]:view_slices[0][1], view_slices[1][0]:view_slices[1][1]]\n\n obs = np.vectorize(lambda v: 1 if v else 0)(select(walls))\n obs = obs + np.vectorize(lambda v: 2 if v else 0)(select(food))\n\n def pos_to_relative_pos(pos):\n if (pos[0] < 
view_slices[0][0] or view_slices[0][1] <= pos[0]\n or pos[1] < view_slices[1][0] or view_slices[1][1] <= pos[1]):\n return None\n else:\n return pos[0]-view_slices[0][0], pos[1]-view_slices[1][0]\n\n for c_relative_pos in filter(lambda x: x is not None, map(pos_to_relative_pos, capsules)):\n obs[c_relative_pos[0], c_relative_pos[1]] = 3\n\n for i, g_relative_pos in enumerate(map(pos_to_relative_pos, gosts_pos)):\n if (g_relative_pos is not None):\n obs[int(g_relative_pos[0]), int(g_relative_pos[1])\n ] = 5 if gosts_scared[i] else 4\n\n pacman_relative_pos = pos_to_relative_pos(pacman_pos)\n\n obs[pacman_relative_pos[0], pacman_relative_pos[1]] = 6\n\n obs[0, 0] = 2 if np.any(\n food[0:pacman_pos[0]+1, 0:pacman_pos[1]+1]) else 0\n obs[obs.shape[0]-1,\n 0] = 2 if np.any(food[pacman_pos[0]:map_shape[0], 0:pacman_pos[1]+1])else 0\n\n obs[0, obs.shape[1] -\n 1] = 2 if np.any(food[0:pacman_pos[0]+1, pacman_pos[1]:map_shape[0]]) else 0\n obs[obs.shape[0]-1, obs.shape[1]-1] = 2 if np.any(\n food[pacman_pos[0]:map_shape[0], pacman_pos[1]:map_shape[0]]) else 0\n\n # print(np.transpose(obs)[::-1, :])\n\n return obs", "def __getitem__(self, type: str):\n nodes = pandas.read_csv(join(self.base_path, \"nodes.csv\"))\n edges = pandas.read_csv(join(self.base_path, \"held.csv\"))\n if type == \"link\":\n # nodes = pandas.read_csv(join(self.base_path, \"nodes.csv\"))\n held = pandas.read_csv(join(self.base_path, \"held.csv\"))\n\n held = held.query('type == 8')[['src', 'dst']]\n\n # node_pool = set(self.splits[2])\n # held = keep_from_set(held, node_pool)\n\n return Experiment(self.embed, nodes, edges, held, split_on=\"nodes\", neg_sampling_strategy=\"word2vec\", compact_dst=False)\n\n elif type == \"apicall\":\n api_seq = pandas.read_csv(self.experiments['apicall'])\n\n # unique_nodes = set(nodes['id'].values.tolist())\n\n # api_seq_test = api_seq.copy()\n # api_seq_test['src'] = api_seq_test['src'].apply(lambda nid: nid if nid in unique_nodes else None)\n # api_seq_test['dst'] = api_seq_test['dst'].apply(lambda nid: nid if nid in unique_nodes else None)\n # api_seq_test.dropna(axis=0, inplace=True)\n\n # disabled for testing\n # api_seq = api_seq[\n # api_seq['src'].apply(lambda nid: nid in unique_nodes)\n # ]\n #\n # api_seq = api_seq[\n # api_seq['dst'].apply(lambda nid: nid in unique_nodes)\n # ]\n\n node_pool = set(self.splits[2])\n api_seq = keep_from_set(api_seq, node_pool)\n\n return Experiment(self.embed, nodes, edges, api_seq, split_on=\"nodes\", neg_sampling_strategy=\"word2vec\", compact_dst=False)\n\n elif type == \"typeuse\":\n held = pandas.read_csv(join(self.base_path, \"held.csv\"))\n\n held = held.query('type == 2')[['src', 'dst']]\n\n # node_pool = set(self.splits[2])\n # held = keep_from_set(held, node_pool)\n\n return Experiment(self.embed, nodes, edges, held, split_on=\"nodes\", neg_sampling_strategy=\"word2vec\", compact_dst=False)\n\n elif type == \"varuse\":\n var_use = pandas.read_csv(self.experiments['varuse'])\n\n # unique_nodes = set(nodes['id'].values.tolist())\n node_pool = set(self.splits[2])\n\n var_use = var_use[\n var_use['src'].apply(lambda nid: nid in node_pool)\n ]\n\n return Experiment2(self.embed, nodes, edges, var_use, split_on=\"nodes\", neg_sampling_strategy=\"word2vec\")\n\n elif type == \"fname\":\n\n # fname = pandas.read_csv(self.experiments['fname'])\n functions = nodes.query('label == 4096')\n functions['fname'] = functions['name'].apply(lambda name: name.split(\".\")[-1])\n\n functions['src'] = functions['id']\n functions['dst'] = functions['fname']\n\n # 
unique_nodes = set(nodes['id'].values.tolist())\n node_pool = set(self.splits[2])\n\n functions = functions[\n functions['src'].apply(lambda nid: nid in node_pool)\n ]\n\n # use edge splits when outgoing degree is 1\n\n return Experiment2(self.embed, nodes, edges, functions[['src', 'dst']], split_on=\"edges\", neg_sampling_strategy=\"word2vec\")\n\n elif type == \"nodetype\":\n\n types = nodes.copy()\n types['src'] = nodes['id']\n types['dst'] = nodes['label']\n\n print(\"WARNING: Make sure that you target label is stored in the field: label\")\n # raise Warning(\"Make sure that you target label is stored in the field: label\")\n\n node_pool = set(self.splits[2])\n\n types['src'] = types['src'].apply(lambda nid: nid if nid in node_pool else None)\n types = types.dropna(axis=0)\n\n return Experiment3(self.embed, nodes, edges, types[['src', 'dst']], split_on=\"edges\", neg_sampling_strategy=\"word2vec\")\n else:\n raise ValueError(f\"Unknown experiment: {type}. The following experiments are available: [apicall|link|typeuse|varuse|fname|nodetype].\")", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"A0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"Afb,A0\")\n\n \n self.modelBuilder.factory_('expr::Alph(\"2.0*@0/(2.0-@0)\",A0)')\n self.modelBuilder.factory_('expr::Norm(\"3.0/4.0/(2.0+@0)\",Alph)')\n self.modelBuilder.factory_('expr::RAlph(\"@0*@1\",Alph,Norm)')\n self.modelBuilder.factory_('expr::Rpl(\"(@0+@1)\",Norm,Afb)')\n self.modelBuilder.factory_('expr::Rmn(\"(@0-@1)\",Norm,Afb)')", "def assign_agents(particle,self):\n\n self.models[particle].state2agents(self.states[particle])\n\n return self.models[particle]", "def execute(self):\n # set observations for all agents\n observation = self.environment.get_observation_for_agent()\n for ag in self.environment.agents:\n ag.observation = observation\n # main loop\n while not self.environment.end_episode:\n # each agent choose its action\n self.environment.choose_action()\n # next state\n self.environment.calculate_next_state()\n # is the end of the episode\n self.environment.calculate_end_episode()\n # set observations for all agents\n observation = self.environment.get_observation_for_agent()\n for ag in self.environment.agents:\n ag.observation = observation", "def agent_start(self,thisObs): \n action={'vol':0,'price':0}\n \n \"\"\"Changes for Boltzman Exploration\"\"\"\n #choice=self.pick_action_from_dist()\n #action_bin=self.prob_dist_action[choice]\n #action=self.unbin_action(action_bin,thisObs)\n \n \"\"\"Changes for epsilon greedy method\"\"\"\n action= self.return_random_action(thisObs)\n \n self.lastAction=action\n self.lastObs=thisObs\n return action", "def training(self, dataset, repeat=1, gamma=1.0, learning_rate=0.1, model='3yo'):\n for _ in range(repeat):\n for episode in dataset:\n # 1- Get the data stored inside the dataset\n image_index = episode[0] # image of the object\n label_index = episode[1] # label given by the informant\n informant_index = episode[2] # a integer representing the informant\n informant_action = episode[3] # 0=reject, 1=accept\n\n # 2- The agent take an action (with softmax) considering is current state-action table\n # [0=cup, 1=book, 2=ball]\n col = (image_index * self.tot_images) + label_index\n action_array = self.actor_matrix[:, col]\n action_distribution = self._softmax(action_array)\n child_action = np.random.choice(self.tot_actions,\n 1,\n p=action_distribution) # select the action through softmax\n\n # 3- (External) 
New state and reward obtained from the environment\n # u_t = self.critic_vector[0, col] # previous state\n # New state is estimated, in this simple case nothing happen\n # because the next state is terminal\n # u_t1 = u_t # Only in this example they are the same\n\n # 4- (Intrinsic) The informant_reputation is updated:\n # agent_action, agent_confidence, informant_action, reward\n # informant_vector: 0=unreliable, 1=reliable\n # do_actions_agree: False, True\n # Estimating child_confidence\n distance = np.absolute(action_distribution[0] - action_distribution[1])\n child_confidence_distribution = [1 - distance, distance] # non-knowledgeable, knowledgeable\n child_confidence = np.random.choice(2, 1, p=child_confidence_distribution)\n # Check if child and informant agree\n if (child_action == informant_action):\n do_actions_agree = True\n else:\n do_actions_agree = False\n # Increment the counter in the informant_vector.\n # Here we update the counter distribtuion only if\n # the child is confident, because it is only in that\n # case that the child can say if the informant is\n # reliable or not.\n if (do_actions_agree == False and child_confidence == 1):\n self.informant_vector[informant_index][0] += 1 # unreliable\n elif (do_actions_agree == True and child_confidence == 1):\n self.informant_vector[informant_index][1] += 1 # reliable\n elif (do_actions_agree == False and child_confidence == 0):\n self.informant_vector[informant_index][1] += 0 # reliable\n self.informant_vector[informant_index][0] += 0 # unreliable\n elif (do_actions_agree == True and child_confidence == 0):\n self.informant_vector[informant_index][1] += 0 # reliable\n self.informant_vector[informant_index][0] += 0 # unreliable\n else:\n raise ValueError(\"ERROR: anomaly in the IF condition for informant_vector update\")\n # Using the informant_vector given as input it estimates the reputation of the informant\n informant_reputation_distribution = np.true_divide(self.informant_vector[informant_index],\n np.sum(self.informant_vector[informant_index]))\n informant_reputation = np.random.choice(2, 1, p=informant_reputation_distribution)\n\n # 5- (Intrinsic) The Cost is estimated:\n # current_state, agent_action, agent_confidence, informant_action, informant_reputation\n # child_confidence: 0=non-knowledgeable, 1=knowledgeable\n # informant_reputation: 0=non-knowledgeable, 1=knowledgeable\n # action: 0=reject, 1=accept\n # informant_action: 0=reject, 1=accept\n cost = self._return_cost(child_confidence,\n informant_reputation,\n child_action,\n informant_action,\n value=model)\n\n # 6- The utility table is updated using: previous_state, current_state, cost, reward\n # Updating the critic using Temporal Differencing Learning\n # In this simple case there is not a u_t1 state.\n # The current state is considered terminal.\n # We can delete the term (gamma*u_t1)-u_t and considering\n # only (reward-cost) as utility of the state (see Russel Norvig).\n reward = 0 # only for intrinsic learning reward=0\n delta = (reward - cost) # + (gamma*u_t1) - u_t\n self.critic_vector[0, col] += learning_rate * delta\n\n # 7- The actor table is updated using the delta from the critic\n # Update the ACTOR using the delta\n self.actor_matrix[child_action, col] += learning_rate * delta # the current action\n self.actor_matrix[1 - child_action, col] -= learning_rate * delta # the opposite action", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Rdy[1.,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rqcd[1,0.0,10.0]\");\n 
self.modelBuilder.doSet(\"POI\",\"Rdy,Rqcd\")", "def algorithm(df, params):\n\n output = {}\n\n # PUT YOUR OWN IMPLEMENTATION HERE\n # STORE YOUR ANALYSIS OUTPUT IN OUTPUT\n\n return output", "def actor_director_experiment():\n dataset = datasets.ActorDirectorDataset()\n\n # Run the diffusion algorithm\n for left, right in hypalgorithms.find_max_cut(dataset.hypergraph):\n dataset.log_confusion_matrix([left, right])\n dataset.show_clustering_stats([left, right])\n\n hyplogging.logger.info(\"\")\n\n # Run the clique algorithm\n for left, right in hypalgorithms.find_max_cut(dataset.hypergraph, algorithm='clique'):\n dataset.log_confusion_matrix([left, right])\n dataset.show_clustering_stats([left, right])", "def default_params():\n params = {}\n params['dataset'] = 'adult'\n params['engines'] = ['MD','RDA']\n params['iters'] = 10000\n params['epsilon'] = 1.0\n params['delta'] = 0.0\n params['bounded'] = True\n params['frequency'] = 1\n params['seed'] = 0\n params['save'] = None\n params['load'] = None\n params['plot'] = None\n\n return params", "def get_agent(drs):\n agents = []\n for line in drs:\n if line.strip().startswith('sem'):\n datalist = line.split(':')\n for word in datalist:\n if word.count('agent') > 0:\n variable = word[6:7]\n for word in datalist:\n if word.startswith('pred({0}'.format(variable)):\n agents.append(word.split(',')[1])\n return agents", "def get(env, args):\n state_dim = env.observation_space.shape[0]\n action_dim = env.action_space.shape[0]\n\n hidden_sizes_actor = hyper_params[\"NETWORK\"][\"ACTOR_HIDDEN_SIZES\"]\n hidden_sizes_critic = hyper_params[\"NETWORK\"][\"CRITIC_HIDDEN_SIZES\"]\n\n # create actor\n actor = MLP(\n input_size=state_dim,\n output_size=action_dim,\n hidden_sizes=hidden_sizes_actor,\n output_activation=torch.tanh,\n ).to(device)\n\n actor_target = MLP(\n input_size=state_dim,\n output_size=action_dim,\n hidden_sizes=hidden_sizes_actor,\n output_activation=torch.tanh,\n ).to(device)\n actor_target.load_state_dict(actor.state_dict())\n\n # create critic1\n critic1 = MLP(\n input_size=state_dim + action_dim,\n output_size=1,\n hidden_sizes=hidden_sizes_critic,\n ).to(device)\n\n critic1_target = MLP(\n input_size=state_dim + action_dim,\n output_size=1,\n hidden_sizes=hidden_sizes_critic,\n ).to(device)\n critic1_target.load_state_dict(critic1.state_dict())\n\n # create critic2\n critic2 = MLP(\n input_size=state_dim + action_dim,\n output_size=1,\n hidden_sizes=hidden_sizes_critic,\n ).to(device)\n\n critic2_target = MLP(\n input_size=state_dim + action_dim,\n output_size=1,\n hidden_sizes=hidden_sizes_critic,\n ).to(device)\n critic2_target.load_state_dict(critic2.state_dict())\n\n # concat critic parameters to use one optim\n critic_parameters = list(critic1.parameters()) + list(critic2.parameters())\n\n # create optimizer\n actor_optim = optim.Adam(\n actor.parameters(),\n lr=hyper_params[\"LR_ACTOR\"],\n weight_decay=hyper_params[\"WEIGHT_DECAY\"],\n )\n\n critic_optim = optim.Adam(\n critic_parameters,\n lr=hyper_params[\"LR_CRITIC\"],\n weight_decay=hyper_params[\"WEIGHT_DECAY\"],\n )\n\n # noise\n exploration_noise = GaussianNoise(\n action_dim,\n min_sigma=hyper_params[\"EXPLORATION_NOISE\"],\n max_sigma=hyper_params[\"EXPLORATION_NOISE\"],\n )\n\n target_policy_noise = GaussianNoise(\n action_dim,\n min_sigma=hyper_params[\"TARGET_POLICY_NOISE\"],\n max_sigma=hyper_params[\"TARGET_POLICY_NOISE\"],\n )\n\n # make tuples to create an agent\n models = (actor, actor_target, critic1, critic1_target, critic2, critic2_target)\n optims = 
(actor_optim, critic_optim)\n noises = (exploration_noise, target_policy_noise)\n\n # create an agent\n return Agent(env, args, hyper_params, models, optims, noises)", "def ANOVA_stats(subject_list, data_dir, h5_type, model_types):\n\t\n\tall_subjs = []\n\tall_models = []\n\tall_corrs = []\n\tcorrs = dict()\n\n\tfor model in model_types: # 3 total models we are comparing\n\t\tfor s in subject_list:\n\t\t\t# Load the STRF file for each individual model for the subject of interest\n\t\t\t# (phnfeat only, env only, or pitch only)\n\t\t\tstrf_file = '%s/%s/%s_STRF_by_%s_%s.hf5'%(data_dir, s, s, model, h5_type) # The STRF for this subject and this model type (env, phnfeat, or pitch)\n\t\t\twith h5py.File(strf_file,'r') as hf:\n\t\t\t\tcorrs[s] = hf['corrs_%s' %(h5_type.lower())][:] # Load the corrs\n\t\t\tfor ch in np.arange(64):\n\t\t\t\t# We have to do this so we have the subjects and models\n\t\t\t\t# columns that match the correlations vector\n\t\t\t\tall_subjs.append(s)\n\t\t\t\tall_models.append(model)\n\t\t\t\tall_corrs.append(corrs[s][ch])\n\tdata= {'corrs': np.array(all_corrs).ravel(), 'subject': all_subjs, 'STRF_type': all_models}\n\tdf = pd.DataFrame.from_dict(data)\n\tdf\n\t\n\t# Run a Friedman ANOVA (non-parametric equivalent of the repeated measures ANOVA)\n\t# with STRF performance as yhour dependent variable, STRF type (env, phnfeat, pitch) \n\t# as your within subjects measure, and subject as your subject. Look at p-unc for\n\t# the p value\n\tdata = df.groupby(['subject', 'STRF_type']).mean().reset_index()\n\t#print(data)\n\tpg.friedman(data=df, dv='corrs', within='STRF_type', subject='subject')\n\t\n\t# if p<0.05, run post-hoc sign-rank tests\n\n\t#extract just the corr values from the dataframe - will be used for post-hoc sign-rank tests\n\tpitch_x = data['corrs'][np.where(data['STRF_type']=='pitch')[0]]\n\tphnfeat_x = data['corrs'][np.where(data['STRF_type']=='phnfeat')[0]]\n\tenvs_x = data['corrs'][np.where(data['STRF_type']=='envs')[0]]\n\ttotalmodel_x = data['corrs'][np.where(data['STRF_type']=='pitchenvsphnfeat')[0]]\n\n\n\t#run wilcoxon signrank test - compare total model with individual features\n\tprint(pg.wilcoxon(totalmodel_x, phnfeat_x, tail='two-sided')) \n\tprint(pg.wilcoxon(totalmodel_x, envs_x, tail='two-sided')) \n\tprint(pg.wilcoxon(totalmodel_x, pitch_x, tail='two-sided'))\n\n\t#run wilcoxon signrank test - compare individual feature models with each other \n\tprint(pg.wilcoxon(phnfeat_x,pitch_x, tail='two-sided'))\n\tprint(pg.wilcoxon(envs_x, pitch_x, tail='two-sided'))\n\tprint(pg.wilcoxon(phnfeat_x, envs_x, tail='two-sided'))", "def get_traj(agent, env, episode_max_length):\n # MARK: changed\n\n env.reset()\n obs = []\n acts = []\n rews = []\n entropy = []\n info = []\n machines = []\n ob = env.observe()\n\n for _ in range(episode_max_length):\n\n act_prob = agent.get_action(ob)\n machine_prob = agent.get_machine(ob)\n # csprob_n = np.cumsum(act_prob)\n # a = (csprob_n > np.random.rand()).argmax()\n # print(act_prob)\n ob = ob.reshape(ob.shape[0]*ob.shape[1])\n obs.append(ob) # store the ob at current decision making step\n acts.append(act_prob)\n machines.append(machine_prob)\n ob, rew, done, info = env.step(act_prob,machine_prob, repeat=True)\n\n rews.append(rew)\n\n entropy.append(get_entropy(act_prob))\n if done: break\n return {'reward': np.array(rews),\n 'ob': np.array(obs),\n 'action': np.array(acts),\n 'entropy': entropy,\n 'info': info,\n 'machine':np.array(machines)\n }", "def ca_parameters(agent_data, agents_df, agent_name):\r\n rn = 
random()\r\n agent_data.at[0, 'location_1'] = agents_df.loc[0, 'Location1']\r\n agent_data.at[0, 'location_2'] = agents_df.loc[0, 'Location2']\r\n agent_data.at[0, 'location'] = agents_df.loc[0, 'Location']\r\n agent_data.at[0, 'T1'] = 250 + (rn * 100) # between 250-350\r\n agent_data.at[0, 'T2'] = 550 + (rn * 100) # between 550-650\r\n agent_data.at[0, 'T3'] = 800 + (rn * 100) # between 800-900\r\n agent_data.at[0, 'T4'] = 600 + (rn * 100) # between 600-700\r\n agent_data.at[0, 'T5'] = 300 + (rn * 100) # between 300-400\r\n agent_data.at[0, 'q'] = 0.5 + (rn / 10) # between 05-0.6\r\n return agent_data", "def init_params() -> Tuple[TeamDict, int, int, int, str, str, List[str]]:\n with io.open('dados.txt', 'r', encoding='utf-8') as params:\n params_strategies = {}\n PARAMS_LEAGUES, STAKE, PATH, RISK_UPPER, RISK_LOWER, j = [], 0, '', 0, 0, 0\n global_vars = [STAKE, RISK_UPPER, RISK_LOWER, PATH, PARAMS_LEAGUES]\n for i, line in enumerate(params.readlines()):\n line = line.strip(\" \\n\")\n if i <= 24:\n if '*' in line:\n if i == 0:\n pass\n else:\n params_strategies[strategy[:-1]] = params_strategy #dict of dicts\n params_strategy = {} #dict \n strategy = line #key of params_strategies\n params_strategy['name'] = strategy[:-1]\n \n else:\n params_strategy[line.split(':')[0]] = float(line.split(':')[1])\n if i == 24:\n params_strategies[strategy[:-1]] = params_strategy\n\n else:\n if '$' in line:\n j += 1\n continue\n if j < 4:\n global_vars[j-1] = float(line)/100 #stake, risk_upper, risk_lower\n elif j == 4:\n global_vars[j-1] = str(line) #path\n else:\n global_vars[4].append(line) #param_leagues\n \n params_strategies['e11'] = params_strategies['e1']\n return (params_strategies, global_vars[0], global_vars[1], global_vars[2], global_vars[3], global_vars[4])", "def agent_init(self, agent_info):\n\n # First, we get the relevant information from agent_info \n # Note: we use np.random.RandomState(seed) to set the two different RNGs\n # for the planner and the rest of the code\n try:\n self.num_states = agent_info[\"num_states\"]\n self.num_actions = agent_info[\"num_actions\"]\n except:\n print(\"You need to pass both 'num_states' and 'num_actions' in agent_info to initialize the action-value table\")\n self.gamma = agent_info.get(\"discount\", 0.95)\n self.step_size = agent_info.get(\"step_size\", 0.1)\n self.epsilon = agent_info.get(\"epsilon\", 0.1)\n self.planning_steps = agent_info.get(\"planning_steps\", 10)\n self.kappa = agent_info.get(\"kappa\", 0.001)\n\n self.rand_generator = np.random.RandomState(agent_info.get('random_seed', 42))\n self.planning_rand_generator = np.random.RandomState(agent_info.get('planning_random_seed', 42))\n\n # Next, we initialize the attributes required by the agent, e.g., q_values, model, tau, etc.\n # The visitation-counts can be stored as a table as well, like the action values \n self.q_values = np.zeros((self.num_states, self.num_actions))\n self.tau = np.zeros((self.num_states, self.num_actions))\n self.actions = list(range(self.num_actions))\n self.past_action = -1\n self.past_state = -1\n self.model = {}", "def agents_train(self, game_step, episode_now, args):\n # update all trainers, if not in display or benchmark mode\n if episode_now < args.learning_start_episode: return \n if self.update_cnt > 0 and self.var >= self.min_var: self.var *= args.var_discount\n #if episode_now > self.last_update_episode and (episode_now - args.learning_start_episode) % args.learning_fre == 0:\n if game_step % args.learning_fre_step == 0:\n if self.update_cnt == 0: 
print('\\r=start training ...'+' '*100)\n self.last_update_episode = episode_now\n self.update_cnt += 1\n\n # update every agent in different memory batch\n for agent_idx, (actor_c, actor_t, critic_c, critic_t, opt_a, opt_c) in \\\n enumerate(zip(self.actors_cur, self.actors_tar, self.critics_cur, \\\n self.critics_tar, self.optimizers_a, self.optimizers_c)):\n # del if opt_c == None: continue # jump to the next model update\n\n # sample the experience\n _obs_n_o, _action_n, _rew_n, _obs_n_n, _done_n = self.memory.sample( \\\n args.batch_size, agent_idx) # Note_The func is not the same as others\n \n # --use the date to update the CRITIC\n rew = torch.tensor(_rew_n, device=args.device, dtype=torch.float) # set the rew to gpu\n done_n = torch.tensor(~_done_n, dtype=torch.float, device=args.device) # set the rew to gpu\n action_cur_o = torch.from_numpy(_action_n).to(args.device, torch.float)\n obs_n_o = torch.from_numpy(_obs_n_o).to(args.device, torch.float)\n obs_n_n = torch.from_numpy(_obs_n_n).to(args.device, torch.float)\n\n action_tar = torch.cat([a_t(obs_n_n[:, self.obs_size[idx][0]:self.obs_size[idx][1]]).detach() \\\n for idx, a_t in enumerate(self.actors_tar)], dim=1)\n q = critic_c(obs_n_o, action_cur_o).reshape(-1) # q \n q_ = critic_t(obs_n_n, action_tar).reshape(-1) # q_ \n q_ = q_*args.gamma*done_n + rew*torch.tensor(args.reward_scale_par, device=args.device) # q_*gamma*done + reward\n loss_c = torch.nn.MSELoss()(q, q_.detach()) # bellman equation\n opt_c.zero_grad()\n loss_c.backward()\n nn.utils.clip_grad_norm_(critic_c.parameters(), args.max_grad_norm)\n opt_c.step()\n\n # --use the data to update the ACTOR\n # There is no need to cal other agent's action\n opt_c.zero_grad()\n model_out, policy_c_new = actor_c( \\\n obs_n_o[:, self.obs_size[agent_idx][0]:self.obs_size[agent_idx][1]], model_original_out=True)\n # update the aciton of this agent\n action_cur_o[:, self.action_size[agent_idx][0]:self.action_size[agent_idx][1]] = policy_c_new \n loss_pse = torch.mean(torch.pow(model_out, 2))\n loss_a = torch.mul(torch.tensor(-1.0, device=args.device), torch.mean(critic_c(obs_n_o, action_cur_o)))\n\n opt_a.zero_grad()\n (2e-3*loss_pse+loss_a).backward()\n #loss_a.backward()\n nn.utils.clip_grad_norm_(actor_c.parameters(), args.max_grad_norm)\n opt_a.step()\n\n # save the model to the path_dir ---cnt by update number\n #if self.update_cnt > args.start_save_model and self.update_cnt % args.fre4save_model == 0:\n if self.update_cnt > args.start_save_model and self.update_cnt % args.fre4save_model_step == 0:\n time_now = time.strftime('%y%m_%d%H%M')\n print('=time:{} step:{} save'.format(time_now, game_step))\n model_file_dir = os.path.join(args.save_dir, '{}_{}_{}'.format( \\\n args.scenario_name, time_now, game_step))\n if not os.path.exists(model_file_dir): # make the path\n os.mkdir(model_file_dir)\n for agent_idx, (a_c, a_t, c_c, c_t) in \\\n enumerate(zip(self.actors_cur, self.actors_tar, self.critics_cur, self.critics_tar)):\n torch.save(a_c, os.path.join(model_file_dir, 'a_c_{}.pt'.format(agent_idx)))\n torch.save(a_t, os.path.join(model_file_dir, 'a_t_{}.pt'.format(agent_idx)))\n torch.save(c_c, os.path.join(model_file_dir, 'c_c_{}.pt'.format(agent_idx)))\n torch.save(c_t, os.path.join(model_file_dir, 'c_t_{}.pt'.format(agent_idx)))\n\n # update the tar par\n self.actors_tar = self.update_trainers(self.actors_cur, self.actors_tar, args.tao) \n self.critics_tar = self.update_trainers(self.critics_cur, self.critics_tar, args.tao)", "def get_agent(env) -> DDPGAgent:\n assert 
len(env.action_space.shape) == 1\n nb_actions = env.action_space.shape[0]\n action_input = Input(shape=(nb_actions,), name='action_input')\n observation_input = Input(shape=(1,) + env.observation_space.shape, name='observation_input')\n\n range_action_input = 0.5 * (env.action_space.high - env.action_space.low)\n constantBias = 1\n lowb = env.action_space.low\n\n # actor = Flatten(input_shape=(1,) + env.observation_space.shape)(observation_input)\n y = Flatten()(observation_input)\n y = Dense(16)(y)\n y = BatchNormalization()(y)\n y = Activation('relu')(y)\n y = Dense(16)(y)\n y = BatchNormalization()(y)\n y = Activation('relu')(y)\n pht = Dense(1)(y)\n pht = BatchNormalization()(pht)\n pht = Activation('tanh')(pht)\n pht = Lambda(lambda a: (a + K.constant(constantBias)) * K.constant(range_action_input[0])\n + K.constant(lowb[0]))(pht)\n rht = Dense(1)(y)\n rht = BatchNormalization()(rht)\n rht = Activation('tanh')(rht)\n rht = Lambda(lambda a: (a + K.constant(constantBias)) * K.constant(range_action_input[1])\n + K.constant(lowb[1]))(rht)\n axn = Concatenate()([pht, rht])\n actor = Model(inputs=observation_input, outputs=axn)\n\n flattened_observation = Flatten()(observation_input)\n x = Concatenate()([action_input, flattened_observation])\n x = Dense(32)(x)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n x = Dense(32)(x)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n x = Dense(32)(x)\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n x = Dense(1)(x)\n x = Activation('linear')(x)\n critic = Model(inputs=[action_input, observation_input], outputs=x)\n\n memory = SequentialMemory(limit=1000, window_length=1)\n\n random_process = OrnsteinUhlenbeckProcess(theta=.15, mu=0., sigma=.5, size=nb_actions)\n agent = DDPGAgent(nb_actions=nb_actions, actor=actor, critic=critic, critic_action_input=action_input,\n memory=memory, nb_steps_warmup_critic=100, nb_steps_warmup_actor=100,\n gamma=.99, target_model_update=1e-3, random_process=random_process)\n agent.compile(Adam(lr=.001, clipnorm=1.), metrics=['mae'])\n return agent", "def doParametersOfInterest(self):\n self.modelBuilder.doVar(\"kappa_W[1,0.0,2.0]\") \n self.modelBuilder.doVar(\"kappa_Z[1,0.0,2.0]\") \n self.modelBuilder.doVar(\"kappa_tau[1,0.0,3.0]\")\n self.modelBuilder.doVar(\"kappa_mu[1,0.0,5.0]\") \n self.modelBuilder.factory_(\"expr::kappa_mu_expr(\\\"@0*@1+(1-@0)*@2\\\", CMS_use_kmu[0], kappa_mu, kappa_tau)\")\n self.modelBuilder.doVar(\"kappa_t[1,0.0,4.0]\")\n # additional kappa for the anomalous coupling\n self.modelBuilder.doVar(\"kappa_tilde_t[0.0,0.0,4.0]\")\n self.modelBuilder.doVar(\"kappa_b[1,0.0,3.0]\")\n if not self.resolved:\n self.modelBuilder.doVar(\"kappa_g[1,0.0,2.0]\")\n self.modelBuilder.doVar(\"kappa_gam[1,0.0,2.5]\")\n\tself.modelBuilder.doVar(\"BRinv[0,0,1]\")\n self.modelBuilder.out.var(\"BRinv\").setConstant(True)\n # adding additional kappa to list of parameters of interest\n pois = 'kappa_W,kappa_Z,kappa_tau,kappa_t,kappa_tilde_t,kappa_b'\n if not self.resolved:\n pois += ',kappa_g,kappa_gam'\n self.doMH()\n self.modelBuilder.doSet(\"POI\",pois)\n # use modified Higgs Builder\n self.SMH = AnomalousTopHiggsBuilder(self.modelBuilder)\n self.setup()", "def finish_episode(tribes, learners, optimizers, gamma, cuda): \n \n num_learners = len(learners)\n total_norms = [0 for i in range(num_learners)]\n policy_losses = [[] for i in range(num_learners)]\n losses = [[] for i in range(num_learners)]\n T_reward = []\n\n \n for i in range(num_learners):\n\n R = 0\n saved_actions = 
learners[i].saved_actions\n \n for t in tribes:\n if t.name is learners[i].tribe:\n \n # Based on team culture, calculate the team reward for the agent \n culture = t.culture['name']\n \n if culture is 'cooperative':\n T_reward = t.tribal_awards()\n elif culture is 'individualist':\n T_reward = t.tribal_awards()\n elif culture is 'no_fragging':\n T_reward = t.tribal_awards(US_hits = learners[i].US_hits)\n elif culture is 'pacifist':\n T_reward = t.tribal_awards(tag_hist = learners[i].tag_hist)\n elif culture is 'pacifist_exile':\n T_reward = t.tribal_awards(tag_hist = learners[i].tag_hist, \\\n in_banned_hist=learners[i].in_banned_hist)\n elif culture is 'pacifist_follower':\n T_reward = t.tribal_awards(tag_hist = learners[i].tag_hist, \\\n in_target_hist=learners[i].in_target_hist)\n elif culture is 'warlike':\n T_reward = t.tribal_awards(US_hits = learners[i].US_hits,THEM_hits = learners[i].THEM_hits)\n else:\n T_reward = t.tribal_awards()\n \n # For debug only\n # print('Agent{} receives tribal award from Tribe{}'.format(i,t.name))\n # print (T_reward)\n # print (learners[i].rewards)\n \n # Do not implement actor-critic for now\n # value_losses = []\n \n rewards = deque()\n\n for r,T in zip(learners[i].rewards[::-1],T_reward[::-1]):\n # The agent is incentivized to cooperate by an award of 30% of what the tribe takes\n # in by all its members\n R = r + T + gamma * R\n rewards.appendleft(R)\n \n rewards = list(rewards)\n rewards = torch.Tensor(rewards)\n if cuda:\n rewards = rewards.cuda()\n\n # z-score rewards\n rewards = (rewards - rewards.mean()) / (1.1e-7+rewards.std())\n \n #Debug \n #print (rewards) \n \n \"\"\"\n Do not implement actor-critic for now!!!\n for (log_prob, state_value), r in zip(saved_actions, rewards):\n reward = r - state_value.data[0]\n policy_losses.append(-log_prob * Variable(reward))\n r = torch.Tensor([r])\n if cuda:\n r = r.cuda()\n value_losses.append(torch.nn.functional.smooth_l1_loss(state_value,\n Variable(r)))\n\n optimizer.zero_grad()\n loss = torch.stack(policy_losses).sum() + torch.stack(value_losses).sum()\n loss.backward() \n \n \n \"\"\"\n for log_prob, r in zip(saved_actions, rewards):\n r = torch.Tensor([r])\n if cuda:\n r = r.cuda()\n policy_losses[i].append(-log_prob * Variable(r))\n\n optimizers[i].zero_grad()\n losses[i] = torch.stack(policy_losses[i]).sum()\n losses[i].backward()\n \n # Gradient Clipping Update: prevent exploding gradient\n total_norms[i] = torch.nn.utils.clip_grad_norm_(learners[i].parameters(), 8000)\n \n optimizers[i].step()\n learners[i].clear_history() # clear an agent's history at the end of episode\n\n\n return total_norms", "def _get_obs(self) -> np.ndarray:\n if self._obs_type == \"ram\":\n return self.ale.getRAM()\n elif self._obs_type == \"rgb\":\n return self.ale.getScreenRGB()\n elif self._obs_type == \"grayscale\":\n return self.ale.getScreenGrayscale()\n else:\n raise Error(f\"Unrecognized observation type: {self._obs_type}\")", "def generate_data(params):\n data = {}\n pop = model.init_pop(params.ages, params.size, np.exp(params.logInitial), params.seroprevalence)\n model_out = model.solve_ode(params, pop)\n res = model.trace_ages(model_out)\n\n for ii, k in [(Sub.T, 'cases'), (Sub.D, 'deaths'), (Sub.H, 'hospitalized'), (Sub.I, 'infectious'), (Sub.S, 'susceptible'), (Sub.C, 'icu')]:\n data[k] = np.ma.array(res[:,ii])\n\n return data", "def optimize_agent(trial):\n\tmodel_params = optimize_ppo2(trial)\n\t\n\t\"\"\"\n\tenv = SubprocVecEnv([make_env(i, agents) for i in range(num_cpu)])\n\tmodel = 
PPO2(POLICY_TYPE, env, nminibatches=1, **model_params) \n\t# n_steps (int) – The number of steps to run for each environment per update (i.e. rollout buffer size is n_steps * n_envs where n_envs is number of environment copies running in parallel)\n\t# by default n_steps=128. After 128 steps for each env, the policy will be updated. If 3 days per game and 2 seq per day, then every update reqires 128/2/3 = 21 games\n\tenv.env_method(\"set_model_reference\", model.get_parameters())\n\t\"\"\"\n\tenv = TradingGameEnv.TradingGameEnv(player_count = NUM_PLAYERS, other_agent_list = agents,\n\t\t\tseq_per_day = SEQ_PER_DAY, cards_per_suit = CARDS_PER_SUIT, player_hand_count = HAND_COUNT,\n\t\t\trandom_seq = True, self_play = SELF_PLAY, policy_type = POLICY_TYPE, self_copy_freq = SELF_COPY_FREQ,\n\t\t\tobs_transaction_history_size=TRANSACTION_HISTORY_SIZE)\n\tmodel = PPO2(POLICY_TYPE, env, nminibatches=1, **model_params) \n\tenv.set_model_reference(model.get_parameters())\n\t\n\t# save a copy of model every 5e4*num_cpu games\n\tcopy_call_back = CustomCallback(model, env)\n\tcall_back_list = [EveryNTimesteps(n_steps=model_params['n_steps']*10, callback=copy_call_back)]\n\n\tmodel.learn(total_timesteps=TRAINING_TIME_STEPS, callback=call_back_list)\n\t\n\t# Evaluate the result against baseline agent\n\tenv = TradingGameEnv.TradingGameEnv(player_count = NUM_PLAYERS, other_agent_list = agents,\n\t\tseq_per_day = SEQ_PER_DAY, cards_per_suit = CARDS_PER_SUIT, player_hand_count = HAND_COUNT,\n\t\trandom_seq = True, self_play = False, obs_transaction_history_size=TRANSACTION_HISTORY_SIZE,\n\t\teval=True)\n\n\tmean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=EVAL_EPISODES)\n\t\n\twith open(\"optuna_params/\"+str(trial.number)+\".txt\", \"w\") as file:\n\t\t# Writing data to a file\n\t\tfile.write(\"mean reward: \" + str(mean_reward) + \"\tstd reward: \" + str(std_reward) +\"\\n\")\n\t\tfile.write(str(model_params))\n\t\n\treturn -1 * mean_reward", "def get_el_targets(params):\n data = queryDevice.queryDevice(\"\"\"\n SELECT DISTINCT dc.tid, dc.target_type, dc.dc, COUNT(DISTINCT act.assay_id), COUNT(DISTINCT activity_id)\n FROM assays ass\n JOIN(\n SELECT td.tid, td.target_type, COUNT(cd.domain_id) as dc\n FROM target_dictionary td\n JOIN target_components tc\n ON tc.tid = td.tid\n\t\t JOIN component_sequences cs\n\t\t\tON cs.component_id = tc.component_id\n JOIN component_domains cd\n \t\t\tON cd.component_id = cs.component_id\n WHERE td.target_type IN('SINGLE PROTEIN', 'PROTEIN COMPLEX')\n GROUP BY td.tid\n ) as dc\n ON dc.tid = ass.tid\n JOIN activities act\n ON act.assay_id = ass.assay_id\n WHERE act.standard_type IN('Ki','Kd','IC50','EC50', 'AC50')\n AND ass.relationship_type = 'D'\n AND assay_type IN('B')\n AND act.standard_relation IN('=')\n AND standard_units = 'nM'\n AND standard_value <= %s\n GROUP BY dc.tid ORDER BY COUNT(activity_id)\"\"\" % (int(params['threshold']) * 1000) , params)\n print \"retrieved data for \", len(data), \"tids.\"\n return data", "def get_variables():\n policer_data = {\n \"policer_data\": {\n \"name\": \"policy1\",\n \"cir\": 450,\n \"cb\": 50000,\n \"rate-type\": \"kbps\",\n \"round-type\": \"closest\",\n \"type\": \"1r2c\",\n \"conform-action\": {\n \"meter-action-type\": \"meter-action-transmit\"\n },\n \"exceed-action\": {\n \"meter-action-type\": \"meter-action-drop\"\n }\n },\n \"policer_data_oper\": {\n \"name\": \"policy1\",\n \"cir\": 450,\n \"cb\": 50000,\n \"rate-type\": \"kbps\",\n \"round-type\": \"closest\",\n \"type\": \"1r2c\",\n 
\"conform-action\": {\n \"meter-action-type\": \"policer:meter-action-transmit\"\n },\n \"exceed-action\": {\n \"meter-action-type\": \"policer:meter-action-drop\"\n }\n },\n \"policer_data_2\": {\n \"name\": \"policy1\",\n \"cir\": 900,\n \"cb\": 50000,\n \"rate-type\": \"kbps\",\n \"round-type\": \"closest\",\n \"type\": \"1r2c\",\n \"conform-action\": {\n \"meter-action-type\": \"meter-action-transmit\"\n },\n \"exceed-action\": {\n \"meter-action-type\": \"meter-action-drop\"\n }\n },\n \"policer_data_oper_2\": {\n \"name\": \"policy1\",\n \"cir\": 900,\n \"cb\": 50000,\n \"rate-type\": \"kbps\",\n \"round-type\": \"closest\",\n \"type\": \"1r2c\",\n \"conform-action\": {\n \"meter-action-type\": \"policer:meter-action-transmit\"\n },\n \"exceed-action\": {\n \"meter-action-type\": \"policer:meter-action-drop\"\n }\n },\n \"policer_data_3\": {\n \"name\": \"policy1\",\n \"cir\": 100,\n \"eir\": 150,\n \"cb\": 200,\n \"eb\": 300,\n \"rate-type\": \"pps\",\n \"round-type\": \"closest\",\n \"type\": \"2r3c-2698\",\n \"conform-action\": {\n \"meter-action-type\": \"meter-action-transmit\"\n },\n \"exceed-action\": {\n \"meter-action-type\": \"meter-action-mark-dscp\",\n \"dscp\": \"AF22\"\n },\n \"violate-action\": {\n \"meter-action-type\": \"meter-action-drop\"\n },\n \"color-aware\": True\n },\n \"policer_data_oper_3\": {\n \"name\": \"policy1\",\n \"cir\": 100,\n \"eir\": 150,\n \"cb\": 200,\n \"eb\": 300,\n \"rate-type\": \"pps\",\n \"round-type\": \"closest\",\n \"type\": \"2r3c-2698\",\n \"conform-action\": {\n \"meter-action-type\": \"policer:meter-action-transmit\"\n },\n \"exceed-action\": {\n \"meter-action-type\": \"policer:meter-action-mark-dscp\",\n },\n \"violate-action\": {\n \"meter-action-type\": \"policer:meter-action-drop\"\n },\n \"color-aware\": True\n },\n\n \"acl_tables\": {\n # settings for policer tables\n \"hc_acl_table\": {\n \"name\": \"table0\",\n \"nbuckets\": 2,\n \"memory_size\": 1048576,\n \"skip_n_vectors\": 12,\n \"miss_next\": \"permit\",\n \"mask\": \"00:00:00:00:00:00:00:00:00:00:00:00:ff:ff:ff:ff\"\n },\n # setting for acl sessions\n \"hc_acl_session\": {\n \"match\": \"00:00:00:00:00:00:00:00:00:00:00:00:C0:A8:7A:01\",\n \"policer_hit_next\": \"policy1\",\n \"color_classfier\": \"exceed-color\",\n },\n \"hc_acl_session2\": {\n \"match\": \"00:00:00:00:00:00:00:00:00:00:00:00:C0:A8:7A:02\",\n \"policer_hit_next\": \"policy1\",\n \"color_classfier\": \"exceed-color\",\n },\n },\n }\n return policer_data", "def work(self, agentInput, type_=\"selectAction\"):\n agentInput = from_numpy(np.array(agentInput)).float().unsqueeze(0) # Add batch dimension with unsqueeze\n\n if self.use_cuda:\n agentInput = agentInput.cuda()\n\n with no_grad():\n action_prob = self.actor_net(agentInput)\n\n if type_ == \"selectAction\":\n c = Categorical(action_prob)\n action = c.sample()\n return action.item(), action_prob[:, action.item()].item()\n elif type_ == \"selectActionMax\":\n return np.argmax(action_prob).item(), 1.0", "def grid_search_param(environmnet, policy='ε–greedy', parameter='alpha'):\n\n\tparameter_values = []\n\tavg_scores = []\n\tavg_steps = []\n\n\tcount = 1\n\n\tfor param_num in np.linspace(0.2, 1, 9):\n\t\tif parameter == 'alpha':\n\t\t\tagent = Q_Agent(alpha=param_num)\n\t\telif parameter == 'gamma':\n\t\t\tagent = Q_Agent(gamma=param_num)\n\n\t\tall_iterations, all_rewards, step_count = agent.train(environmnet, print_results=True, iter_n=1000,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 
policy=policy)\n\t\tavg_scores.append(np.mean(all_rewards))\n\t\tavg_steps.append(np.mean(step_count))\n\t\tparameter_values.append(param_num)\n\t\trewards_data = np.array([all_iterations, all_rewards])\n\t\tstep_data = np.array([all_iterations, step_count])\n\n\t\tnp.savetxt(\n\t\t\t'/Users/matthewgalloway/Documents/RF/q_learning/' + parameter + '_inv/' + parameter + '_rewards_' + str(\n\t\t\t\tparam_num) + '.csv', rewards_data.transpose(), delimiter=\",\")\n\t\tnp.savetxt(\n\t\t\t'/Users/matthewgalloway/Documents/RF/q_learning/' + parameter + '_inv/' + parameter + '_steps_' + str(\n\t\t\t\tparam_num) + '.csv', step_data.transpose(), delimiter=\",\")\n\t\tif count % 50 == 0:\n\t\t\tprint('iteration {} of 10'.format(count))\n\n\t\tcount += 1\n\tresults = {\n\t\t'alpha_values': parameter_values,\n\t\t'avg_scores': avg_scores,\n\t\t'avg_steps': avg_steps,\n\n\t}\n\tprint(results)\n\treturn pd.DataFrame(results)", "def learning_Utility(self):\n # Shape the input that we give to the neural network with the value of sensors, the previous actions the life of the agent \n # Get the results from the sensors according the different movement executed by the agent \n sensors_result_N = self.agent.sensors(self, direction=3) + self.agent.get_energy_coarsed()+\\\n self.agent.rotate_previousAction(3)+[self.agent.get_previous_collision()]\n sensors_result_O = self.agent.sensors(self, direction=2) + self.agent.get_energy_coarsed()+\\\n self.agent.rotate_previousAction(2) + [self.agent.get_previous_collision()]\n sensors_result_S = self.agent.sensors(self, direction=1) + self.agent.get_energy_coarsed()+\\\n self.agent.rotate_previousAction(1) + [self.agent.get_previous_collision()]\n sensors_result_E = self.agent.sensors(self, direction=0) + self.agent.get_energy_coarsed()+\\\n self.agent.rotate_previousAction(0) + [self.agent.get_previous_collision()]\n\n input_nn_N = np.asarray(sensors_result_N).astype(int) # input when the Nord action is performed \n input_nn_O = np.asarray(sensors_result_O).astype(int) # input when the West action is performed\n input_nn_S = np.asarray(sensors_result_S).astype(int) # input when the South action is performed\n input_nn_E = np.asarray(sensors_result_E).astype(int) # input when the West action is performed\n\n self.input_list = [input_nn_E.reshape(1,145),\n input_nn_S.reshape(1,145),\n input_nn_O.reshape(1,145),\n input_nn_N.reshape(1,145)]\n self.U_list = [self.nn.predict(i) for i in self.input_list ] #The utility according the different acts performed \n return self.actionSelector() #Select the action acording a propbabilitics distribution given in the paper", "def target_params(disttype):\n if disttype == 'round':\n me = np.array([2,2])\n cov = np.array([[1,0],[0,1]])\n elif disttype == 'correlated':\n me = np.array([2,2])\n cov = np.array([[1,0.9],[0.9,1]])\n elif disttype == 'close_bimodal':\n me = np.array([[0,0],[4,4]])\n cov = np.array([[1,0],[0,1]])\n elif disttype == 'bimodal':\n me = np.array([[0,0],[10,10]])\n cov = np.array([[1,0],[0,1]])\n return me, cov", "def ensemble_models(input_data: str, test_file=None,models=None,\n models_file=None,\n genome_handler_file=None,\n top_n=10,\n trained=True,\n ensemble_method=\"average\",\n batch_size=64, nb_epoch=100, early_stop=None, mod=None,\n max_x_length=50, min_rt=0, max_rt=120, unit=\"s\", out_dir=\"./\", prefix=\"test\"):\n from AutoSeq import GenomeHandler\n\n # print(\"The number of models:\", len(models))\n\n # test data\n X_test = np.empty(1)\n Y_test = np.empty(1)\n\n y_pr = []\n score = []\n\n model_list = 
dict()\n\n\n if genome_handler_file is not None:\n X_train, Y_train, X_test, Y_test, min_rt, max_rt = data_processing(input_data=input_data, test_file=test_file,\n mod=mod, max_x_length=max_x_length,\n min_rt=min_rt, max_rt=max_rt, unit=unit,\n out_dir=out_dir)\n model_list['dp_model'] = dict()\n model_list['max_x_length'] = X_train.shape[1]\n model_list['aa'] = out_dir + \"/aa.tsv\"\n print(\"max_x_length: %s\" % (max_x_length))\n # read models from genetic search result configure file\n optimizer_name = dict()\n if models_file is not None:\n models = dict()\n gn = pd.read_csv(models_file)\n select_models = gn.sort_values('Val Accuracy', ascending=True).head(top_n)\n genome_handler = pickle.load(open(genome_handler_file, \"rb\"))\n genome_handler.input_shape = X_train.shape[1:]\n select_models = np.array(select_models.iloc[:, 0:(select_models.shape[1] - 2)])\n for i in range(0, select_models.shape[0]):\n #models[i], optimizer_name = genome_handler.decodeOneHot(select_models[i],return_optimizer=True)\n models[i], optimizer_name[i] = genome_handler.decodeOneHotPlusLSTM(select_models[i], return_optimizer=True)\n\n trained = False\n else:\n print(\"\")\n\n if not trained:\n print(\"Training ...\")\n # For each model, train the model\n for (name, model) in models.items():\n print(\"Train model:\", name)\n # perform sample specific training\n res_map = train_model(input_data=input_data, test_file=test_file, batch_size=batch_size,\n nb_epoch=nb_epoch, early_stop=early_stop, mod=mod,\n max_x_length=max_x_length, min_rt=min_rt, max_rt=max_rt, unit=unit,\n out_dir=out_dir, prefix=str(name), model=model,\n optimizer_name=optimizer_name[name])\n\n ## save the model to a file:\n model_file_name = \"model_\" + str(name) + \".h5\"\n model_file_path = out_dir + \"/\" + model_file_name\n res_map[\"model\"].save(model_file_path)\n\n model_list['dp_model'][name] = model_file_path\n\n del res_map\n gc.collect()\n K.clear_session()\n tf.reset_default_graph()\n else:\n print(\"The models have been trained!\")\n\n\n else:\n\n ## Transfer learning\n with open(models_file, \"r\") as read_file:\n model_list = json.load(read_file)\n\n model_folder = os.path.dirname(models_file)\n aa_file = os.path.basename(model_list['aa'])\n aa_file = model_folder + \"/\" + aa_file\n X_train, Y_train, X_test, Y_test, min_rt, max_rt = data_processing(input_data=input_data, test_file=test_file,\n mod=mod, max_x_length=model_list['max_x_length'],\n min_rt=min_rt, max_rt=max_rt, unit=unit,\n out_dir=out_dir,aa_file=aa_file)\n\n\n new_model_list = dict()\n new_model_list['dp_model'] = dict()\n for (name, dp_model_file) in model_list['dp_model'].items():\n print(\"\\nDeep learning model:\", name)\n # keras model evaluation: loss and accuracy\n # load model\n model_name = os.path.basename(dp_model_file)\n model_full_path = model_folder + \"/\" + model_name\n\n model = load_model(model_full_path)\n #new_model = change_model(model, X_train.shape[1:])\n new_model = model\n\n print(\"Perform transfer learning ...\")\n n_layers = len(new_model.layers)\n print(\"The number of layers: %d\" % (n_layers))\n #for layer in new_model.layers:\n # layer_name = str(layer.name)\n # if layer_name.startswith(\"dense\"):\n # break\n # else:\n # layer.trainable = False\n # print(\"layer (frozen:True): %s\" % (layer_name))\n\n new_model.compile(loss='mean_squared_error',\n ## In this case, we cannot change the learning rate.\n optimizer=model.optimizer,\n #optimizer=Adam(lr=0.0001),\n #optimizer=SGD(lr=1e-3, decay=1e-4, momentum=0.9, nesterov=True),\n 
metrics=['mse', 'mae'])\n my_callbacks = RegCallback(X_train, X_test, Y_train, Y_test, min_rt=min_rt, max_rt=max_rt)\n # Save model\n model_chk_path = out_dir + \"/best_model.hdf5\"\n mcp = ModelCheckpoint(model_chk_path, monitor=\"val_mean_squared_error\", save_best_only=True,\n save_weights_only=False,\n verbose=1, mode='min')\n\n ## monitor training information\n # tbCallBack = callbacks.TensorBoard(log_dir='./Graph', histogram_freq=0, write_graph=True, write_images=True)\n new_model.fit(X_train, Y_train, batch_size=batch_size, epochs=nb_epoch, validation_data=(X_test, Y_test),\n callbacks=[my_callbacks, mcp])\n\n ## get the best model\n best_model = load_model(model_chk_path)\n ## save the model to a file:\n model_file_name = \"model_\" + str(name) + \".h5\"\n model_file_path = out_dir + \"/\" + model_file_name\n best_model.save(model_file_path)\n\n new_model_list['dp_model'][name] = model_file_path\n\n gc.collect()\n K.clear_session()\n tf.reset_default_graph()\n\n new_model_list['max_x_length'] = model_list['max_x_length']\n new_aa_file = out_dir + \"/\" + os.path.basename(model_list['aa'])\n copyfile(aa_file, new_aa_file)\n new_model_list['aa'] = new_aa_file\n\n ## Useful for new data prediction\n new_model_list['min_rt'] = min_rt\n new_model_list['max_rt'] = max_rt\n\n model_list = new_model_list\n\n\n # save model data\n #file_all_models = open(out_dir + \"/all_models.obj\", 'wb')\n #pickle.dump(models, file_all_models)\n #file_all_models.close()\n\n ####################################################################################################################\n print(\"Ensemble learning ...\")\n\n\n para = dict()\n para['min_rt'] = min_rt\n para['max_rt'] = max_rt\n\n ## save result\n model_json = out_dir + \"/model.json\"\n with open(model_json, 'w') as f:\n json.dump(model_list, f)\n\n ## evaluation\n if test_file is not None:\n ensemble_predict(model_json,x=X_test,y=Y_test,para=para, batch_size=batch_size,method=ensemble_method,\n out_dir=out_dir,\n prefix=\"final_eval\")\n\n ####################################################################################################################", "def gather_experiment_parameters(self):\n consts = win32com.client.constants.__dicts__[0]\n exp_params = [r for r in consts.keys() if len(r.split(\"EXP_\")) > 1]\n dm_params = [r for r in consts.keys() if len(r.split(\"DM_\")) > 1]\n self.app_param = {} \n self.appdoc_param = {} \n for p in exp_params:\n self.app_param.update({p:self.app.GetParam(consts[p])})\n\n for p in dm_params:\n #self.appdoc_param.update({p:self.app.GetParam(consts[p])}) bug? call appdoc? 
CP\n\n self.appdoc_param.update({p:self.app.GetParam(consts[p])})", "def main(params):\n\n train = []\n test = []\n imdir = params['dest'] + '/{0}/COCO_{0}_{1:012d}.jpg'\n\n if params['v'] == 2:\n train_annotations_file = params['dir'] + '/v2_mscoco_train2014_annotations.json'\n val_annotations_file = params['dir'] + '/v2_mscoco_val2014_annotations.json'\n train_questions_file = params['dir'] + '/v2_OpenEnded_mscoco_train2014_questions.json'\n val_questions_file = params['dir'] + '/v2_OpenEnded_mscoco_val2014_questions.json'\n test_questions_file = params['dir'] + '/v2_Questions_Test_mscoco/v2_OpenEnded_mscoco_test2015_questions.json'\n else:\n train_annotations_file = params['dir'] + '/mscoco_train2014_annotations.json'\n val_annotations_file = params['dir'] + '/mscoco_val2014_annotations.json'\n train_questions_file = params['dir'] + '/OpenEnded_mscoco_train2014_questions.json'\n val_questions_file = params['dir'] + '/OpenEnded_mscoco_val2014_questions.json'\n test_questions_file = params['dir'] + '/Questions_Test_mscoco/v2_OpenEnded_mscoco_test2015_questions.json'\n\n if params['split'] == 1:\n\n print('Loading annotations and questions...')\n train_anno = json.load(open(train_annotations_file, 'r'))\n val_anno = json.load(open(val_annotations_file, 'r'))\n\n train_ques = json.load(open(train_questions_file, 'r'))\n val_ques = json.load(open(val_questions_file, 'r'))\n\n subtype = 'train2014'\n for i in range(len(train_anno['annotations'])):\n ans = train_anno['annotations'][i]['multiple_choice_answer']\n\n answer_dict = sum_over_occurences(train_anno['annotations'][i]['answers'])\n question_id = train_anno['annotations'][i]['question_id']\n image_path = imdir.format(subtype, train_anno['annotations'][i]['image_id'])\n\n question = train_ques['questions'][i]['question']\n\n train.append(\n {'ques_id': question_id, 'img_path': image_path, 'question': question, 'ans': ans,\n 'answers': answer_dict})\n\n subtype = 'val2014'\n for i in range(len(val_anno['annotations'])):\n ans = val_anno['annotations'][i]['multiple_choice_answer']\n\n # A modification to count the number of occurences of each answer and then store\n # them in the json file as well\n answer_dict = sum_over_occurences(val_anno['annotations'][i]['answers'])\n\n question_id = val_anno['annotations'][i]['question_id']\n image_path = imdir.format(subtype, val_anno['annotations'][i]['image_id'])\n\n question = val_ques['questions'][i]['question']\n\n test.append({'ques_id': question_id, 'img_path': image_path, 'question': question, 'ans': ans,\n 'answers': answer_dict})\n else:\n print('Loading annotations and questions...')\n train_anno = json.load(open(train_annotations_file, 'r'))\n val_anno = json.load(open(val_annotations_file, 'r'))\n\n train_ques = json.load(open(train_questions_file, 'r'))\n val_ques = json.load(open(val_questions_file, 'r'))\n test_ques = json.load(open(test_questions_file, 'r'))\n\n subtype = 'train2014'\n for i in range(len(train_anno['annotations'])):\n ans = train_anno['annotations'][i]['multiple_choice_answer']\n question_id = train_anno['annotations'][i]['question_id']\n image_path = imdir.format(subtype, train_anno['annotations'][i]['image_id'])\n\n question = train_ques['questions'][i]['question']\n\n train.append(\n {'ques_id': question_id, 'img_path': image_path, 'question': question, 'ans': ans})\n\n subtype = 'val2014'\n for i in range(len(val_anno['annotations'])):\n ans = val_anno['annotations'][i]['multiple_choice_answer']\n question_id = val_anno['annotations'][i]['question_id']\n 
image_path = imdir.format(subtype, val_anno['annotations'][i]['image_id'])\n\n question = val_ques['questions'][i]['question']\n\n train.append(\n {'ques_id': question_id, 'img_path': image_path, 'question': question, 'ans': ans})\n\n subtype = 'test2015'\n for i in range(len(test_ques['questions'])):\n print(test_ques.keys())\n ans = val_anno['annotations'][i]['multiple_choice_answer']\n question_id = test_ques['questions'][i]['question_id']\n image_path = imdir.format(subtype, test_ques['questions'][i]['image_id'])\n\n question = test_ques['questions'][i]['question']\n\n test.append({'ques_id': question_id, 'img_path': image_path, 'question': question, 'ans': ans})\n\n print('Training sample %d, Testing sample %d...' % (len(train), len(test)))\n\n if v2:\n json.dump(train, open('data/vqa_raw_train.json', 'w'))\n json.dump(test, open('data/vqa_raw_test.json', 'w'))\n else:\n json.dump(train, open('data/VQAv1/vqa_raw_train.json', 'w'))\n json.dump(test, open('data/VQAv1/vqa_raw_test.json', 'w'))", "def info_from_behaviors(behaviors):\n base_abilities = []\n hp_checkpoints = set()\n hp_checkpoints.add(100)\n card_checkpoints = set()\n has_enemy_remaining_branch = False\n\n for idx, es in enumerate(behaviors):\n # Extract the passives and null them out to simplify processing\n if type(es) in PASSIVE_MAP.values():\n base_abilities.append(es)\n behaviors[idx] = None\n\n # Find candidate branch HP values\n if type(es) == ESBranchHP:\n hp_checkpoints.add(es.branch_value)\n hp_checkpoints.add(es.branch_value - 1)\n\n # Find candidate action HP values\n if skill_has_condition(es):\n cond = es.condition\n if cond and cond.hp_threshold:\n hp_checkpoints.add(cond.hp_threshold)\n hp_checkpoints.add(cond.hp_threshold - 1)\n\n # Find checks for specific cards.\n if type(es) == ESBranchCard:\n card_checkpoints.update(es.branch_value)\n\n # Find checks for specific amounts of enemies.\n if type(es) == ESBranchRemainingEnemies or type(es) == ESAttackUPRemainingEnemies:\n has_enemy_remaining_branch = True\n\n return base_abilities, hp_checkpoints, card_checkpoints, has_enemy_remaining_branch", "def evaluate(self):\n tic = time.time()\n print('Running per image evaluation...')\n p = self.params\n # add backward compatibility if useSegm is specified in params\n if p.useSegm is not None:\n p.iouType = 'segm' if p.useSegm == 1 else 'bbox'\n print(\n \"useSegm (deprecated) is not None.\"\n \"Running {} evaluation\".format(p.iouType)\n )\n print('Evaluate annotation type *{}*'.format(p.iouType))\n p.imgIds = list(np.unique(p.imgIds))\n if p.useCats:\n p.catIds = list(np.unique(p.catIds))\n p.maxDets = sorted(p.maxDets)\n self.params = p\n\n self._prepare()\n # loop through images, area range, max detection number\n catIds = p.catIds if p.useCats else [-1]\n\n self.ious = {\n (imgId, catId): self.computeIoU(imgId, catId)\n for imgId in p.imgIds\n for catId in catIds\n }\n # ignore any categories that is not having any attributes\n self.f1s = {\n (imgId, catId): self.computeF1(imgId, catId)\n for imgId in p.imgIds\n for catId in catIds\n if catId in self.FPParams.catsWithAttributes\n }\n\n # self.gt_attributes_ids = []\n\n # loop through images, area range, max detection number\n self.evalImgs = [\n self.evaluateImg(imgId, catId, areaRng, p.maxDets[-1])\n for catId in catIds\n for areaRng in p.areaRng\n for imgId in p.imgIds\n ]\n # self._paramsEval = copy.deepcopy(self.params) # seems do not need it\n toc = time.time()\n print('DONE (t={:0.2f}s).'.format(toc-tic))", "def evaluate_obj(self, hparams):\n\n return 
[self.id, hparams, self.objective(hparams, self.device)]", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.7,0.7]\");\n self.modelBuilder.doVar(\"A0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"Afb,A0\")\n\n # ss templates\n self.modelBuilder.doVar(\"R_ee_os_fakes[0.6,0.0,1.0]\");\n self.modelBuilder.doVar(\"ee16_fakes_norm[1.0, 0.01, 10.]\");\n self.modelBuilder.doVar(\"ee17_fakes_norm[1.0, 0.01, 10.]\");\n self.modelBuilder.doVar(\"ee18_fakes_norm[1.0, 0.01, 10.]\");\n #Remember, cant use spaces in these formulas!\n #self.modelBuilder.options.verbose = 10\n self.modelBuilder.factory_('expr::R_ee16_qcd_os(\"@0*@1\",ee16_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee17_qcd_os(\"@0*@1\",ee17_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee18_qcd_os(\"@0*@1\",ee18_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee16_qcd_ss(\"@0*(1.0-@1)\",ee16_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee17_qcd_ss(\"@0*(1.0-@1)\",ee17_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee18_qcd_ss(\"@0*(1.0-@1)\",ee18_fakes_norm,R_ee_os_fakes)')\n \n self.modelBuilder.factory_('expr::Alph(\"2.0*@0/(2.0-@0)\",A0)')\n self.modelBuilder.factory_('expr::Norm(\"3.0/4.0/(2.0+@0)\",Alph)')\n self.modelBuilder.factory_('expr::RAlph(\"@0*@1\",Alph,Norm)')\n self.modelBuilder.factory_('expr::Rpl(\"(@0+@1)\",Norm,Afb)')\n self.modelBuilder.factory_('expr::Rmn(\"(@0-@1)\",Norm,Afb)')", "def build_experiments(self):\n\n # width=500, height=350, pos_x= 2.0, pos_y=0.0, pos_z= 1.4, angle=-30.0\n cameraRGB = Camera('Camera', PostProcessing='SceneFinal')\n cameraRGB.set_image_size(500, 350)\n cameraRGB.set_position(2.0, 0.0, 1.4)\n cameraRGB.set_rotation(-30.0, 0.0, 0.)\n cameraRGB.set(FOV=100)\n\n camera = Camera('CameraSem', PostProcessing='SemanticSegmentation')\n camera.set_image_size(320, 180)\n camera.set_position(2.0, 0.0, 1.4)\n camera.set_rotation(-30.0, 0.0, 0.)\n camera.set(FOV=100)\n\n if self._city_name == 'Town01':\n poses_tasks = self._poses_town01()\n vehicles_tasks = []\n pedestrians_tasks = []\n for i in range(len(poses_tasks)):\n vehicles_tasks.append(0)\n pedestrians_tasks.append(0)\n\n experiment_vector = []\n\n for weather in self.weathers:\n\n for iteration in range(len(poses_tasks)):\n poses = poses_tasks[iteration]\n vehicles = vehicles_tasks[iteration]\n pedestrians = pedestrians_tasks[iteration]\n\n conditions = CarlaSettings()\n conditions.set(\n SendNonPlayerAgentsInfo=True,\n NumberOfVehicles=vehicles,\n NumberOfPedestrians=pedestrians,\n WeatherId=weather,\n QualityLevel=1\n )\n\n conditions.set(SynchronousMode=True)\n conditions.set(DisableTwoWheeledVehicles=True)\n\n conditions.add_sensor(camera)\n conditions.add_sensor(cameraRGB)\n\n experiment = Experiment()\n experiment.set(\n Conditions=conditions,\n Poses=poses,\n Task=iteration,\n Repetitions=1\n )\n\n experiment_vector.append(experiment)\n\n return experiment_vector", "def learn(self):\n for a in self.agents:\n a.learn()", "def infer_data(self):\n ibs = self.ibs\n # The two matching aids\n self.aid_pair = (self.aid1, self.aid2)\n (aid1, aid2) = self.aid_pair\n self.match_text = ibs.get_match_text(self.aid1, self.aid2)\n # The names of the matching annotations\n self.nid1, self.nid2 = ibs.get_annot_name_rowids((aid1, aid2))\n self.name1, self.name2 = ibs.get_annot_names((aid1, aid2))\n self.other_valid_nids = []\n # The other annotations that belong to these two names\n self.gts_list = 
ibs.get_annot_groundtruth((aid1, aid2))\n self.gt1, self.gt2 = self.gts_list\n # A flat list of all the aids we are looking at\n self.is_split_case = self.nid1 == self.nid2\n self.all_aid_list = ut.unique_ordered([aid1, aid2] + self.gt1 + self.gt2)\n self.all_nid_list_orig = ibs.get_annot_name_rowids(self.all_aid_list)\n self.other_aids = list(set(self.all_aid_list) - {self.aid1, self.aid2})\n\n if self.is_split_case:\n # Split case\n self.nCols = max(2, len(self.other_aids))\n self.nRows = 2 if len(self.other_aids) > 0 else 1\n else:\n # Merge/New Match case\n self.nCols = max(len(self.gt1) + 1, len(self.gt2) + 1)\n self.nRows = 2\n self.nCols = min(self.max_cols, self.nCols)\n\n # Grab not just the exemplars\n\n if ut.VERBOSE or ut.is_developer():\n logger.info(\n '[matchver] __init__ nid1={!r}, nid2={!r} '.format(self.nid1, self.nid2)\n )\n logger.info('[matchver] __init__ self.gts_list=%r ' % (self.gts_list))\n\n if ut.VERBOSE or ut.is_developer():\n logger.info(\n '[matchver] __init__ nid1={!r}, nid2={!r} '.format(self.nid1, self.nid2)\n )\n logger.info('[matchver] __init__ self.gts_list=%r ' % (self.gts_list))", "def get_parameters(self): \n audio_emotions_topic = rospy.get_param(\"~audio_emotions_topic\")\n gcp_name = rospy.get_param(\"~gcp_name\")\n gcp_project = rospy.get_param(\"~gcp_project\")\n gcp_version = rospy.get_param(\"~gcp_version\")\n json_path = rospy.get_param(\"~json_path\")\n model_path = rospy.get_param(\"~model_path\")\n emotions_logfile = rospy.get_param(\"~emotions_logfile\")\n robot_ip = rospy.get_param(\"~robot_IP\")\n s2t_topic = rospy.get_param(\"~s2t_topic\")\n pred_mode = rospy.get_param(\"~pred_mode\")\n raw_audio_topic = rospy.get_param(\"~raw_audio_topic\")\n dest_num_channels = rospy.get_param(\"~dest_num_channels\")\n dest_rate = rospy.get_param(\"~dest_rate\")\n max_iter = rospy.get_param(\"~max_iter\") \n sound_path = rospy.get_param(\"~sound_path\")\n wav_topic = rospy.get_param(\"~wav_topic\")\n stats_logfile = rospy.get_param(\"~stats_logfile\")\n stats_topic = rospy.get_param(\"~stats_topic\")\n return (audio_emotions_topic, gcp_name, gcp_project, gcp_version, json_path, model_path, emotions_logfile, robot_ip, s2t_topic, pred_mode, raw_audio_topic, dest_num_channels, dest_rate, max_iter, sound_path, wav_topic, stats_logfile, stats_topic)", "def get_obs(self):\n\n # Get Distance Object to Gripper and Objectposition from Service Call. 
Needs to be done a second time cause we need the distance and position after the Step execution\n distance_gripper_to_object, position_xyz_object = U.get_distance_gripper_to_object()\n object_pos_x = position_xyz_object[0]\n object_pos_y = position_xyz_object[1]\n object_pos_z = position_xyz_object[2]\n\n # Get Joints Data out of Subscriber\n joint_states = self.joints_state\n elbow_joint_state = joint_states.position[0]\n shoulder_lift_joint_state = joint_states.position[1]\n shoulder_pan_joint_state = joint_states.position[2]\n wrist_1_joint_state = joint_states.position[3]\n wrist_2_joint_state = joint_states.position[4]\n wrist_3_joint_state = joint_states.position[5]\n\n for joint in joint_states.position:\n if joint > 2 * math.pi or joint < -2 * math.pi:\n print(joint_states.name)\n print(np.around(joint_states.position, decimals=3))\n sys.exit(\"Joint exceeds limit\")\n\n # Get Contact Forces out of get_contact_force Functions to be able to take an average over some iterations otherwise chances are high that not both sensors are showing contact the same time\n contact_1_force = self.get_contact_force_1()\n contact_2_force = self.get_contact_force_2()\n\n # Stack all information into Observations List\n observation = []\n for obs_name in self._list_of_observations:\n if obs_name == \"distance_gripper_to_object\":\n observation.append(distance_gripper_to_object)\n elif obs_name == \"elbow_joint_state\":\n observation.append(elbow_joint_state)\n elif obs_name == \"shoulder_lift_joint_state\":\n observation.append(shoulder_lift_joint_state)\n elif obs_name == \"shoulder_pan_joint_state\":\n observation.append(shoulder_pan_joint_state)\n elif obs_name == \"wrist_1_joint_state\":\n observation.append(wrist_1_joint_state)\n elif obs_name == \"wrist_2_joint_state\":\n observation.append(wrist_2_joint_state)\n elif obs_name == \"wrist_3_joint_state\":\n observation.append(wrist_3_joint_state)\n elif obs_name == \"contact_1_force\":\n observation.append(contact_1_force)\n elif obs_name == \"contact_2_force\":\n observation.append(contact_2_force)\n elif obs_name == \"object_pos_x\":\n observation.append(object_pos_x)\n elif obs_name == \"object_pos_y\":\n observation.append(object_pos_y)\n elif obs_name == \"object_pos_z\":\n observation.append(object_pos_z)\n elif obs_name == \"object_type\":\n observation.append(self.object_type)\n elif obs_name == \"min_distance_gripper_to_object\":\n observation.append(self.min_distace)\n else:\n raise NameError('Observation Asked does not exist==' + str(obs_name))\n\n return observation", "def set_parameters(self):\n params = {}\n if self.modelname == 'SI':\n # N1: Pop 1 size after split\n # N2: Pop 2 size after splot\n # Ts: Time from split to present, in 2*Na generation units\n names = ['N1', 'N2', 'Ts']\n values = [1, 1, 1]\n upper_bounds = [20, 20, 10]\n lower_bounds = [0.01, 0.01, 0]\n elif self.modelname == 'IM':\n # N1: Pop 1 size after split\n # N2: Pop 2 size after split\n # m21: Migration from 1 to 2 (2*Na*mm21)\n # m12: Migration from 2 to 1 (2*Na*m12)\n # Ts: Time from split to present, in 2*Na generations\n names = ['N1', 'N2', 'm21', 'm12', 'Ts']\n values = [1, 1, 1, 1, 1]\n upper_bounds = [20, 20, 20, 20, 10]\n lower_bounds = [0.01, 0.01, 0, 0, 0]\n elif self.modelname == 'AM':\n # N1: Pop 1 size after split\n # N2: Pop 2 size after split\n # m21: Migration from 1 to 2 (2*Na*mm21)\n # m12: Migration from 2 to 1 (2*Na*m12)\n # Tam: Time from end of anc migration to split, in 2*Na gens\n # Ts: Time from split to present, in 2*Na 
generations\n names = ['N1', 'N2', 'm21', 'm12', 'Tam', 'Ts']\n values = [1, 1, 1, 1, 0.1, 1]\n upper_bounds = [20, 20, 20, 20, 2, 10]\n lower_bounds = [0.01, 0.01, 0, 0, 0, 0]\n elif self.modelname == 'SC':\n # N1: Pop 1 size after split\n # N2: Pop 2 size after split\n # m21: Migration from 1 to 2 (2*Na*mm21)\n # m12: Migration from 2 to 1 (2*Na*m12)\n # Ts: Time from split to secondary contact, in 2*Na generations\n # Tsc: Time from secondary contact to presesnt, in 2*Na gens\n names = ['N1', 'N2', 'm21', 'm12', 'Ts', 'Tsc']\n values = [1, 1, 1, 1, 1, 0.1]\n upper_bounds = [20, 20, 20, 20, 10, 2]\n lower_bounds = [0.01, 0.01, 0, 0, 0, 0]\n elif self.modelname == 'IM2M':\n # N1: Pop 1 size after split\n # N2: Pop 2 size after split\n # m21: Migration from 1 to 2 (2*Na*mm21)\n # m12: Migration from 2 to 1 (2*Na*m12)\n # mi21: Migration from 1 to 2 in \"islands\" (2*Na*mi21)\n # mi12: Migration from 1 to 2 in \"islands\" (2*Na*mi12)\n # Ts: Time from split to present, in 2*Na generations\n # p: Porpotion of genome evoloving in \"islands\"\n names = ['N1', 'N2', 'm21', 'm12', 'mi21', 'mi12', 'Ts', 'p']\n values = [1, 1, 5, 5, 0.5, 0.5, 1, 0.5]\n upper_bounds = [20, 20, 30, 30, 5, 5, 10, 0.95]\n lower_bounds = [0.01, 0.01, 0, 0, 0, 0, 0, 0.05]\n elif self.modelname == 'AM2M':\n # N1: Pop 1 size after split\n # N2: Pop 2 size after split\n # m21: Migration from 1 to 2 (2*Na*mm21)\n # m12: Migration from 2 to 1 (2*Na*m12)\n # mi21: Migration from 1 to 2 in \"islands\" (2*Na*mi21)\n # mi12: Migration from 1 to 2 in \"islands\" (2*Na*mi12)\n # Tam: Time from end of anc migration to split, in 2*Na gens\n # Ts: Time from split to present, in 2*Na generations\n # p: Porpotion of genome evoloving in \"islands\"\n names = ['N1', 'N2', 'm21', 'm12', 'mi21', 'mi12', 'Tam', 'Ts', 'p']\n values = [1, 1, 5, 5, 0.5, 0.5, 0.1, 1, 0.5]\n upper_bounds = [20, 20, 30, 30, 5, 5, 2, 10, 0.95]\n lower_bounds = [0.01, 0.01, 0, 0, 0, 0, 0, 0, 0.05]\n elif self.modelname == 'SC2M':\n # N1: Pop 1 size after split\n # N2: Pop 2 size after split\n # m21: Migration from 1 to 2 (2*Na*mm21)\n # m12: Migration from 2 to 1 (2*Na*m12)\n # mi21: Migration from 1 to 2 in \"islands\" (2*Na*mi21)\n # mi12: Migration from 1 to 2 in \"islands\" (2*Na*mi12)\n # Ts: Time from split to secondary contact, in 2*Na generations\n # Tsc: Time from secondary contact to presesnt, in 2*Na gens\n # p: Porpotion of genome evoloving in \"islands\"\n names = ['N1', 'N2', 'm21', 'm12', 'mi21', 'mi12', 'Ts', 'Tsc', 'p']\n values = [1, 1, 5, 5, 0.5, 0.5, 1, 0.1, 0.5]\n upper_bounds = [20, 20, 30, 30, 5, 5, 10, 2, 0.95]\n lower_bounds = [0.01, 0.01, 0, 0, 0, 0, 0, 0, 0.05]\n params['Names'] = names\n params['Values'] = values\n params['Upper'] = upper_bounds\n params['Lower'] = lower_bounds\n return params", "def init_trainers(self, args):\n self.actors_cur = [None for _ in range(self.num_agents)]\n self.critics_cur = [None for _ in range(self.num_agents)]\n self.actors_tar = [None for _ in range(self.num_agents)]\n self.critics_tar = [None for _ in range(self.num_agents)]\n self.optimizers_c = [None for _ in range(self.num_agents)]\n self.optimizers_a = [None for _ in range(self.num_agents)]\n input_size_global = sum(self.obs_shape_n) + sum(self.action_shape_n)\n\n if args.restore == True: # restore the model\n game_step = int(args.old_model_name.split('_')[-1][:-1])\n for idx in range(self.num_agents):\n self.actors_cur[idx] = torch.load(args.old_model_name+'a_c_{}.pt'.format(idx))\n self.actors_tar[idx] = 
torch.load(args.old_model_name+'a_t_{}.pt'.format(idx))\n self.critics_cur[idx] = torch.load(args.old_model_name+'c_c_{}.pt'.format(idx))\n self.critics_tar[idx] = torch.load(args.old_model_name+'c_t_{}.pt'.format(idx))\n self.optimizers_a[idx] = optim.Adam(self.actors_cur[idx].parameters(), args.lr_a)\n self.optimizers_c[idx] = optim.Adam(self.critics_cur[idx].parameters(), args.lr_c)\n self.var = self.var - (game_step-args.learning_start_episode*args.per_episode_max_len)*args.var_discount\n self.var = self.min_var if self.var < self.min_var else self.var\n old_data = {'game_step':game_step, 'episode_gone_old':int(game_step/args.per_episode_max_len)}\n\n # Note: if you need load old model, there should be a procedure for juding if the trainers[idx] is None\n for i in range(self.num_agents):\n self.actors_cur[i] = actor_agent(self.obs_shape_n[i], self.action_shape_n[i], \\\n args).to(args.device)\n self.critics_cur[i] = critic_agent(sum(self.obs_shape_n), sum(self.action_shape_n), \\\n args).to(args.device)\n self.actors_tar[i] = actor_agent(self.obs_shape_n[i], self.action_shape_n[i], \\\n args).to(args.device)\n self.critics_tar[i] = critic_agent(sum(self.obs_shape_n), sum(self.action_shape_n), \\\n args).to(args.device)\n self.optimizers_a[i] = optim.Adam(self.actors_cur[i].parameters(), args.lr_a)\n self.optimizers_c[i] = optim.Adam(self.critics_cur[i].parameters(), args.lr_c)\n\n # return the old data, no need to update the trainers\n if args.restore == True: return old_data\n\n self.actors_tar = self.update_trainers(self.actors_cur, self.actors_tar, 1.0) # update the target par using the cur\n self.critics_tar = self.update_trainers(self.critics_cur, self.critics_tar, 1.0) # update the target par using the cur", "def train(opts):\n # Set number of actions\n opts.A = opts.delta_M * opts.delta_N\n # Set random seeds\n set_random_seeds(opts.seed)\n # Create actions mapping\n count_act = 0\n opts.act_to_delta = {}\n opts.delta_to_act = {}\n for i in range(-(opts.delta_N//2), opts.delta_N//2+1):\n for j in range(-(opts.delta_M//2), opts.delta_M//2+1):\n opts.act_to_delta[count_act] = (i, j)\n opts.delta_to_act[(i, j)] = count_act\n count_act += 1\n\n if opts.expert_rewards:\n from data_loader import DataLoaderExpert as DataLoader\n elif opts.expert_trajectories or opts.actorType == 'demo_sidekick' or opts.actorType == 'peek_saliency':\n from data_loader import DataLoaderExpertPolicy as DataLoader\n else:\n from data_loader import DataLoaderSimple as DataLoader\n\n if opts.dataset == 0:\n opts.num_channels = 3\n if opts.mean_subtract:\n # R, G, B means and stds\n opts.mean = [119.16, 107.68, 95.12]\n opts.std = [61.88, 61.72, 67.24]\n else:\n opts.mean = [0, 0, 0]\n opts.std = [1, 1, 1]\n elif opts.dataset == 1:\n opts.num_channels = 1\n if opts.mean_subtract:\n # R, G, B means and stds\n opts.mean = [193.0162338615919]\n opts.std = [37.716024486312811]\n else:\n opts.mean = [0]\n opts.std = [1]\n else:\n raise ValueError('Dataset %d does not exist!'%(opts.dataset))\n\n if opts.expert_trajectories:\n opts.T_sup = opts.T-1\n loader = DataLoader(opts)\n if opts.expert_trajectories:\n agent = AgentSupervised(opts)\n else:\n agent = Agent(opts)\n\n # Create tensorboard writer\n writer = SummaryWriter(log_dir=opts.save_path)\n # Set networks to train\n agent.policy.train()\n # Initiate statistics storage variables\n if opts.load_model == '': \n best_val_error = 100000\n train_history = []\n val_history = []\n epoch_start = 0\n else:\n best_val_error, train_history, val_history, epoch_start = 
load_module(agent, opts)\n\n # To handle job eviction and restarts\n if os.path.isfile(os.path.join(opts.save_path, 'model_latest.net')):\n print('====> Resuming training from previous checkpoint')\n # undo most of the loading done before\n loaded_model = torch.load(os.path.join(opts.save_path, 'model_latest.net'))\n opts = loaded_model['opts']\n epoch_start = loaded_model['epoch'] + 1\n\n loader = DataLoader(opts)\n if opts.expert_trajectories:\n agent = AgentSupervised(opts)\n agent.T_sup = loaded_model['T_sup']\n else:\n agent = Agent(opts) \n\n agent.policy.load_state_dict(loaded_model['state_dict'])\n train_history = loaded_model['train_history']\n val_history = loaded_model['val_history']\n #agent.optimizer.load_state_dict(loaded_model['optimizer'])\n best_val_error = loaded_model['best_val_error']\n\n # Some random selection of images to display\n rng_choices = random.sample(range(300//opts.batch_size), 2) \n # Start training\n for epoch in range(epoch_start, opts.epochs):\n # Initialize epoch specific variables\n depleted = False\n train_err = 0\n train_count = 0\n iter_count = 0\n avg_colln_loss = 0\n\n while not depleted:\n # pano - BxNxMxCx32x32\n if opts.expert_rewards:\n pano, pano_rewards, depleted = loader.next_batch('train')\n pano_maps = None\n elif opts.expert_trajectories or opts.actorType == 'demo_sidekick' or opts.actorType == 'peek_saliency':\n pano, pano_maps, depleted = loader.next_batch('train')\n pano_rewards = None\n else:\n pano, depleted = loader.next_batch('train')\n pano_rewards = None\n pano_maps = None\n # Note: This batch size is the current batch size, not the global batch size. This varies\n\n # when you reach the boundary of the dataset.\n batch_size = pano.shape[0]\n start_idx = get_starts(opts.N, opts.M, batch_size, opts.start_view)\n state = State(pano, pano_rewards, start_idx, opts)\n if opts.expert_trajectories:\n if opts.hybrid_train:\n rec_errs = agent.train_agent_hybrid(state, pano_maps, opts)\n else:\n rec_errs = agent.train_agent(state, pano_maps, opts)\n else:\n # Forward pass\n log_probs, rec_errs, rewards, entropies, decoded, values,\\\n visited_idxes, decoded_all, _ = agent.gather_trajectory(state, eval_opts=None, pano_maps=pano_maps, opts=opts)\n # Backward pass\n agent.update_policy(rewards, log_probs, rec_errs, entropies, values) \n\n # Accumulate statistics\n train_err += rec_errs[-1].data.sum()\n train_count += batch_size\n iter_count += 1\n\n train_err /= train_count\n\n # Evaluate the agent after every epoch\n val_err, _, _, decoded_images = evaluate(loader, agent, 'val', opts)\n\n # Write out statistics to tensorboard\n writer.add_scalar('data/train_error', train_err, epoch+1)\n writer.add_scalar('data/val_error', val_err, epoch+1)\n\n # Write out models and other statistics to torch format file\n train_history.append([epoch, train_err])\n val_history.append([epoch, val_err])\n if best_val_error > val_err:\n best_val_error = val_err\n save_state = {\n 'epoch': epoch,\n 'state_dict': agent.policy.state_dict(),\n 'optimizer': agent.optimizer.state_dict(),\n 'opts': opts, \n 'best_val_error': best_val_error,\n 'train_history': train_history,\n 'val_history': val_history\n }\n if opts.expert_trajectories:\n save_state['T_sup'] = agent.T_sup\n\n torch.save(save_state, os.path.join(opts.save_path, 'model_best.net'))\n\n save_state = {\n 'epoch': epoch,\n 'state_dict': agent.policy.state_dict(),\n 'optimizer': agent.optimizer.state_dict(),\n 'opts': opts, \n 'best_val_error': best_val_error,\n 'train_history': train_history,\n 
'val_history': val_history\n }\n if opts.expert_trajectories:\n save_state['T_sup'] = agent.T_sup\n torch.save(save_state, os.path.join(opts.save_path, 'model_latest.net'))\n\n print('Epoch %d : Train loss: %9.6f Val loss: %9.6f'%(epoch+1, train_err, val_err))\n\n # Reduce supervision gradually\n if opts.expert_trajectories and opts.hybrid_train:\n if (epoch+1) % opts.hybrid_schedule == 0 and agent.T_sup > 0:\n agent.T_sup -= 1\n # Save the model after the first schedule is over\n if epoch+1 == opts.hybrid_schedule:\n torch.save(save_state, os.path.join(opts.save_path, 'model_after_one_schedule.net'))\n\n # Decay expert reward gradually\n if opts.expert_rewards and (epoch+1) % opts.expert_rewards_decay == 0:\n agent.reward_scale_expert /= opts.expert_rewards_decay_factor\n\n # Display three randomly selected batches of panoramas every 10 epochs\n if (epoch+1) % 10 == 0 or epoch == 0:\n for choice in rng_choices:\n for pano_count in range(decoded_images[choice].size(0)):\n x = vutils.make_grid(decoded_images[choice][pano_count], padding=5, normalize=True, scale_each=True, nrow=opts.T//2+1) \n writer.add_image('Validation batch # : %d image # : %d'%(choice, pano_count), x, 0) # Converting this to 0 to save disk space, should be epoch ideally", "def search_a_for_genetic(env:RailEnv,randomized):\r\n schedules = []\r\n occupancy_map=[[] for i in range(len(env.agents))]\r\n\r\n n_timesteps = np.array([])\r\n state_schedule =[]\r\n conv = StateConverter(env)\r\n # Compute the transition and valid action table\r\n model = convert_to_transition(env, conv)\r\n # Calculate the shortest dist from one state to another state\r\n shortest = all_pairs_shortest_paths(conv.num_states, model[0])\r\n random_order_agent = randomized\r\n print(random_order_agent)\r\n\r\n for i in random_order_agent:\r\n # Compute occupancy map\r\n occupancy_map[i] = compute_map(i, random_order_agent, n_timesteps, state_schedule, conv)\r\n\r\n # Compute schedule for each agent based on the occupancy map\r\n each_schedule = a_star_search(SearchEnv(env,conv,model,shortest,i).get_root_node(),occupancy_map[i])\r\n #print(each_schedule)\r\n schedules.append(each_schedule[0])\r\n state_schedule.append(each_schedule[1])\r\n n_timesteps = np.append(n_timesteps, [len(each_schedule[1])])\r\n\r\n # Combine separate actions into a list\r\n actions = combine(schedules,random_order_agent,int(np.max(n_timesteps)))\r\n\r\n return actions", "def extract_data_in_pylot_format(actor_list):\n # Note: the output will include the ego vehicle as well.\n vec_actors = actor_list.filter('vehicle.*')\n vehicles = [\n Obstacle.from_carla_actor(vec_actor) for vec_actor in vec_actors\n ]\n\n person_actors = actor_list.filter('walker.pedestrian.*')\n people = [\n Obstacle.from_carla_actor(ped_actor) for ped_actor in person_actors\n ]\n\n tl_actors = actor_list.filter('traffic.traffic_light*')\n traffic_lights = [\n TrafficLight.from_carla_actor(tl_actor) for tl_actor in tl_actors\n ]\n\n speed_limit_actors = actor_list.filter('traffic.speed_limit*')\n speed_limits = [\n SpeedLimitSign.from_carla_actor(ts_actor)\n for ts_actor in speed_limit_actors\n ]\n\n traffic_stop_actors = actor_list.filter('traffic.stop')\n traffic_stops = [\n StopSign.from_carla_actor(ts_actor) for ts_actor in traffic_stop_actors\n ]\n\n return (vehicles, people, traffic_lights, speed_limits, traffic_stops)", "def agent(obs):\n # dictionary for Memory Patterns data\n obs[\"memory_patterns\"] = {}\n # We always control left team (observations and actions\n # are mirrored appropriately by 
the environment).\n controlled_player_pos = obs[\"left_team\"][obs[\"active\"]]\n # get action of appropriate pattern in agent's memory\n action = get_action_of_agent(obs, controlled_player_pos[0], controlled_player_pos[1])\n # return action\n return action", "def doParametersOfInterest(self):\n \n self.modelBuilder.doVar('expr::cosW(\"0.87681811112\",)')\n self.modelBuilder.doVar('expr::sinW(\"0.48082221247\",)')\n self.modelBuilder.doVar('expr::mZ(\"91.2\",)')\n self.modelBuilder.doVar('expr::Lambda1(\"100.0\",)')\n self.modelBuilder.doVar('expr::e2(\"0.0917\",)')\n self.modelBuilder.doVar('expr::gs2(\"1.533\",)')\n\n # EFT Higgs basis couplings\n\n self.modelBuilder.doVar('cZ[0,-1,1]') \n self.modelBuilder.doVar(\"cZZ[0,-2,2]\") \n self.modelBuilder.doVar(\"cZZt[0,-2,2]\") \n self.modelBuilder.doVar(\"cZB[0,-6,6]\") \n\n poi='cZ,cZZ,cZZt,cZB'\n\n # Amplitude couplings from EFT couplings \n\n self.modelBuilder.doVar('expr::a1(\"@0+1\",cZ)') # (\"2*(@0+1)\",cZ) in AN/Paper but a1 = 1 for signal model and width calculation\n self.modelBuilder.doVar('expr::a2(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZ,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::a3(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZt,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::k1(\"@0*(@1*pow(@2,2)/(pow(@3,2)*pow(@4,2)))\",cZB,e2,Lambda1,sinW,mZ)')\n self.modelBuilder.doVar('expr::k1L1(\"@0/pow(@1,2)\",k1,Lambda1)')\n\n ###### gamma_H ########\n\n # SMEFT relationships for VV couplings (Expressed using amplitude couplings)\n\n self.modelBuilder.doVar('expr::kappa(\"1.0\",)')\n self.modelBuilder.doVar('expr::kappa_tilde(\"0.0\",)') \n\n self.modelBuilder.doVar('expr::a1_WW(\"@0\",a1)')\n self.modelBuilder.doVar('expr::a2_WW(\"@0*@0*@1\",cosW,a2)')\n self.modelBuilder.doVar('expr::a3_WW(\"@0*@0*@1\",cosW,a3)')\n self.modelBuilder.doVar('expr::k1_WW(\"(@2 / (@0*@0 - @1*@1) - 2*@1*@1*@3*@4*@4 /(@5*@5*(@0*@0 - @1*@1)))\",cosW,sinW,k1,a2,Lambda1,mZ)')\n self.modelBuilder.doVar('expr::k2_k1(\"2*@0*@1*@2/(@0*@0 - @1*@1)\",cosW,sinW,k1)')\n self.modelBuilder.doVar('expr::k2_a2(\"-2*@0*@1*@3*@4*@4/((@2*@2)*(@0*@0 - @1*@1))\",cosW,sinW,mZ,a2,Lambda1)')\n self.modelBuilder.doVar('expr::k2(\"@0 + @1\",k2_k1,k2_a2)')\n\n # Determine gamma_H from VV couplings\n\n zz_expr = '\"4*(@0*@0/4. + 0.1695*@3*@3 + 0.09076*@1*@1 + 0.03809*@2*@2 + 0.8095*@0*@3/2. + 0.5046*@0*@1/2. + 0.2092*@1*@3 + 0.1023*@4*@4 + 0.1901*@0*@4/2. + 0.07429*@3*@4 + 0.04710*@1*@4) \",a1,a2,a3,k1,k2'\n ww_expr = '\"4*(@0*@0/4. + 0.1320*@3*@3 + 0.1944*@1*@1 + 0.08075*@2*@2 + 0.7204*@0*@3/2. + 0.7437*@0*@1/2. + 0.2774*@3*@1) \",a1_WW,a2_WW,a3_WW,k1_WW'\n zgamma_expr = '\"4*(1.118600*@0*@0/4. +0.0035*@1*@1 - 0.125010*@0*@1/2. + 0.000003*@1*@1 - 0.00018*@1*@1 + 0.003100*@0*@1/2. +0.00126*@2*@2 + 0.000005*@2*@2 -0.00047*@2*@2)\",a1_WW,kappa,kappa_tilde'\n gg_expr = '\"(1.1068*@0*@0 + 0.0082*@0*@0 - 0.1150*@0*@0 + 2.5717*@1*@1 + 0.0091*@1*@1 - 0.1982*@1*@1)\",kappa,kappa_tilde'\n bb_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n cc_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n tautau_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n mumu_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n gmgm_expr = '\"4*(1.6054*@0*@0/4. + 0.07312*@1*@1 - 0.6854*@0*@1/2. + 0.00002*@1*@1 - 0.0018*@1*@1 + 0.0085*@0*@1/2. 
+ 0.1699*@2*@2 + 0.00002*@2*@2 - 0.0031*@2*@2)\",a1_WW,kappa,kappa_tilde'\n \n self.modelBuilder.doVar('expr::R_WW('+str(ww_expr)+')')\n self.modelBuilder.doVar('expr::R_ZZ('+str(zz_expr)+')')\n self.modelBuilder.doVar('expr::R_Zgamma('+str(zgamma_expr)+')')\n self.modelBuilder.doVar('expr::R_gg('+str(gg_expr)+')')\n self.modelBuilder.doVar('expr::R_bb('+str(bb_expr)+')')\n self.modelBuilder.doVar('expr::R_cc('+str(cc_expr)+')')\n self.modelBuilder.doVar('expr::R_tautau('+str(tautau_expr)+')')\n self.modelBuilder.doVar('expr::R_mumu('+str(mumu_expr)+')')\n self.modelBuilder.doVar('expr:R_gammagamma('+str(gmgm_expr)+')')\n\n self.modelBuilder.doVar('expr::gammaH(\"(0.5824*@0 + 0.2137*@1 + 0.08187*@2 + 0.06272*@3 + 0.02891*@4 + 0.02619*@5 + 0.002270*@6 + 0.001533*@7 + 0.0002176*@8 )/0.9998\",R_bb,R_WW,R_gg,R_tautau,R_cc,R_ZZ,R_gammagamma,R_Zgamma,R_mumu)') \n\n ###########################\n\n self.g1V = GetCoupTerms(1,1,1,-0.0001,\"1V\") # Compensate for scaling of k1 templates \n self.g2V = GetCoupTerms(1,1,1,-0.0001,\"2V\") \n \n self.modelBuilder.doVar(\"expr::g2V_1(\\\"\"+str(self.g2V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1(\\\"((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1_Neg(\\\"-1*((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.doVar(\"expr::g2V_2(\\\"\"+str(self.g2V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2(\\\"((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2_Neg(\\\"-1*((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.doVar(\"expr::g2V_3(\\\"\"+str(self.g2V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3(\\\"((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3_Neg(\\\"-1*((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.doVar(\"expr::g2V_4(\\\"\"+str(self.g2V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4(\\\"((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4_Neg(\\\"-1*((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.doVar(\"expr::g2V_5(\\\"\"+str(self.g2V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5(\\\"((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5_Neg(\\\"-1*((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.doVar(\"expr::g2V_6(\\\"\"+str(self.g2V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6(\\\"((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6_Neg(\\\"-1*((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.doVar(\"expr::g2V_7(\\\"\"+str(self.g2V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7(\\\"((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7_Neg(\\\"-1*((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.doVar(\"expr::g2V_8(\\\"\"+str(self.g2V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T8(\\\"((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T8_Neg(\\\"-1*((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n self.modelBuilder.doVar(\"expr::g2V_9(\\\"\"+str(self.g2V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9(\\\"((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9_Neg(\\\"-1*((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.doVar(\"expr::g2V_10(\\\"\"+str(self.g2V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10(\\\"((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10_Neg(\\\"-1*((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.doVar(\"expr::g2V_11(\\\"\"+str(self.g2V[10])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11(\\\"((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11_Neg(\\\"-1*((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.doVar(\"expr::g2V_12(\\\"\"+str(self.g2V[11])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12(\\\"((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12_Neg(\\\"-1*((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.doVar(\"expr::g2V_13(\\\"\"+str(self.g2V[12])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13(\\\"((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13_Neg(\\\"-1*((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.doVar(\"expr::g2V_14(\\\"\"+str(self.g2V[13])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14(\\\"((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14_Neg(\\\"-1*((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.doVar(\"expr::g2V_15(\\\"\"+str(self.g2V[14])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15(\\\"((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15_Neg(\\\"-1*((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.doVar(\"expr::g2V_16(\\\"\"+str(self.g2V[15])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16(\\\"((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16_Neg(\\\"-1*((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.doVar(\"expr::g2V_17(\\\"\"+str(self.g2V[16])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17(\\\"((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17_Neg(\\\"-1*((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.doVar(\"expr::g2V_18(\\\"\"+str(self.g2V[17])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18(\\\"((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18_Neg(\\\"-1*((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.doVar(\"expr::g2V_19(\\\"\"+str(self.g2V[18])+\"\\\",)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T19(\\\"((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T19_Neg(\\\"-1*((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.doVar(\"expr::g2V_20(\\\"\"+str(self.g2V[19])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20(\\\"((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20_Neg(\\\"-1*((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.doVar(\"expr::g2V_21(\\\"\"+str(self.g2V[20])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21(\\\"((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21_Neg(\\\"-1*((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.doVar(\"expr::g2V_22(\\\"\"+str(self.g2V[21])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22(\\\"((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22_Neg(\\\"-1*((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.doVar(\"expr::g2V_23(\\\"\"+str(self.g2V[22])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23(\\\"((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23_Neg(\\\"-1*((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.doVar(\"expr::g2V_24(\\\"\"+str(self.g2V[23])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24(\\\"((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24_Neg(\\\"-1*((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.doVar(\"expr::g2V_25(\\\"\"+str(self.g2V[24])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25(\\\"((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25_Neg(\\\"-1*((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.doVar(\"expr::g2V_26(\\\"\"+str(self.g2V[25])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26(\\\"((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26_Neg(\\\"-1*((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.doVar(\"expr::g2V_27(\\\"\"+str(self.g2V[26])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27(\\\"((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27_Neg(\\\"-1*((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.doVar(\"expr::g2V_28(\\\"\"+str(self.g2V[27])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28(\\\"((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28_Neg(\\\"-1*((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.doVar(\"expr::g2V_29(\\\"\"+str(self.g2V[28])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29(\\\"((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29_Neg(\\\"-1*((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, 
a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.doVar(\"expr::g2V_30(\\\"\"+str(self.g2V[29])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30(\\\"((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30_Neg(\\\"-1*((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.doVar(\"expr::g2V_31(\\\"\"+str(self.g2V[30])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31(\\\"((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31_Neg(\\\"-1*((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.doVar(\"expr::g2V_32(\\\"\"+str(self.g2V[31])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32(\\\"((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32_Neg(\\\"-1*((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.doVar(\"expr::g2V_33(\\\"\"+str(self.g2V[32])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33(\\\"((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33_Neg(\\\"-1*((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.doVar(\"expr::g2V_34(\\\"\"+str(self.g2V[33])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34(\\\"((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34_Neg(\\\"-1*((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.doVar(\"expr::g2V_35(\\\"\"+str(self.g2V[34])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35(\\\"((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35_Neg(\\\"-1*((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n \n self.modelBuilder.doVar(\"expr::g1V_1(\\\"\"+str(self.g1V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1(\\\"((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1_Neg(\\\"-1*((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.doVar(\"expr::g1V_2(\\\"\"+str(self.g1V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2(\\\"((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2_Neg(\\\"-1*((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.doVar(\"expr::g1V_3(\\\"\"+str(self.g1V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3(\\\"((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3_Neg(\\\"-1*((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.doVar(\"expr::g1V_4(\\\"\"+str(self.g1V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4(\\\"((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4_Neg(\\\"-1*((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.doVar(\"expr::g1V_5(\\\"\"+str(self.g1V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T5(\\\"((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n 
self.modelBuilder.factory_(\"expr::scale_ggH_T5_Neg(\\\"-1*((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n self.modelBuilder.doVar(\"expr::g1V_6(\\\"\"+str(self.g1V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6(\\\"((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6_Neg(\\\"-1*((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.doVar(\"expr::g1V_7(\\\"\"+str(self.g1V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7(\\\"((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7_Neg(\\\"-1*((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.doVar(\"expr::g1V_8(\\\"\"+str(self.g1V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8(\\\"((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8_Neg(\\\"-1*((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.doVar(\"expr::g1V_9(\\\"\"+str(self.g1V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9(\\\"((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9_Neg(\\\"-1*((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.doVar(\"expr::g1V_10(\\\"\"+str(self.g1V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10(\\\"((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10_Neg(\\\"-1*((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n \n self.modelBuilder.doSet(\"POI\",poi)", "def get_parameters(cause_id, age_start, age_end, model_version_type_id,\n hybridizer=False,\n start_date=datetime(2019, 1, 11), end_date=datetime.now(),\n jobs=None):\n if model_version_type_id == 3 and not hybridizer:\n raise ValueError(\"Cannot pull non-hybridizer model version type IDs when you've asked for hybridizer jobs.\")\n if hybridizer:\n logger.info(\"Getting parameters for cause_id {c}, age_start {s}, age_end {e}, and model_version_type_id {m}\".format(\n c=cause_id, s=age_start, e=age_end, m=model_version_type_id\n ))\n model_versions = get_model_versions(cause_id, age_start, age_end, model_version_type_id)\n if jobs is None:\n jobs = get_jobs(start_date=start_date, end_date=end_date, hybridizer=hybridizer)\n jobs = jobs.loc[jobs.model_version_id.isin(model_versions)].copy()\n jobs['ran_covariate_selection'] = jobs['model_version_id'].apply(lambda x:\n check_covariate_selection(x, conn_def='codem'))\n logger.info(f\"{jobs['ran_covariate_selection'].mean()*100} % of jobs ran covariate selection.\")\n jobs.loc[~jobs.ran_covariate_selection, 'runtime_min'] = \\\n jobs.loc[~jobs.ran_covariate_selection, 'runtime_min'] + 60*24*2\n jobs = jobs.loc[(jobs.exit_status == 0) & (jobs.failed == 0)]\n if jobs.empty:\n jobs = pd.DataFrame.from_dict(DEFAULT_PARAMS, orient='columns')\n warnings.warn(\"QPID did not capture any run information for these model versions {}.\".format(\n ', '.join([str(x) for x in model_versions])\n ), RuntimeWarning)\n bad_jobs = jobs.loc[jobs.ram_gb == -1]\n if not bad_jobs.empty:\n warnings.warn(\"Cannot have -1 for ram GB. 
Defaulting to have the ram GB requested instead.\", RuntimeWarning)\n for index, row in bad_jobs.iterrows():\n job_number = row['job_number']\n job_name = row['job_name']\n start_time = row['start_time']\n file_name = f\"FILEPATH\"\n if not os.path.exists(file_name):\n f = open(file_name, 'w')\n f.close()\n jobs.loc[jobs.ram_gb < 0, 'ram_gb'] = jobs.loc[jobs.ram_gb < 0]['ram_gb_requested']\n parameters = jobs[['cores_requested', 'ram_gb', 'ram_gb_requested', 'runtime_min']].mean().to_dict()\n else:\n parameters = {\n 'cores_requested': 3,\n 'ram_gb': 1,\n 'ram_gb_requested': 1,\n 'runtime_min': int(60*24*5)\n }\n logger.info(f\"parameters: {parameters}\")\n return parameters", "def params(self) -> ObjectiveParams:\n pass", "def _set_cv_params(self):\n _base_estimator = self.init_params['base_estimator'] \n ada = {'n_estimators': randint(10, 1000),\n 'learning_rate': _uniform(0.01, 0.1)} \n \n if isinstance(_base_estimator, DecisionTreeClassifier().__class__):\n base = {\n 'base_estimator__criterion': ('gini', 'entropy'),\n 'base_estimator__max_depth': randint(1, 8), \n 'base_estimator__min_samples_leaf': randint(2, 20),\n 'base_estimator__max_features': (0.1, 'auto', 'log2'),\n 'base_estimator__class_weight': ('balanced', None) } \n \n elif isinstance(_base_estimator, LogisticRegression().__class__): \n base = {\n 'base_estimator__C': uniform(0, 1000),\n 'base_estimator__fit_intercept': (True, False),\n 'base_estimator__penalty': ('l1', 'l2') } \n else:\n base = {} \n ada.update(base) \n \n return [ada]", "def __init__(self, env, args):\n super(Agent_PG,self).__init__(env)\n\n ##################\n # YOUR CODE HERE #\n ##################\n self.print_every = args.print_every\n self.n_episode = args.episode\n self.gamma = args.gamma\n self.episode_len = args.episode_len\n self.update_every = args.update_every\n self.var_reduce = args.var_reduce\n self.gae = args.gae\n self.step_upd = args.step_upd\n self.max_step = args.step_train\n self.clip = args.clip\n\n if not self.gae:\n if args.cnn:\n self.model = Model2()\n else:\n self.model = Model()\n else:\n if args.cnn:\n self.model = ModelGAE2()\n else:\n self.model = ModelGAE()\n\n print(self.model)\n\n if args.cnn:\n self.opt = optim.RMSprop(self.model.parameters(), lr=args.learning_rate, weight_decay=0.99)\n else:\n self.opt = optim.Adam(self.model.parameters(), lr=args.learning_rate)\n\n self.state = np.zeros((1, 80, 80))\n self.log_probs = []\n self.rewards = []\n if self.gae:\n self.values = []\n self.entropies = []\n\n self.model_fn = args.model\n if self.model_fn == '':\n self.model_fn = 'agent_pg.pt'\n if args.test_pg:\n self.model_fn = 'pg.baseline.pt'\n\n if args.test_pg:\n #you can load your model here\n print('loading trained model :%s.' 
% self.model_fn)\n state_dict = torch.load(self.model_fn, map_location=lambda storage, location: storage)\n self.model.load_state_dict(state_dict)\n if USE_CUDA:\n self.model.cuda()", "def get_params(self):", "def omission_params(n, prop, model_params, ukf_params):\n \n model_params[\"pop_total\"] = n\n model_params[\"station\"] = None\n \n base_model = Model(**model_params)\n\n ukf_params[\"prop\"] = prop\n ukf_params[\"sample_size\"]= floor(n * prop)\n \n ukf_params[\"index\"], ukf_params[\"index2\"] = omission_index(n, ukf_params[\"sample_size\"])\n \n ukf_params[\"p\"] = np.eye(2 * n) #inital guess at state covariance\n ukf_params[\"q\"] = np.eye(2 * n)\n ukf_params[\"r\"] = np.eye(2 * ukf_params[\"sample_size\"])#sensor noise\n \n ukf_params[\"fx\"] = fx\n ukf_params[\"fx_kwargs\"] = {\"base_model\" : base_model} \n ukf_params[\"fx_kwargs_update\"] = None\n \n ukf_params[\"hx\"] = hx1\n ukf_params[\"hx_kwargs\"] = {\"index2\" : ukf_params[\"index2\"], \n \"n\" : n,\n \"index\" : ukf_params[\"index\"],}\n \n ukf_params[\"obs_key_func\"] = obs_key_func \n ukf_params[\"file_name\"] = ex1_pickle_name(n, prop)\n \n ukf_params[\"light\"] = True\n ukf_params[\"record\"] = True\n return model_params, ukf_params, base_model", "def test(args):\n brain_name = env.brain_names[0]\n brain = env.brains[brain_name]\n env_info = env.reset(train_mode=True)[brain_name]\n\n num_agents = len(env_info.agents)\n print('Number of agents:', num_agents)\n\n # dim of each action\n action_size = brain.vector_action_space_size\n print('Size of each action:', action_size)\n\n # dim of the state space\n states = env_info.vector_observations\n state_size = states.shape[1]\n\n agent = MADDPG(state_size, action_size, actor_layer_dim_1=args.actor_layer_dim_1,\n actor_layer_dim_2=args.actor_layer_dim_2,\n actor_layer_dim_3=args.actor_layer_dim_3,\n critic_layer_dim_1=args.critic_layer_dim_1,\n critic_layer_dim_2=args.critic_layer_dim_2,\n critic_layer_dim_3=args.critic_layer_dim_3)\n\n agent.load(\n \"chkpts/{}/{:02d}_best_model.checkpoint\".format(args.model_path, args.loop_counter))\n\n test_scores = []\n for i_episode in tqdm(range(1, 1+args.test_n_run)):\n # initialize the scores\n scores = np.zeros(num_agents)\n env_info = env.reset(train_mode=True)[\n brain_name] # reset the environment\n states = env_info.vector_observations # get the current states\n dones = [False]*num_agents\n while not np.any(dones):\n actions = agent.act(states) # select actions\n # send the actions to the environment\n env_info = env.step(actions)[brain_name]\n next_states = env_info.vector_observations # get the next states\n rewards = env_info.rewards # get the rewards\n dones = env_info.local_done # see if episode has finished\n scores += rewards # update the scores\n # roll over the states to next time step\n states = next_states\n\n test_scores.append(np.max(scores))\n\n avg_score = sum(test_scores)/len(test_scores)\n print(\"Test Score: {}\".format(avg_score))\n\n return avg_score", "def load_processed_data(animal, name, arguments):\r\n\tpath = os.path.join(paths.path2Output, animal, str(arguments['Function']), \r\n\t\t\t\t\t\tstr(arguments['Init']), str(arguments['Rank']), name)\r\n\t\r\n\tmeta_df = pd.read_csv(os.path.join(path,'meta_df.csv'))\r\n\troi_tensor = np.load(os.path.join(path,'roi_tensor.npy'))\r\n\tacti = np.load(os.path.join(path,'acti.npy'))\r\n\tnorm_acti = np.load(os.path.join(path,'norm_acti.npy'))\r\n\tsmoothed_acti = np.load(os.path.join(path,'smoothed_acti.npy'))\r\n\r\n\r\n\treturn meta_df, roi_tensor, acti, 
norm_acti, smoothed_acti", "def ALLEN_st_cells_1_movies(self):\n exp_dict = self.template_dataset()\n exp_dict = self.add_globals(exp_dict)\n exp_dict['experiment_name'] = 'ALLEN_st_cells_1_movies'\n exp_dict['only_process_n'] = None # MICHELE\n exp_dict['randomize_selection'] = True\n exp_dict['reference_image_key'] = {'proc_stimuli': 'image'}\n exp_dict['reference_label_key'] = {'neural_trace_trimmed': 'label'}\n exp_dict['rf_query'] = [{\n 'rf_coordinate_range': { # Get all cells\n 'x_min': 20,\n 'x_max': 30,\n 'y_min': 50,\n 'y_max': 60,\n },\n 'cre_line': 'Cux2',\n 'structure': 'VISp'}]\n exp_dict['cross_ref'] = 'rf_coordinate_range_and_stimuli'\n exp_dict['store_means'] = [\n 'image',\n 'label'\n ]\n exp_dict['deconv_method'] = 'OASIS'\n exp_dict['cv_split'] = {\n 'cv_split_single_stim': {\n 'target': 0,\n 'split': 0.95\n }\n }\n # exp_dict['cv_split'] = {\n # 'split_on_stim': 'natural_movie_two' # Specify train set\n # }\n exp_dict['neural_delay'] = [8, 13] # MS delay * 30fps for neural data\n exp_dict['slice_frames'] = 2 # 4 MICHELE\n exp_dict['st_conv'] = len(\n range(\n exp_dict['neural_delay'][0],\n exp_dict['neural_delay'][1]))\n exp_dict['grid_query'] = False # False = evaluate all neurons at once\n exp_dict['cc_repo_vars'] = {\n 'output_size': [1, 1],\n 'model_im_size': [152, 304, 1],\n 'loss_function': 'l2',\n 'score_metric': 'pearson',\n 'preprocess': 'resize'\n }\n exp_dict['weight_sharing'] = True\n return exp_dict", "def choose_action( self):\n \"\"\"greedy, random, e-greedy, boltzmann, bayesian\"\"\"\n\tif self.exploration == \"greedy\":\n #Choose an action with the maximum expected value.\n a,allQ = sess.run([q_net.predict,q_net.Q_out],feed_dict={q_net.inputs:[s],q_net.keep_per:1.0})\n a = a[0]\n return a\n if self.exploration == \"random\":\n #Choose an action randomly.\n a = env.action_space.sample()\n if self.exploration == \"e-greedy\":\n #Choose an action by greedily (with e chance of random action) from the Q-network\n if np.random.rand(1) < e or total_steps < pre_train_steps:\n a = env.action_space.sample()\n else:\n a,allQ = sess.run([q_net.predict,q_net.Q_out],feed_dict={q_net.inputs:[s],q_net.keep_per:1.0})\n a = a[0]\n return a\n if self.exploration == \"boltzmann\":\n #Choose an action probabilistically, with weights relative to the Q-values.\n Q_d,allQ = sess.run([q_net.Q_dist,q_net.Q_out],feed_dict={q_net.inputs:[s],q_net.Temp:e,q_net.keep_per:1.0})\n a = np.random.choice(Q_d[0],p=Q_d[0])\n a = np.argmax(Q_d[0] == a)\n return a\n if self.exploration == \"bayesian\":\n #Choose an action using a sample from a dropout approximation of a bayesian q-network.\n a,allQ = sess.run([q_net.predict,q_net.Q_out],feed_dict={q_net.inputs:[s],q_net.keep_per:(1-e)+0.1})\n a = a[0]\n return a", "def ALLEN_ss_cells_1_movies(self):\n exp_dict = self.template_dataset()\n exp_dict = self.add_globals(exp_dict)\n exp_dict['experiment_name'] = 'ALLEN_ss_cells_1_movies'\n exp_dict['only_process_n'] = 1\n exp_dict['randomize_selection'] = True\n exp_dict['reference_image_key'] = {'proc_stimuli': 'image'}\n exp_dict['reference_label_key'] = {'neural_trace_trimmed': 'label'}\n exp_dict['rf_query'] = [{\n 'rf_coordinate_range': { # Get all cells\n 'x_min': 20,\n 'x_max': 30,\n 'y_min': 50,\n 'y_max': 60,\n },\n 'cre_line': 'Cux2',\n 'structure': 'VISp'}]\n exp_dict['cross_ref'] = 'rf_coordinate_range_and_stimuli'\n exp_dict['store_means'] = [\n 'image',\n 'label'\n ]\n # exp_dict['deconv_method'] = 'c2s'\n exp_dict['cc_repo_vars'] = {\n 'output_size': [1, 1], # target variable -- 
neural activity,\n 'model_im_size': [152, 304, 1], # [152, 304, 1],\n 'loss_function': 'pearson',\n 'score_metric': 'pearson',\n 'preprocess': 'resize'\n }\n exp_dict['cv_split'] = {\n 'cv_split_single_stim': {\n 'target': 0,\n 'split': 0.9\n }\n }\n # exp_dict['cv_split'] = {\n # 'split_on_stim': 'natural_movie_two' # Specify train set\n # }\n return exp_dict", "def run_actor(self, local_others, local_v, goals, epsilon, sess):\n # convert to batch\n obs_others = np.array(local_others)\n v_obs = np.array(local_v)\n\n feed = {self.obs_others:obs_others, self.v_obs:v_obs,\n self.v_goal:goals}\n actions_argmax = sess.run(self.argmax_Q, feed_dict=feed)\n\n actions = np.zeros(self.n_agents, dtype=int)\n for idx in range(self.n_agents):\n if np.random.rand(1) < epsilon:\n actions[idx] = np.random.randint(0, self.l_action)\n else:\n actions[idx] = actions_argmax[idx]\n\n return actions", "def def_actif_param(self):\n\n self.param_is_actif={}\n try:\n ff=open('Cards/mapping_card.dat')\n except:\n for i in range(1,self.nb_card+1):\n self.param_is_actif[i]=1 #if no file defined all card are supose to be used\n self.actif_param=range(1,self.nb_card+1)\n return\n\n self.actif_param=[]\n for line in ff:\n split=line.split()\n nb=int(split[0])\n actif=int(split[-1])\n self.param_is_actif[nb]=actif\n if actif:\n self.actif_param.append(nb)\n\n if len(self.param_is_actif)!=self.nb_card:\n print 'WARNING: wrong mapping file'", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.70,0.70]\");\n self.modelBuilder.doSet(\"POI\",\"Afb\")\n\n # ss templates\n self.modelBuilder.doVar(\"Rdy_mumu_ss[1.0,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rdy_ee_ss[1.0,0.0,10.0]\");\n \n self.modelBuilder.factory_('expr::Rpl(\"(1.+@0)\",Afb)')\n self.modelBuilder.factory_('expr::Rmn(\"(1.-@0)\",Afb)')", "def prepare_experiment(assumptions):\n print(\"\\nGenerate species parameters\")\n np.random.seed(assumptions['seed']) \n params = MakeParams(assumptions) \n if assumptions[\"selected_function\"] == \"f5_invader_suppression\":\n print(\"\\nDraw invader feature\")\n params = create_invader(params, assumptions)\n print(params[\"c\"])\n \n print(\"\\nDraw per-capita function and cost\")\n f1_species_smooth, f1_species_rugged, f2_species_smooth, f2_species_rugged = draw_species_function(assumptions)\n params.update({\"f1_species_smooth\": f1_species_smooth, \"f1_species_rugged\": f1_species_rugged, \"f2_species_smooth\": f2_species_smooth, \"f2_species_rugged\": f2_species_rugged})\n gi = draw_species_cost(f1_species_smooth, assumptions)\n params.update({\"g\": gi})\n \n print(\"\\nConstruct plate\")\n np.random.seed(assumptions['seed']) \n plate = make_plate(assumptions,params)\n \n print(\"\\nAdd community function to plate\")\n plate = add_community_function(plate, assumptions, params)\n \n if not pd.isnull(assumptions[\"overwrite_plate\"]) :\n print(\"\\nUpdating the initial plate composition by overwrite_plate\")\n plate = overwrite_plate(plate, assumptions)\n \n print(\"\\nPrepare Protocol\")\n #Extract Protocol from protocol database\n algorithms = make_algorithms(assumptions)\n params_algorithm = algorithms[algorithms['algorithm_name'] == assumptions['protocol']]\n \n #Params_simulation by default contains all assumptions not stored in params.\n params_simulation = dict((k, assumptions[k]) for k in assumptions.keys() if k not in params.keys())\n \n return params, params_simulation , params_algorithm, plate", "def measureAll(authors_texts,sectorialized_agents):\n 
authors_texts=P.text.aux.textFromAuthors(authors_texts,self.topm_dict[\"sectorialized_agents\"])\n authors_measures={}\n # análise de cada mensagem e de cada autor\n for author in authors_texts:\n authors_measures[author]={}\n texts=authors_texts[author]\n authors_measures[author][\"raw_strings\"]=P.text.raw.analyseAll(texts)\n authors_measures[author][\"pos\"]= P.text.pos.analyseAll(authors_analysis[author][\"raw_analysis\"])\n authors_measures[author][ \"wordnet\" ]=P.text.wordnet.analyseAll(authors_analysis[author][\"pos_analysis\"])\n authors_measures[author][\"tfIdf\"]=P.text.tfIdf.analyseAll(texts) # tfIdf de cada texto e do autor, numeric: mean e std das distancias\n # análise de cada setor e da estrutura toda\n# sectors_texts=P.text.aux.textFromSectors(authors_text,sectorialized_agents)\n sectors_measures={}\n for sector in sectorialized_agents:\n sectors_measures[sector][\"raw_strings\"]=P.text.raw.sectorsAnalyseAll(authors_analysis,sectorialized_agents[sector])\n sectors_measures[sector][\"pos\"]= P.text.pos.sectorsAnalyseAll(authors_analysis,sectorialized_agents[sector])\n sectors_measures[sector][\"wordnet\"]= P.text.wordnet.sectorsAnalyseAll(authors_analysis,sectorialized_agents[sector])\n # tfIdf de cada texto e de cada autor, numeric: mean e std das distancias por texto e por autor, e media e etd dos autores\n sectors_measures[sector][\"tfIdf\"]= P.text.tfIdf.sectorsAnalyseAll(authors_analysis,sectorialized_agents[sector])\n\n# texts=[sectors_texts[i] for i in (\"peripherals\",\"intermediaries\",\"hubs\")]\n# sectors_analysis[\"raw_strings\"]=P.text.raw.analyseAll(texts)\n# sectors_analysis[\"pos\"]= P.text.pos.analyseAll(sectors_analysis[\"raw_analysis\"])\n# sectors_analysis[ \"wordnet\" ]=P.text.wordnet.analyseAll(sectors_analysis[\"pos_analysis\"])\n# sectors_analysis[\"tfIdf\"]=P.text.tfIdf.tfIdf(texts)\n\n overall_measures[\"raw_strings\"]=P.text.raw.systemAnalysis(sectors_analysis) # medias de toda a rede por mensagem, por autor e por setor\n overall_measures[\"pos\"]=P.text.raw.systemAnalysis(sectors_analysis) # medias de toda a rede por mensagem, por autor e por setor\n overall_measures[\"wordnet\"]=P.text.raw.systemAnalysis(sectors_analysis) # medias de toda a rede por mensagem, por autor e por setor\n # tfIdf measurespor texto, autor e setor, numeric: media e desvio das distancias por cada grupo, media e desvio dos setores e dos autores\n overall_measures[\"tfIdf\"]=P.text.tfIdf.systemAnalysis(sectors_analysis) # medias de toda a rede por mensagem, por autor e por setor\n\n del authors_texts,sectorialized_agents,author, sector\n return locals()", "def param_info():\n\n\tgizmo_names = syn.getGizmoNames()\n\n\tfor gizmo in gizmo_names:\n\t\tparams = syn.getParameterNames(gizmo)\n\t#doesnt get all parameters from gizmos i.e. 
WaveFreq\n\n\t# get all info on the 'WaveFreq' parameter\n\tGIZMO = 'aStim2'\n\tPARAMETER = 'WaveFreq'\n\n\t# info = syn.getParameterInfo(GIZMO, PARAMETER)\n\t#\n\t# # get the array size (should be 100)\n\t# sz = syn.getParameterSize(GIZMO, PARAMETER)\n\t#\n\t# # write values 1 to 50 in second half of buffer\n\t# result = syn.setParameterValues(GIZMO, PARAMETER, np.arange(1, 51), 50)\n\t#\n\t# # read all values from buffer\n\t# syn.getParameterValues(GIZMO, PARAMETER, sz)\n\t#\n\t# # get all info on the 'Go' parameter\n\t# PARAMETER = 'Go'\n\t# info = syn.getParameterInfo(GIZMO, PARAMETER)\n\t#\n\t# # flip the switch\n\t# result = syn.setParameterValue(GIZMO, PARAMETER, 1)\n\t#\n\t# # check the value\n\tfreq = syn.getParameterValue(GIZMO, PARAMETER)\n\tprint('value =', freq)\n\tfreq = [freq]\n\n\t# also verify visually that the switch slipped in the run\n\t# time interface. This state change will be logged just\n\t# like any other variable change and saved with the runtime\n\t# state.\n\n\tnumTrials = 5 #total number of trials across stimuli\n\tISI = [2.0, 3.0, 4.0, 5.0] # ISI in seconds\n\n\t# flash parameters\n\tflash_dur = [.001] # flash durs in seconds (100 ms, 200 ms)\n\tluminance = [[1, 1, 1], [.86, .86, .86], [0, .1, 1]] # white , grayish, purple just for testing\n\n\t# auditory parameters\n\tduration = [.005] # in seconds; pulseDur in TDT\n\tsound_levels = [20.0, 40.0, 60.0, 80.0] # dB; waveAmp in TDT\n\n\t# Auditory on (T/F? if T then A+V, if F then Visual only)\n\tstims = {0: \"auditory_only\",\n\t\t\t 1: \"visual_only\",\n\t\t\t 2: \"A+V\"\n\t\t\t }\n\n\texper = Experiment(numTrials=numTrials, ISI=ISI, flash_dur=flash_dur, luminance=luminance, wave_freq=freq,\n\t\t\t\t\t pulse_dur=duration, wave_amp=sound_levels, stimulus=stims)\n\texper.run_experiment()", "def agent_init(self, agent_info):\n\n # First, we get the relevant information from agent_info \n # NOTE: we use np.random.RandomState(seed) to set the two different RNGs\n # for the planner and the rest of the code\n try:\n self.num_states = agent_info[\"num_states\"]\n self.num_actions = agent_info[\"num_actions\"]\n except:\n print(\"You need to pass both 'num_states' and 'num_actions' in agent_info to initialize the action-value table\")\n self.gamma = agent_info.get(\"discount\", 0.95)\n self.step_size = agent_info.get(\"step_size\", 0.1)\n self.epsilon = agent_info.get(\"epsilon\", 0.1)\n self.planning_steps = agent_info.get(\"planning_steps\", 10)\n\n self.rand_generator = np.random.RandomState(agent_info.get('random_seed', 42))\n self.planning_rand_generator = np.random.RandomState(agent_info.get('planning_random_seed', 42))\n\n # Next, we initialize the attributes required by the agent, e.g., q_values, model, etc.\n # A simple way to implement the model is to have a dictionary of dictionaries, \n # mapping each state to a dictionary which maps actions to (reward, next state) tuples.\n self.q_values = np.zeros((self.num_states, self.num_actions))\n self.actions = list(range(self.num_actions))\n self.past_action = -1\n self.past_state = -1\n self.model = {} # model is a dictionary of dictionaries, which maps states to actions to \n # (reward, next_state) tuples", "def get_subset_by_areas(sess_no, raw_path, \n align_on, from_time, to_time, \n target_areas,\n only_correct_trials = True, renorm = False, elec_type = 'grid' ):\n tinfo_path = raw_path + 'trial_info.mat'\n rinfo_path = raw_path + 'recording_info.mat'\n \n # get all data\n data_filtered = get_preprocessed_from_raw(sess_no, raw_path, \n align_on, from_time, 
to_time)\n \n # don't keep missing data // keep only_correct_trials if True\n \n responses = io.get_responses(tinfo_path)\n if only_correct_trials == False:\n ind_to_keep = (responses == responses).flatten()\n else:\n ind_to_keep = (responses == 1).flatten()\n \n #data1 =data1[ind_to_keep, :, :] # in the same time\n #data2 =data2[ind_to_keep, :, :]\n \n data_filtered = data_filtered[ind_to_keep,:,:]\n\n \n # select electrode and cut the additionnal time\n \n area_names = io.get_area_names(rinfo_path)\n \n idx = []\n for count, area in enumerate(area_names):\n if area in target_areas:\n idx.append(count) \n \n data_filtered = data_filtered[:, idx, :] \n \n\n ## change type \n data_filtered = data_filtered.astype(np.float32)\n \n if elec_type == 'single':\n data_filtered = data_filtered.reshape(data_filtered.shape[0]*data_filtered.shape[1], data_filtered.shape[2])\n data_filtered = np.expand_dims(data_filtered, axis=1)\n \n\n \n elif elec_type == 'average':\n data_filtered = np.mean(data_filtered, axis=1, keepdims=True)\n\n \n #elif elec_type == 'grid':\n #data_filtered = data_filtered\n\n elif elec_type != 'grid':\n raise ValueError('Type \\'' + elec_type + '\\' not supported. Please ' + \n 'choose one of \\'single\\'|\\'grid\\'|\\'average\\'.')\n \n # renorm data : mean = 0 and var = 1\n if renorm == True :\n data_filtered = pp.renorm(data_filtered)\n \n ### variable for shape\n #n_chans1 = len(idx)\n \n #samples_per_trial = data_filtered.shape[2] \n \n return( data_filtered )", "def get_estimates(model):\n from itertools import product\n from pymc3 import summary\n\n subjects = model.data['subject'].unique().astype(np.int)\n parameters = ['v', 'gamma', 's', 'tau', 't0']\n estimates = pd.DataFrame()\n MAP = extract_modes(model.trace)\n combinations = list(product(*[model.design['factor_conditions'][factor]\n for factor in model.design['factors']]))\n subject_template = pd.DataFrame({factor: [combination[f]\n for combination in combinations]\n for f, factor\n in enumerate(model.design['factors'])},\n index=np.zeros(1))\n if model.type == 'hierarchical':\n summary_table = summary(model.trace[0])\n elif model.type == 'individual':\n summary_tables = [summary(trace)\n for trace in model.trace]\n else:\n raise ValueError(\n 'Model type not understood. 
Make sure \"make_model\" has already been called.')\n for subject in subjects:\n subject_estimates = subject_template.copy()\n subject_estimates.loc[:, 'subject'] = np.array([subject])\n for parameter in parameters:\n subject_template[parameter] = np.nan\n subject_template[parameter + '_hpd_2.5'] = np.nan\n subject_template[parameter + '_hpd_97.5'] = np.nan\n subject_template[parameter] = np.nan\n\n dependence = model.design[parameter]['dependence']\n if dependence is None:\n # Parameter is fixed\n if model.type == 'hierarchical':\n # add participant paramaters\n subject_estimates[parameter] = MAP[0][parameter][subject][0]\n subject_estimates[parameter + '_hpd_2.5'] = summary_table.loc[parameter +\n '__{}_0'.format(subject), 'hpd_2.5']\n subject_estimates[parameter + '_hpd_97.5'] = summary_table.loc[parameter +\n '__{}_0'.format(subject), 'hpd_97.5']\n # add population parameters\n if (parameter + '_mu') in summary_table.index:\n subject_estimates[parameter +\n '_mu'] = summary_table.loc[parameter + '_mu', 'mean']\n subject_estimates[parameter +\n '_mu_hpd_2.5'] = summary_table.loc[parameter + '_mu', 'hpd_2.5']\n subject_estimates[parameter +\n '_mu_hpd_97.5'] = summary_table.loc[parameter + '_mu', 'hpd_97.5']\n\n elif model.type == 'individual':\n # add participant paramaters\n subject_estimates[parameter] = MAP[subject][parameter][0][0]\n subject_estimates[parameter +\n '_hpd_2.5'] = summary_tables[subject].loc[parameter + '__0_0', 'hpd_2.5']\n subject_estimates[parameter +\n '_hpd_97.5'] = summary_tables[subject].loc[parameter + '__0_0', 'hpd_97.5']\n else:\n # Parameter has dependence\n conditions = model.design[parameter]['conditions']\n for condition in conditions:\n if condition not in model.data.loc[model.data['subject'] == subject, dependence].values:\n subject_estimates = subject_estimates.drop(subject_estimates[subject_estimates[dependence] == condition].index,\n axis=0)\n else:\n # Check if subject is in condition\n if subject in model.design[parameter][condition]['subjects']:\n parameter_condition = parameter + '_' + condition\n if model.type == 'hierarchical':\n index = model.design[parameter][condition]['subject_mapping'][subject]\n # extract participant parameters\n estimate = MAP[parameter_condition][index]\n hpd25 = summary_table.loc[parameter_condition +\n '__{}'.format(index), 'hpd_2.5']\n hpd975 = summary_table.loc[parameter_condition +\n '__{}'.format(index), 'hpd_97.5']\n # extract population parameters\n if (parameter_condition + '_mu') in summary_table.index:\n pop_estimate = summary_table.loc[parameter_condition + '_mu', 'mean']\n pop_hpd25 = summary_table.loc[parameter_condition +\n '_mu', 'hpd_2.5']\n pop_hpd975 = summary_table.loc[parameter_condition +\n '_mu', 'hpd_97.5']\n\n elif model.type == 'individual':\n if model.design[parameter]['type'] == 'between':\n estimate = MAP[subject][parameter]\n hpd25 = summary_tables[subject].loc[parameter +\n '__0_0', 'hpd_2.5']\n hpd975 = summary_tables[subject].loc[parameter +\n '__0_0', 'hpd_97.5']\n elif model.design[parameter]['type'] == 'within':\n estimate = MAP[subject][parameter_condition]\n hpd25 = summary_tables[subject].loc[parameter_condition +\n '__0_0', 'hpd_2.5']\n hpd975 = summary_tables[subject].loc[parameter_condition +\n '__0_0', 'hpd_97.5']\n else:\n raise ValueError('Parameter dependence not understood for {}: {} ({}).'.format(\n parameter, dependence, condition))\n else:\n raise ValueError(\n 'Model type not understood. 
Make sure \"make_model\" has already been called.')\n # add participant parameters\n subject_estimates.loc[subject_estimates[dependence]\n == condition, parameter] = estimate\n subject_estimates.loc[subject_estimates[dependence]\n == condition, parameter + '_hpd_2.5'] = hpd25\n subject_estimates.loc[subject_estimates[dependence]\n == condition, parameter + '_hpd_97.5'] = hpd975\n # add population parameters\n if model.type == 'hierarchical':\n subject_estimates.loc[subject_estimates[dependence]\n == condition, parameter + '_mu'] = pop_estimate\n subject_estimates.loc[subject_estimates[dependence]\n == condition, parameter + '_mu_hpd_2.5'] = pop_hpd25\n subject_estimates.loc[subject_estimates[dependence] ==\n condition, parameter + '_mu_hpd_97.5'] = pop_hpd975\n\n estimates = pd.concat([estimates, subject_estimates])\n\n estimates.reset_index(inplace=True, drop=True)\n return estimates", "def doParametersOfInterest(self):\n ''' ref : physicsmodel -> rvf\n self.modelBuilder.out.var(\"MH\").setRange(float(self.mHRange[0]),float(self.mHRange[1]))\n self.modelBuilder.out.var(\"MH\").setConstant(False)\n '''\n\n self.modelBuilder.doVar(\"mu[0,0,1000]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doVar(\"Fvbf[0,0,1]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doSet(\"POI\",\"mu,Fvbf\")\n self.modelBuilder.doVar(\"\")\n self.modelBuilder.factory_('expr::ggH_s_func(\"(@0-sqrt(@0))*(1.-@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::ggH_b_func(\"(1-sqrt(@0))*(1.-@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::ggH_sbi_func(\"sqrt(@0)*(1.-@1)\", mu,Fvbf)')\n\n self.modelBuilder.factory_('expr::vbfH_s_func(\"(@0-sqrt(@0))*(@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::vbfH_b_func(\"(1-sqrt(@0))*(@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::vbfH_sbi_func(\"sqrt(@0)*(@1)\", mu,Fvbf)')", "def train(args):\n # prepare environment\n brain_name = env.brain_names[0]\n brain = env.brains[brain_name]\n env_info = env.reset(train_mode=True)[brain_name]\n\n num_agents = len(env_info.agents)\n print('Number of agents:', num_agents)\n\n # size of each action\n action_size = brain.vector_action_space_size\n print('Size of each action:', action_size)\n\n # examine the state space\n states = env_info.vector_observations\n state_size = states.shape[1]\n print('There are {} agents. 
Each observes a state with length: {}'.format(\n states.shape[0], state_size))\n print('The state for the first agent looks like:', states[0])\n\n # Crate instance of MADDPG Class, mainly possible to control the model dimensions, learnrates and batch sizes\n agent = MADDPG(state_size,\n action_size,\n lr_actor=args.lr_actor,\n lr_critic=args.lr_critic,\n lr_decay=args.lr_decay,\n replay_buff_size=args.replay_buff_size,\n gamma=args.gamma,\n batch_size=args.batch_size,\n random_seed=args.random_seed,\n soft_update_tau=args.soft_update_tau,\n actor_layer_dim_1=args.actor_layer_dim_1,\n actor_layer_dim_2=args.actor_layer_dim_2,\n actor_layer_dim_3=args.actor_layer_dim_3,\n critic_layer_dim_1=args.critic_layer_dim_1,\n critic_layer_dim_2=args.critic_layer_dim_2,\n critic_layer_dim_3=args.critic_layer_dim_3\n\n )\n\n total_rewards = []\n avg_scores = []\n max_avg_score = -1\n max_score = -1\n threshold_init = 20\n noise_t = args.epsilon\n noise_decay = args.epsilon_decay\n latest_avg_score = -1\n # for early-stopping training if consistently worsen for # episodes\n worsen_tolerance = threshold_init\n for i_episode in range(1, 1+args.num_episodes):\n\n env_inst = env.reset(train_mode=True)[\n brain_name] # reset the environment\n states = env_inst.vector_observations # get the current state\n # initialize score array\n scores = np.zeros(num_agents)\n dones = [False]*num_agents\n while not np.any(dones):\n # select an action\n actions = agent.act(states, noise_t)\n # send the action to the environment\n env_inst = env.step(actions)[brain_name]\n next_states = env_inst.vector_observations # get the next state\n rewards = env_inst.rewards # get the reward\n dones = env_inst.local_done # see if episode has finished\n agent.update(states, actions, rewards, next_states, dones)\n\n noise_t *= noise_decay\n scores += rewards # update scores\n states = next_states\n\n episode_score = np.max(scores)\n total_rewards.append(episode_score)\n print(\"\\rEpisodic {} Score: {:.4f}\\t Avg Score: {:.4f}\".format(\n i_episode, episode_score, latest_avg_score), end=' ')\n\n if max_score <= episode_score:\n max_score = episode_score\n # save best model so far\n agent.save(\n \"chkpts/{}/{:02d}_best_model.checkpoint\".format(args.model_path, args.loop_counter))\n\n # record avg score for the latest 100 steps\n if len(total_rewards) >= args.test_n_run:\n latest_avg_score = sum(\n total_rewards[(len(total_rewards)-args.test_n_run):]) / args.test_n_run\n avg_scores.append(latest_avg_score)\n\n if max_avg_score <= latest_avg_score: # record better results\n worsen_tolerance = threshold_init # re-count tolerance\n max_avg_score = latest_avg_score\n else:\n if max_avg_score > 0.5:\n worsen_tolerance -= 1 # count worsening counts\n print(\"Loaded from last best model.\")\n # continue from last best-model\n agent.load(\n \"chkpts/{}/{:02d}_best_model.checkpoint\".format(args.model_path, args.loop_counter))\n if worsen_tolerance <= 0: # earliy stop training\n print(\"Early Stop Training.\")\n break\n del agent\n return total_rewards", "def main():\n argparser = ArgumentParser()\n argparser.add_argument('--case', type=int, required=True,\n help='case number to create observations e.g. 
1 if 1.json')\n args = argparser.parse_args()\n\n case = args.case\n observation_file = os.path.join(OBSERVATION_DIR, '{}.json'.format(case))\n with open(observation_file, 'r') as f:\n observation_config = json.load(f)\n\n nodes = observation_config['nodes']\n edges = observation_config['edges']\n observations = observation_config['observations']\n\n # solution part\n parameters = _get_learned_parameters(nodes=nodes, edges=edges, observations=observations)\n # end solution part\n\n # json only recognises floats, not np.float, so we need to cast the values into floats.\n for node, node_params in parameters.items():\n for param, val in node_params.items():\n node_params[param] = float(val)\n parameters[node] = node_params\n\n if not os.path.exists(PREDICTION_DIR):\n os.makedirs(PREDICTION_DIR)\n prediction_file = os.path.join(PREDICTION_DIR, '{}.json'.format(case))\n\n with open(prediction_file, 'w') as f:\n json.dump(parameters, f, indent=1)\n print('INFO: Results for test case {} are stored in {}'.format(case, prediction_file))", "def run(self):\n data_provider = DataProvider(self.config)\n hex_attr_df = data_provider.read_hex_bin_attributes()\n hex_distance_df = data_provider.read_hex_bin_distances()\n city_states = data_provider.read_city_states(self.city_states_filename)\n neighborhood = data_provider.read_neighborhood_data()\n popular_bins = data_provider.read_popular_hex_bins()\n num_episodes = self.config['RL_parameters']['num_episodes']\n ind_episodes = self.config['RL_parameters']['ind_episodes']\n exp_decay_multiplier = self.config['RL_parameters']['exp_decay_multiplier']\n\n q_ind = None\n r_table = None\n xi_matrix = None\n\n best_episode = None\n best_model = {}\n\n progress_bar = tqdm(xrange(num_episodes))\n for episode_id in progress_bar:\n progress_bar.set_description(\"Episode: {}\".format(episode_id))\n current_best = -1000000\n\n # Create episode\n ind_exploration_factor = np.e ** (-1 * episode_id * exp_decay_multiplier / ind_episodes)\n\n episode = Episode(self.config,\n episode_id,\n ind_exploration_factor,\n hex_attr_df,\n hex_distance_df,\n city_states,\n neighborhood,\n popular_bins,\n q_ind,\n r_table,\n xi_matrix)\n\n # Run episode\n tables = episode.run()\n q_ind = tables['q_ind']\n r_table = tables['r_table']\n xi_matrix = tables['xi_matrix']\n episode_tracker = tables['episode_tracker']\n\n # Uncomment for logging if running a job, comment during experiments\n # otherwise it leads to insanely huge logging output which is useless\n\n # self.logger.info(\"\"\"\n # Expt: {} Episode: {} Earnings: {}\n # Pax rides: {} Relocation rides: {} Unmet demand: {}\n # \"\"\".format(self.expt_name, episode_id,\n # episode_tracker.gross_earnings,\n # episode_tracker.successful_waits,\n # episode_tracker.relocation_rides,\n # episode_tracker.unmet_demand))\n # self.logger.info(\"----------------------------------\")\n\n self.training_tracker.update_RL_tracker(\n episode_id, episode_tracker.gross_earnings,\n episode_tracker.successful_waits, episode_tracker.unsuccessful_waits,\n episode_tracker.unmet_demand, episode_tracker.relocation_rides,\n episode_tracker.DET, episode_tracker.DPRT, episode_tracker.DWT,\n episode_tracker.DRT, episode_tracker.DCT)\n\n # Keep track of the best episode\n if self.objective == 'revenue':\n if episode_tracker.gross_earnings >= current_best:\n best_episode = episode_tracker\n current_best = best_episode.gross_earnings\n else: # self.objective == 'pickups':\n if episode_tracker.successful_waits >= current_best:\n best_episode = episode_tracker\n 
current_best = episode_tracker.successful_waits\n\n # Keep track of the best model\n best_model['ind_exploration_factor'] = ind_exploration_factor\n best_model['config'] = self.config\n best_model['q_ind'] = q_ind\n best_model['r_table'] = r_table\n best_model['xi_matrix'] = xi_matrix\n best_model['training_tracker'] = self.training_tracker\n\n # After finishing training\n self.logger.info(\"Expt: {} Earnings: {} Met Demand: {} Unmet Demand: {}\".format(self.expt_name,\n best_episode.gross_earnings,\n best_episode.successful_waits,\n best_episode.unmet_demand))\n return best_episode, best_model, self.training_tracker", "def test_intent_classifier_set_params(self):\n pass", "def get_approaches(self):\n # acc. to ATV-A 121 chap. 5.2.1\n if self.worksheet == ATV:\n self._data.append(OrderedDict({PARAM_COL.FROM: None,\n PARAM_COL.TO: None,\n PARAM_COL.U: LOG2,\n PARAM_COL.W: LOG1}))\n\n elif self.worksheet == DWA:\n duration_bound_1, duration_bound_2 = self.get_duration_steps()\n\n self._data.append(OrderedDict({PARAM_COL.FROM: 0,\n PARAM_COL.TO: duration_bound_1,\n PARAM_COL.U: HYP,\n PARAM_COL.W: LOG2}))\n self._data.append(OrderedDict({PARAM_COL.FROM: duration_bound_1,\n PARAM_COL.TO: duration_bound_2,\n PARAM_COL.U: LOG2,\n PARAM_COL.W: LOG2}))\n self._data.append(OrderedDict({PARAM_COL.FROM: duration_bound_2,\n PARAM_COL.TO: np.inf,\n PARAM_COL.U: LIN,\n PARAM_COL.W: LIN}))\n\n else:\n raise NotImplementedError", "def analyze():\n\n\t# Analyze the available data\n\tcoins = ''\n\tparams = []\n\tdone = False\n\twhile done != True:\n\t\tS_0_dat, K_dat, V_dat, T, coin = analysis.load()\n\t\ttheta, T = analysis.LM(S_0_dat, K_dat, V_dat, T)\n\t\tparams.append([theta, T, coin])\n\t\tif coins == '':\n\t\t\tcoins = coin\n\t\telse:\n\t\t\tcoins += ', ' + coin + '.'\n\t\ttry:\n\t\t\tprint(\"Current coins analyzed:\", coins)\n\t\t\tinp = input(\"Analyze another dataset? (y/n)\t\")\n\t\t\tif inp.lower() == 'y':\n\t\t\t\tcontinue\n\t\t\tif inp.lower() == 'n':\n\t\t\t\tprint(\"\\nFinal parameters:\", params)\n\t\t\t\tdone = True\n\t\texcept ValueError:\n\t\t\tprint(\"\\nUnable to interpret input. 
Please try again.\")\n\treturn params", "def movies(self):\n return self.data.groupby('Parameters')", "def get_properties_technical_systems(locator, prop_hvac):\n\n prop_emission_heating = pd.read_excel(locator.get_database_air_conditioning_systems(), 'HEATING')\n prop_emission_cooling = pd.read_excel(locator.get_database_air_conditioning_systems(), 'COOLING')\n prop_emission_dhw = pd.read_excel(locator.get_database_air_conditioning_systems(), 'HOT_WATER')\n prop_emission_control_heating_and_cooling = pd.read_excel(locator.get_database_air_conditioning_systems(),\n 'CONTROLLER')\n prop_ventilation_system_and_control = pd.read_excel(locator.get_database_air_conditioning_systems(), 'VENTILATION')\n df_emission_heating = prop_hvac.merge(prop_emission_heating, left_on='type_hs', right_on='code')\n df_emission_cooling = prop_hvac.merge(prop_emission_cooling, left_on='type_cs', right_on='code')\n df_emission_control_heating_and_cooling = prop_hvac.merge(prop_emission_control_heating_and_cooling,\n left_on='type_ctrl', right_on='code')\n df_emission_dhw = prop_hvac.merge(prop_emission_dhw, left_on='type_dhw', right_on='code')\n df_ventilation_system_and_control = prop_hvac.merge(prop_ventilation_system_and_control, left_on='type_vent',\n right_on='code')\n fields_emission_heating = ['Name', 'type_hs', 'type_cs', 'type_dhw', 'type_ctrl', 'type_vent', 'heat_starts',\n 'heat_ends', 'cool_starts', 'cool_ends', 'class_hs', 'convection_hs',\n 'Qhsmax_Wm2', 'dThs_C', 'Tshs0_ahu_C', 'dThs0_ahu_C', 'Th_sup_air_ahu_C', 'Tshs0_aru_C',\n 'dThs0_aru_C', 'Th_sup_air_aru_C', 'Tshs0_shu_C', 'dThs0_shu_C']\n fields_emission_cooling = ['Name', 'Qcsmax_Wm2', 'dTcs_C', 'Tscs0_ahu_C', 'dTcs0_ahu_C', 'Tc_sup_air_ahu_C',\n 'Tscs0_aru_C', 'dTcs0_aru_C', 'Tc_sup_air_aru_C', 'Tscs0_scu_C', 'dTcs0_scu_C',\n 'class_cs', 'convection_cs']\n fields_emission_control_heating_and_cooling = ['Name', 'dT_Qhs', 'dT_Qcs']\n fields_emission_dhw = ['Name', 'Tsww0_C', 'Qwwmax_Wm2']\n fields_system_ctrl_vent = ['Name', 'MECH_VENT', 'WIN_VENT', 'HEAT_REC', 'NIGHT_FLSH', 'ECONOMIZER']\n\n result = df_emission_heating[fields_emission_heating].merge(df_emission_cooling[fields_emission_cooling],\n on='Name').merge(\n df_emission_control_heating_and_cooling[fields_emission_control_heating_and_cooling],\n on='Name').merge(df_emission_dhw[fields_emission_dhw],\n on='Name').merge(df_ventilation_system_and_control[fields_system_ctrl_vent], on='Name')\n # verify hvac and ventilation combination\n verify_hvac_system_combination(result, locator)\n # read region-specific control parameters (identical for all buildings), i.e. 
heating and cooling season\n result['has-heating-season'] = result.apply(lambda x: verify_has_season(x['Name'],\n x['heat_starts'],\n x['heat_ends']), axis=1)\n result['has-cooling-season'] = result.apply(lambda x: verify_has_season(x['Name'],\n x['cool_starts'],\n x['cool_ends']), axis=1)\n\n # verify seasons do not overlap\n result['overlap-season'] = result.apply(lambda x: verify_overlap_season(x['Name'],\n x['has-heating-season'],\n x['has-cooling-season'],\n x['heat_starts'],\n x['heat_ends'],\n x['cool_starts'],\n x['cool_ends']), axis=1)\n return result", "def main(cls, args):\n #cls.trainOfflineAndTest(100, 0.1, 0.1, 0.9);\n #cls.trainOfflineAndTest(500, 0.1, 0.1, 1.0);\n\n cls.trainer.teachActiveAndSaveStatistics(\"onlineTest\", 10, 0.8, 1.0 ,1.0, 0.0, 0.3, True, True,True);\n cls.trainer.teachActiveAndSaveStatistics(\"path\", 10, 0.0, 0.0, 0.0, 0.0, 0.0, True, False, False)\n\n #trainer.teachActiveAndSaveStatistics(\"onlineTest\", 10000, 0.1f, 0.1f, 1.0f, 0.0f, 0.1f,true, true, true);\n # \t\ttrainer.teachActiveAndSaveStatistics(\"onlineTest\", 10000, 0.1f, 0.1f, 1.0f, 0.0f, 0.1f,\n # \t\t\t\tfalse, true, true);\n # \t\t\n # \t\ttestAgentFromFileWithOutLearning(\"net10000_a0.1_b0.1g_1.0e_0.1_first.net\", 10000, true);\n # \t\ttestAgentFromFileWithOutLearning(\"net10000_a0.1_b0.1g_1.0e_0.1_secound.net\", 10000, true);\n #cls.testAgentFromFileWithOutLearning(\"net10000_a0.1_b0.1g_1.0e_0.1_first.net\", 10000, False)\n #cls.testAgentFromFileWithOutLearning(\"net10000_a0.1_b0.1g_1.0e_0.1_secound.net\", 10, False)\n # \t\ttestAgentFromFileWithOutLearning(\"net10000_a0.1_b0.1g_1.0e_0.1.net\", 10000, false);", "def get_data_loaders():\n dataset_path = \"\"\n dataset_cache = None\n personachat = get_dataset(dataset_path, dataset_cache)\n\n tokenizer_selected = OpenAIGPTTokenizer.from_pretrained('openai-gpt')\n logger.info(\"Build inputs and labels\")\n datasets = {\"train\": defaultdict(list), \"valid\": defaultdict(list)}\n personality = []\n history_complete = []\n count_persona = 0\n with open('data_faiss_pegasus_2sentences_finalgenerated.pkl', 'rb') as f:\n persona_selected_list = pickle.load(f)\n for dataset_name, dataset in personachat.items():\n num_candidates = len(dataset[0][\"utterances\"][0][\"candidates\"])\n if num_candidates > 0 and dataset_name == 'train':\n num_candidates = min(1, num_candidates)\n for dialog in dataset:\n persona = dialog[\"persona_info\"].copy()\n #datasets[personality].append(persona)\n count_history = 0\n for utterance in dialog[\"utterances\"]:\n count_history = count_history + 1\n history = utterance[\"history\"][-(2*2+5):]\n \n #history_complete.append(history)\n if len(persona) == 4:\n if len(history) > (len(persona)+3):\n history_chatbot = history[1::2]\n persona_selected = persona_selected_list[count_persona]\n instance = build_input_from_segments_faiss_2(persona, history_chatbot) \n for input_name, input_array in instance.items():\n datasets[dataset_name][input_name].append(input_array)\n count_persona = count_persona + 1\n return datasets", "def doParametersOfInterest(self):\r\n if self.fg4fixed:\r\n self.modelBuilder.doVar(\"CMS_zz4l_fg4[0]\")\r\n self.modelBuilder.doVar(\"r[1,0,4]\")\r\n print \"Fixing CMS_zz4l_fg4\"\r\n poi = \"r\"\r\n else:\r\n if self.modelBuilder.out.var(\"CMS_zz4l_fg4\"):\r\n print \"have fg4 inside\"\r\n else:\r\n self.modelBuilder.doVar(\"CMS_zz4l_fg4[0.,-1,1]\")\r\n poi = \"CMS_zz4l_fg4\"\r\n if self.cPOI:\r\n if self.modelBuilder.out.var(\"cww_zz\"):\r\n print \"have czz_ww inside\"\r\n else:\r\n 
self.modelBuilder.doVar(\"cww_zz[0.5,-10,10]\")\r\n poi += \",cww_zz\"\r\n\r\n if self.fg2POI:\r\n if self.modelBuilder.out.var(\"CMS_zz4l_fg2\"):\r\n print \"have fg2 inside\"\r\n else:\r\n self.modelBuilder.doVar(\"CMS_zz4l_fg2[0.,0,1]\")\r\n poi += \",CMS_zz4l_fg2\"\r\n if self.muFloating:\r\n self.modelBuilder.doVar(\"r[1,0,2000]\")\r\n if self.muAsPOI:\r\n print \"Treating r as a POI\"\r\n poi += \",r\"\r\n else:\r\n self.modelBuilder.out.var(\"r\").setAttribute(\"flatParam\")\r\n if self.phiFloating:\r\n if self.modelBuilder.out.var(\"CMS_zz4l_fg4phi\"):\r\n print \"have fg4phi inside\"\r\n else: \r\n self.modelBuilder.doVar(\"CMS_zz4l_fg4phi[0.,-3.1415926,3.1415926]\")\r\n if self.phiPOI:\r\n poi += \",CMS_zz4l_fg4phi\"\r\n else:\r\n self.modelBuilder.out.var(\"CMS_zz4l_fg4phi\").setAttribute(\"flatParam\")\r\n if self.phi2Floating:\r\n #self.modelBuilder.doVar(\"CMS_zz4l_fg4phi[0.,-math.pi,math.pi]\")\r\n if self.modelBuilder.out.var(\"CMS_zz4l_fg2phi\"):\r\n print \"have fg2phi inside\"\r\n else: \r\n self.modelBuilder.doVar(\"CMS_zz4l_fg2phi[0.,-3.1415926,3.1415926]\")\r\n self.modelBuilder.out.var(\"CMS_zz4l_fg2phi\").setAttribute(\"flatParam\")\r\n \r\n self.modelBuilder.doSet(\"POI\",poi)", "def build_model(self):\n insts1, attrs1, rels1 = self.arg1.get_triples()\n insts2, attrs2, rels2 = self.arg2.get_triples()\n for items, shld_norm in [(insts1, True), (insts2, True), (attrs1, True),\n (attrs2, True), (rels1, False), (rels2, False)]:\n for i in range(len(items)):\n # GUROBI cant handle Unicode so step down to ASCII\n items[i] = [items[i][0].encode('ascii', 'ignore').lower(),\n items[i][1].encode('ascii', 'ignore'),\n items[i][2].encode('ascii', 'ignore')]\n # normalize concept names -- instances and attributes\n if shld_norm:\n items[i][2] = SmatchILP.normalize(items[i][2])\n\n # Attributes are same as relations\n rels1.extend(attrs1)\n rels2.extend(attrs2)\n\n log.debug(\"AMR 1 Instances:\\n %s\" % insts1)\n log.debug(\"AMR 1 Relations:\\n %s\" % rels1)\n log.debug(\"AMR 2 Instances:\\n %s\" % insts2)\n log.debug(\"AMR 2 Relations:\\n %s\" % rels2)\n\n for index, items in [(self.arg1vars, insts1), (self.arg2vars, insts2)]:\n for name, var, concept in items:\n assert name == 'instance' # relation name is instance ==> variable definition\n assert var not in index # variable name is unique\n index[var] = concept\n\n var_choices = set() # possible variable matches\n for v1 in self.arg1vars.keys():\n for v2 in self.arg2vars.keys():\n var_choices.add((v1, v2))\n\n # instances are relations too\n rels1.extend(insts1)\n rels2.extend(insts2)\n\n self.arg1size = len(rels1)\n self.arg2size = len(rels2)\n\n trpl_choices = set()\n trpl_var_consts = {}\n for name1, var11, var12 in rels1:\n id1 = \"%s:%s:%s\" % (name1, var11, var12)\n for name2, var21, var22 in rels2:\n possible = 0\n id2 = \"%s:%s:%s\" % (name2, var21, var22)\n # triple name matches && first argument to triples can be matched\n if name1 == name2 and (var11, var21) in var_choices:\n # second argument to triple can also be matched OR\n possible += 1\n if (var12, var22) in var_choices or (\n # they are the same concepts\n # var12 not in self.arg1vars and var22 not in self.arg2vars and\n var12 == var22):\n possible += 1\n trpl_choices.add((id1, id2))\n # constrains between variables and triples\n trpl_var_consts[id1, id2] = [(var11, var21)]\n # if second argument is also variable\n\n if (var12, var22) in var_choices:\n trpl_var_consts[id1, id2].append((var12, var22))\n log.debug('\\t %s <--> %s ? 
%s ' % (id1, id2, possible))\n\n # Add variables to ILP model\n model = GRBModel('Smatch ILP')\n if log.getLogger().getEffectiveLevel() >= log.INFO:\n model.Params.OutputFlag = 0 # disable output\n log.info(\"Number of possible variable matches %s\" % len(var_choices))\n log.info(\"Number of possible triple matches %s\" % len(trpl_choices))\n\n self.vars = model.addVars(var_choices, vtype=GRB.BINARY, name=\"v\")\n self.trpls = model.addVars(trpl_choices, vtype=GRB.BINARY, name=\"t\")\n\n # constraints\n for v1 in self.arg1vars:\n model.addConstr(self.vars.sum(v1, '*') <= 1, name='to max 1 var')\n for v2 in self.arg2vars:\n model.addConstr(self.vars.sum('*', v2) <= 1, name='from max 1 var')\n\n for trpl_idx, var_idxs in trpl_var_consts.items():\n for var_idx in var_idxs:\n model.addConstr(self.trpls[trpl_idx] <= self.vars[var_idx], name=\"%s::%s\" % (trpl_idx, var_idx))\n\n # objective\n model.setObjective(self.trpls.sum(), GRB.MAXIMIZE)\n self.model = model\n\n # stats for how big the problem is\n var_trpl_consts_count = sum(len(x) for x in trpl_var_consts.values())\n num_constr = len(var_choices) + len(trpl_choices) + var_trpl_consts_count\n num_vars = len(var_choices) + len(trpl_choices)\n log.info(\"ILP SIZE: %d binary variables (%d vars + %d triple vars)\" % (num_vars, len(var_choices), len(trpl_choices)))\n log.info(\"ILP SIZE: %d constraints (%d b/w arg vars and triples)\" % (num_constr, var_trpl_consts_count))", "def _get_obs(self):\n pos = []\n z = []\n for i in range(params['memory_size']):\n if self._step - i * params['memory_size'] > 1:\n pos.append(self._track_item['joint_pos'][self._step - i * params['memory_size'] - 1].copy())\n z.append(self._track_item['z'][self._step - i * params['memory_size'] - 1].copy())\n else:\n pos.append(self._track_item['joint_pos'][0].copy())\n if len(self._track_item['z']) < 1:\n z.append(self.z.copy())\n else:\n z.append(self._track_item['z'][0].copy())\n out = pos\n if params['observation_version'] == 1:\n out += z\n ob = {\n 'observation' : np.concatenate(out, -1),\n 'desired_goal' : self.desired_goal.copy(),\n 'achieved_goal' : self.achieved_goal.copy(),\n 'z' : self.z.copy()\n }\n return ob", "def get_observations(self):\n # Check hyper_params and criterion\n if len(self.hyper_params) == 0:\n raise AssertionError('!! Hyper-Parameters has not been set.')\n # Check criterion\n if self.criterion is None:\n raise AssertionError(\n '!! Criterion for hyper-parameter searching has not been set.')\n # Fetch notes\n notes = self.summary_fetcher()\n if len(notes) == 0: return []\n # Peel of Note wrapper\n observations = []\n for note in notes:\n # Every note in the note list must contain the criterion\n if self.criterion not in note.criteria: raise AssertionError(\n '!! 
Every note must contain the criterion `{}`'.format(self.criterion))\n # This note will be ignored if it does not contain all the information\n # in self.hyper_params or the config value is not within the range\n if not all([hp.name in note.configs and hp.within(note.configs[hp.name])\n for hp in self.hyper_params]): continue\n # Gather observation\n od = OrderedDict()\n # self.scroll.hyper_params.values() may have been found themselves\n for hp in self.scroll.hyper_params.values():\n assert isinstance(hp, HyperParameter)\n od[hp] = note.configs[hp.name]\n # Organize the observation list as a list of tuples\n observations.append((od, note.criteria[self.criterion]))\n return observations", "def evaluate(self, params):\n model = self.train(params)\n Y_pred = model.predict(self.X_test)\n\n if self.dataset_type == PROBLEM.CLASSIFICATION:\n return {\n 'score': accuracy_score(self.Y_test, Y_pred),\n 'matrix': confusion_matrix(self.Y_test, Y_pred).tolist(),\n 'report': classification_report(self.Y_test, Y_pred,\n target_names=self.labels,\n zero_division=1)\n }\n else:\n return {\n 'max_error': max_error(self.Y_test, Y_pred),\n 'mae': mean_absolute_error(self.Y_test, Y_pred),\n 'mse': mean_squared_error(self.Y_test, Y_pred)\n }", "def _get_learned_parameters(nodes, edges, observations):\n parameters = {}\n\n \"\"\" YOUR CODE HERE \"\"\"\n for node in nodes:\n parent_nodes = []\n for edge in edges:\n if node == edge[1]:\n parent_nodes.append(edge[0])\n output = np.array(observations[node])\n inputs = []\n for p_node in parent_nodes:\n inputs.append(observations[p_node])\n if inputs!=[]:\n inputs = np.array(inputs).T\n else:\n inputs = None\n weights = _learn_node_parameter_w(output, inputs)\n variance = _learn_node_parameter_var(output, weights, inputs)\n parameters[node] = {}\n parameters[node][\"variance\"] = variance\n parameters[node][\"bias\"] = weights[0]\n for p_node_index in range(len(parent_nodes)):\n parameters[node][parent_nodes[p_node_index]] = weights[p_node_index+1]\n \"\"\" END YOUR CODE HERE \"\"\"\n\n return parameters", "def coil_parameters(agent_data, agents_df, agent_name):\r\n rn = random()\r\n agent_data.at[0, 'int_fab'] = 0\r\n agent_data.at[0, 'location'] = agents_df.loc[0, 'Location']\r\n agent_data.at[0, 'to_do'] = 0\r\n agent_data.at[0, 'coil_length'] = 5000 + (rn*1000) # between 5000 - 6000 m\r\n agent_data.at[0, 'coil_width'] = 1000 + (rn*500) # between 1000-1500\r\n agent_data.at[0, 'coil_thickness'] = 2.5 + (rn/2) # between 2.5-3\r\n agent_data.at[0, 'coil_weight'] = agent_data.at[0, 'coil_length'] * agent_data.at[0, 'coil_width'] * agent_data.at[0, 'coil_thickness'] * (1/1000) * (1/100) *(1/ 7850)\r\n agent_data.at[0, 'setup_speed'] = 10 + (rn/2) # between 10-10.5 m/s. Fab takes between 8 and 10 min with this conditions. 
process time = length / speed\r\n agent_data.at[0, 'T1'] = 250 + (rn*100) # between 250-350\r\n agent_data.at[0, 'T2'] = 550 + (rn*100) # between 550-650\r\n agent_data.at[0, 'T3'] = 800 + (rn*100) # between 800-900\r\n agent_data.at[0, 'T4'] = 600 + (rn*100) # between 600-700\r\n agent_data.at[0, 'T5'] = 300 + (rn*100) # between 300-400\r\n agent_data.at[0, 'q'] = 0.5 + (rn/10) # between 0.5-0.6\r\n agent_data.at[0, 'ship_date'] = random_date(datetime.datetime.now(), datetime.datetime.now() + datetime.timedelta(minutes=40)) # Planning: between now and in 40 min.\r\n if rn < 0.15:\r\n agent_data.at[0, 'budget'] = 100 + (20 * random())\r\n else:\r\n agent_data.at[0, 'budget'] = 100\r\n # if rn < 0.2:\r\n # agent_data.at[0, 'location'] = \"I\"\r\n # elif 0.2 < rn < 0.4:\r\n # agent_data.at[0, 'location'] = \"J\"\r\n # elif 0.4 < rn < 0.6:\r\n # agent_data.at[0, 'location'] = \"K\"\r\n # elif 0.6 < rn < 0.8:\r\n # agent_data.at[0, 'location'] = \"L\"\r\n # elif 0.8 < rn < 1:\r\n # agent_data.at[0, 'location'] = \"M\"\r\n return agent_data", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.75,0.75]\");\n self.modelBuilder.doSet(\"POI\",\"Afb\")\n\n # ss templates\n self.modelBuilder.doVar(\"Dilu_ratio[1.0,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rdy_mumu_ss[1.0,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rdy_ee_ss[1.0,0.0,10.0]\");\n \n self.modelBuilder.factory_('expr::Rpl(\"0.5*(1.+@0*@1)\",Afb, Dilu_ratio)')\n self.modelBuilder.factory_('expr::Rmn(\"0.5*(1.-@0*@1)\",Afb, Dilu_ratio)')", "def get_new_modelling_data():\n # get latest epidemic data from OWID \n\n df = pd.read_json(requests.get(\"https://covid.ourworldindata.org/data/owid-covid-data.json\").content)\n data = pd.DataFrame(df[\"POL\"][\"data\"])\n\n # get latest government restriction data from Oxford tracker\n response = requests.get(\"https://raw.githubusercontent.com/OxCGRT/covid-policy-tracker/master/data/OxCGRT_latest.csv\").content\n rest = pd.read_csv(io.StringIO(response.decode('utf-8')))\n rest = rest[rest.CountryName == \"Poland\"]\n\n modelling = pd.DataFrame(Mobility.objects.values())\n prepare_model_data(data,rest,modelling)" ]
[ "0.5695615", "0.5606592", "0.56018066", "0.5594157", "0.55773884", "0.55531305", "0.5503704", "0.549341", "0.54833376", "0.54497266", "0.5354468", "0.5281192", "0.52682257", "0.52564496", "0.5245968", "0.52178377", "0.52174217", "0.52096766", "0.52088886", "0.5184523", "0.5180947", "0.5174916", "0.516812", "0.51590014", "0.5141455", "0.51264626", "0.5103872", "0.51012665", "0.50947946", "0.50829166", "0.50805503", "0.507667", "0.507407", "0.5066707", "0.50609976", "0.5049347", "0.5037261", "0.50341016", "0.5025998", "0.5024898", "0.50237066", "0.5023148", "0.50149333", "0.5012856", "0.501207", "0.5009959", "0.5007494", "0.5004174", "0.49989942", "0.49920934", "0.49890304", "0.49878907", "0.49838886", "0.4983766", "0.49733472", "0.49714732", "0.497114", "0.49697456", "0.49680737", "0.49638504", "0.49605462", "0.49580657", "0.4948675", "0.4948669", "0.4942975", "0.49359673", "0.49282384", "0.492517", "0.49222752", "0.49095786", "0.4899965", "0.4898282", "0.48958886", "0.4860986", "0.48579597", "0.48560923", "0.4855247", "0.48549214", "0.48497272", "0.48465243", "0.48448867", "0.48428383", "0.4840303", "0.48363873", "0.48363402", "0.4835587", "0.4830812", "0.48288244", "0.48257443", "0.48250157", "0.48194095", "0.48119217", "0.48082215", "0.48051867", "0.47944677", "0.47885793", "0.47880885", "0.4784778", "0.4784135", "0.4775629", "0.4772016" ]
0.0
-1
Save control data from evaluation to disk.
def reset(self, gui=False, test_ind=-1): def output_data(self):
    if not self.is_record:
        logging.error('Env: no record to output!')
    else:
        control_data = pd.DataFrame(self.control_data)
        control_data.to_csv(self.output_path + ('%s_%s_control.csv' % (self.name, self.agent)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_data(self):\n pass", "def saveData(self):\n pass", "def save(self) -> None:\n self.saver.save_model_and_weights(self.model)\n self.saver.save_data_shuffle_indices(\n self.data.eval_shuffler.ds_inds\n )\n self.saver.save_input_scaler(self.data.x.scaler)", "def save(self, output, data):", "def save():", "def _save_state(self, saver, session, data, checkpts_path):\n # Save variable state\n if checkpts_path:\n logging.info('Saving cotrain checkpoint at %s.', checkpts_path)\n saver.save(session, checkpts_path, write_meta_graph=False)\n\n # Save dataset state.\n if self.data_dir:\n logging.info('Saving self-labeled dataset backup.')\n data.save_state_to_file(self.data_dir)", "def save(self, data):\n self.write(data)", "def save_prediction(self):\n if DataLoader.data is None:\n messagebox.showerror(\"Information\", \"Data file is empty, please load the data first.\")\n return\n if Trainer.y_pred is None:\n messagebox.showerror(\"Information\", \"Preciction has not been made, please train a new model and predict or \"\n \"load a model and predict.\")\n return\n\n path = filedialog.asksaveasfile(mode='w', defaultextension=\".csv\", filetypes=[(\"csv files\", '*.csv'),\n (\"xlsx files\", '*.xlsx'),\n (\"dat files\", '*.dat')])\n\n copy_data = DataLoader.data.copy()\n copy_data['prediction'] = Trainer.y_pred\n copy_data.to_csv(path, index=False)\n\n # Clears memory\n copy_data.drop(copy_data.index, inplace=True)\n del copy_data", "def _save(self, data: np.ndarray) -> None:\n ...", "def save(self):\n\n if (self._save != '0'):\n p = self._save+self._path[-3:-1]+'_'+str(self._qn)+'.dat'\n np.savetxt(p, self._gf)\n else:\n sys.exit(\"Wrong path to save\")", "def save(self):\n output = self.prepare_results()\n\n override_name = output[\"config\"][\"sysconfig\"].get(\"output_filename\", None)\n scenario_name = (\n override_name if override_name else output[\"config\"][\"scenario\"][\"name\"]\n )\n filename = f\"{scenario_name}_{output['timestamp']}.json\"\n log.info(\n \"Saving evaluation results to path \"\n f\"{self.scenario_output_dir}/{filename} \"\n \"inside container.\"\n )\n output_path = os.path.join(self.scenario_output_dir, filename)\n with open(output_path, \"w\") as f:\n json_utils.dump(output, f)\n if os.path.getsize(output_path) > 2**27:\n log.warning(\n \"Results json file exceeds 128 MB! 
\"\n \"Recommend checking what is being recorded!\"\n )", "def save(self, output, data):\n pass", "def saveModel(self):\n with open(self.modelSaveFile, 'wb') as f:\n pickle.dump(self.values, f, pickle.HIGHEST_PROTOCOL)\n pickle.dump(self.policy, f, pickle.HIGHEST_PROTOCOL)", "def saveSettings(self):\n self.genFiles.applyData()\n self.genGraph.applyData()", "def save(circuit):\n with open(globals.circuitcache / (str(circuit.circuitnr) + '.pickle'), 'wb') as f:\n pickle.dump(circuit, f, pickle.HIGHEST_PROTOCOL)", "def __savePreProcessedData(self):\n np.savetxt(self.X_filename, self.X, delimiter=',')\n np.savetxt(self.y_filename, self.le.fit_transform(self.y), delimiter=',')\n #Need to save the label Enconder to inverse transform later\n joblib.dump(self.le, self.le_filename)\n\n print(\"Saved X and y\")", "def save(self):\n self.backend.save(list(self._d.items()))\n log.debug(\"save: {}\".format(self.backend.filename))", "def save(self):\n # Sanity checks\n assert len(self.actions) == len(self.rewards)\n assert len(self.actions) == len(self.episode_starts)\n assert len(self.actions) == len(self.images_path)\n assert len(self.actions) == len(self.ground_truth_states)\n assert len(self.target_positions) == self.episode_idx + 1\n\n data = {\n 'rewards': np.array(self.rewards),\n 'actions': np.array(self.actions),\n 'episode_starts': np.array(self.episode_starts)\n }\n\n ground_truth = {\n 'target_positions': np.array(self.target_positions),\n 'ground_truth_states': np.array(self.ground_truth_states),\n 'images_path': np.array(self.images_path)\n }\n print(\"Saving preprocessed data...\")\n np.savez('{}/preprocessed_data.npz'.format(self.data_folder), **data)\n np.savez('{}/ground_truth.npz'.format(self.data_folder), **ground_truth)", "def _save_data(self):\n self.data.to_csv('data/c&le/{}'.format(self.name))", "def save(self, output, data):\n return", "def save_data(self):\n # Command to get the download data\n pass", "def save_outputs(self):\n write_pickled(join(self.output_folder, \"results.pkl\"), self.get_results())", "def save_vals (self):\n raise NotImplementedError", "def _save(self):\n\t\t\n\t\tdirectory = self.Output_path\n\n\t\t# replace with \n\t\t# file_name = hermes.mk_themis_file_name(themis_obj = self)\n\t\tfile_name = f'Themis_{self.CELL_ID[\"experiment\"]}_u{self.CELL_ID[\"unit\"]}_c{self.CELL_ID[\"cell\"]}_r{self.CELL_ID[\"run\"]}.pkl'\n\n\t\tsave_path = directory / file_name\n\n\t\t# Atomic saving (helpful?)\n\t\ttemp_path = save_path.with_suffix(save_path.suffix + '.tmp')\n\t\t\n\t\tself.SavePath = save_path\n\n\t\t\n\t\twith open(temp_path, 'wb') as f:\n\t\t\tpickle.dump(self, f)\n\n\t\ttemp_path.rename(save_path)\n\n\t\tprint(f'Saved {self.RUN_KEY} as {save_path}')", "def save():\n pass", "def save(self):\n pickle_save(self.results, 'results', self.main_dir)", "def save(estimator, path):\n saver = tf.train.Saver()\n if \"/\" not in path:\n path = \"./\" + path\n saver.save(estimator.sess, path + \".ckpt\")\n\n save_dr = directRanker()\n for key in estimator.get_params():\n # ToDo: Need to be fixed to also restore the cost function\n if key == \"cost\":\n save_dr.__setattr__(key, None)\n else:\n save_dr.__setattr__(key, estimator.get_params()[key])\n\n with open(path + \".pkl\", 'wb') as output:\n pickle.dump(save_dr, output, 0)", "def save(self,sess):\n self.saver.save(sess,\"./Models/\" + self.mod_name + \".ckpt\")", "def _saveState(self):\n assertMainThread()\n self._defineProperties()\n propertyCollection = self._config.guiState()\n try:\n 
propertyCollection.setProperty(\"RecordingControl_directory\", self._directory)\n except PropertyCollectionPropertyNotFound:\n pass", "def _save(self, itr):\n # using keep_checkpoint_every_n_hours as proxy for iterations between saves\n if self.saver and (itr + 1) % self.saver._keep_checkpoint_every_n_hours == 0:\n\n # collect params (or stuff to keep in general)\n params = dict()\n params['critic'] = self.critic.network.get_param_values()\n\n # if the environment is wrapped in a normalizing env, save those stats\n normalized_env = hgail.misc.utils.extract_normalizing_env(self.env)\n if normalized_env is not None:\n params['normalzing'] = dict(\n obs_mean=normalized_env._obs_mean,\n obs_var=normalized_env._obs_var\n )\n\n # save hierarchy\n for i, level in enumerate(self.hierarchy):\n params[i] = dict()\n params[i]['policy'] = level.algo.policy.get_param_values()\n \n # save params \n save_dir = os.path.split(self.saver_filepath)[0]\n hgail.misc.utils.save_params(save_dir, params, itr+1, max_to_keep=50)", "def save(self, data, **kwargs):\n if self.persist==['data']: # 1 data shortcut\n self.output().save(data, **kwargs)\n else:\n targets = self.output()\n if not set(data.keys())==set(targets.keys()):\n raise ValueError('Save dictionary needs to consistent with Task.persist')\n for k, v in data.items():\n targets[k].save(v, **kwargs)", "def save(self):\n\n or_none = lambda x: x if x is not None else \"none\"\n with h5py.File(self.filename, \"a\") as hf:\n for attr in self._SAVE_ATTRS + self._save_attrs:\n hf.attrs[attr] = or_none(getattr(self, attr, None))", "def save(self):\n # TODO: save the file", "def checkpoint(self):\n save()", "def save(self, experiment_dir):\n date_time = time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime())\n\n if self.eval_results is not None:\n # print(self.eval_results)\n assert isinstance(self.eval_results, dict)\n # present the dict in str form\n # res_str = ''.join(''.join(str(x) for x in tup) for tup in self.eval_results.items())\n\n self._path = os.path.join(\n experiment_dir, self.CHECKPOINT_DIR_NAME, date_time,\n )\n path = self._path\n\n if os.path.exists(path):\n shutil.rmtree(path)\n os.makedirs(path)\n\n torch.save(\n {'epoch': self.epoch, 'optimizer': self.optimizer},\n os.path.join(path, self.TRAINER_STATE_NAME)\n )\n torch.save(self.model, os.path.join(path, self.MODEL_NAME))\n\n # save parameters to txt\n txt_file = open(os.path.join(path, self.PARAMETERS), \"w\")\n\n txt_file.write(f\"ckpt name: '{date_time}'\\n\")\n txt_file.write(f\"epoch: {self.epoch}\\n\")\n\n if self.eval_results is not None: \n for key, value in self.eval_results.items():\n txt_file.write(str(key)+': '+str(value)+'\\n')\n # if 'acc' in self.eval_results:\n # txt_file.write(f\"acc: {self.eval_results['acc']}\\n\")\n # if 'p' in self.eval_results:\n # txt_file.write(f\"p: {self.eval_results['p']}\\n\")\n # if 'r' in self.eval_results:\n # txt_file.write(f\"r: {self.eval_results['r']}\\n\")\n # if 'f1' in self.eval_results:\n # txt_file.write(f\"f1: {self.eval_results['f1']}\\n\")\n \n txt_file.close()\n\n return path", "def save(self):\n return self.write()", "def save(self):\n #test output\n pywikibot.output('PICKLING %s records at %s' % (len(self.historyDict),datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\n with open(self.datfilename, 'wb') as f:\n pickle.dump(self.historyDict, f, protocol=config.pickle_protocol)", "def save (self, filename) :\n\t\tserialFile = open (filename, \"wb\")\n\t\tpickle.dump (self.production_rules, serialFile)\n\t\tpickle.dump 
(self.unitrelation, serialFile)\n\t\tpickle.dump (self.labels, serialFile)\n\t\tpickle.dump (self.keeper, serialFile)\n\t\tpickle.dump (self.strnodes, serialFile)\n\t\tpickle.dump (self.tokens, serialFile)\n\t\tserialFile.close()", "def save(self,fout):\n\n # only process 0 should save\n if COMM_WORLD.rank == 0:\n\n # The file format is:\n # L,nterms,masks,signs,coefficients\n # where each is just a binary blob, one after the other.\n\n # do this first so that we haven't already created the file if\n # it fails for some reason\n msc = self.get_MSC()\n\n with open(fout,mode='wb') as f:\n\n # write the chain length to the file. This is the only parameter\n # that we save other than the MSC representation.\n L = self.L\n if L is None:\n raise ValueError('L must be set before saving to disk.')\n\n # cast it to the type that C will be looking for\n int_t = msc.dtype[0].type\n L = int_t(L)\n\n f.write(L.tobytes())\n\n # write out the length of the MSC representation\n size = int_t(msc.size)\n f.write(size.tobytes())\n\n f.write(msc['masks'].tobytes())\n f.write(msc['signs'].tobytes())\n f.write(msc['coeffs'].tobytes())\n\n COMM_WORLD.barrier()", "def save(self,outPath=None):\n if (not self.canSave): raise StateError(_(\"Insufficient data to write file.\"))\n FileRep.save(self,outPath)", "def save(self,outPath=None):\n if (not self.canSave): raise StateError(_(\"Insufficient data to write file.\"))\n FileRep.save(self,outPath)", "def save(self, filename=\"matpipe.p\"):\n temp_backend = self.learner.backend\n self.learner._backend = self.learner.backend.fitted_pipeline_\n for obj in [self, self.learner, self.reducer, self.cleaner,\n self.autofeaturizer]:\n obj._logger = None\n with open(filename, 'wb') as f:\n pickle.dump(self, f)\n self.learner._backend = temp_backend", "def write(self):\n #\n if self.what == 'ecutwfc':\n for i in range(self.Ndata):\n self.pwinput.filename = self.inpFiles[i]\n self.pwinput.SYSTEM.set_ecutwfc(self.values[i])\n self.pwinput.write()\n #\n elif self.what == 'ecutrho':\n for i in range(self.Ndata):\n self.pwinput.filename = self.inpFiles[i]\n self.pwinput.SYSTEM.ecutrho = self.values[i]\n self.pwinput.write()\n elif self.what == 'kpoints':\n for i in range(self.Ndata):\n self.pwinput.filename = self.inpFiles[i]\n self.pwinput.Nk = self.values[i]\n self.pwinput.write()\n #\n else:\n raise RuntimeError('what = %s is not implemented yet' % (self.what))\n #\n self.inputs_have_been_written = True", "def save(self):\n # TODO (Pierre): code", "def save_data_pickle(self, save_full=False):\n self.train.to_pickle('../input/train_mod.pkl')\n self.test.to_pickle('../input/test_mod.pkl')\n if save_full:\n self.train_full.to_pickle('../input/train_full_mod.pkl')", "def save(self):\n\n # TODO:Find place to save data, write logic to save images(Filter out video?)", "def save_results(self, data, prefix, mode=\"train\", compute_loss=False):\n # save predictions\n self.save_predictions(prefix, mode)", "def savePolicyInc(self, FORCE_SAVE=False):\n if self.episodecount % self.save_step == 0:\n if self.learning or (FORCE_SAVE and self.doForceSave):\n self.saveLSPIParameters()\n # Fotis\n if self.dae is not None:\n self.dae.save_variables()\n\n print('savePolicyInc')\n # print \"episode\", self.episodecount\n # save_path = self.saver.save(self.sess, self.out_policy_file+'.ckpt')\n '''self.dqn.save_network(self.out_policy_file + '.dqn.ckpt')\n\n f = open(self.out_policy_file + '.episode', 'wb')\n for obj in [self.samplecount, self.episodes[self.domainString]]:\n pickle.dump(obj, f, 
protocol=pickle.HIGHEST_PROTOCOL)\n f.close()\n '''\n # logger.info(\"Saving model to %s and replay buffer...\" % save_path) ", "def save(self):\n joblib.dump(\n self.classifier, \"data/models/repeatsfinder/repeatsfinder.joblib\",\n )", "def save(self):\n with open(self.fkit.path) as fp:\n for processor in self._field.processors:\n fp = processor(fp)\n storage = FileStorage(fp)\n storage.filename = self.get_filename()\n self.uset.save(storage, folder=self.folder, name=self.get_filename())", "def save(self):\n data = (\n self.Joints,\n self.Links,\n self.joint_syms,\n self.global_syms,\n self.name,\n self.sym_prefix,\n )\n cloudpickle.dump(data, open(self.save_filename, \"wb\"))", "def save_model(self, filename) -> None:\n #t.save(self, filename)\n traced=t.jit.script(self)\n t.jit.save(traced,filename)", "def save_data(self) -> None:\n # Construct a grid in physical space\n rvals = np.logspace(start=-3,\n stop=2.5,\n num=21,\n endpoint=True)\n # Compute C, D, K1 and F on that grid\n Cvals = np.array([self.compute_C(r, Suppression.RAW) for r in rvals])\n Dvals = np.array([self.compute_D(r, Suppression.RAW) for r in rvals])\n K1vals = np.array([self.compute_K1(r, Suppression.RAW) for r in rvals])\n Fvals = np.array([self.compute_F(r, Suppression.RAW) for r in rvals])\n # Save them to file\n df = pd.DataFrame([rvals, Cvals[:, 0], Dvals[:, 0], K1vals[:, 0], Fvals[:, 0],\n Cvals[:, 1], Dvals[:, 1], K1vals[:, 1], Fvals[:, 1]]).transpose()\n df.columns = ['r', 'C(r)', 'D(r)', 'K1(r)', 'F(r)', 'dC(r)', 'dD(r)', 'dK1(r)', 'dF(r)']\n df.to_csv(self.file_path(self.filename + '.csv'), index=False)", "def _save_state(self):\n with open(self.histFile,'wb') as hf:\n hf.write(self.dbFile.Value)", "def save(self, PATH):\n self._saver.save(self._sess, PATH)", "def save(self):\r\n try:\r\n self.process_save()\r\n except InputError as ex:\r\n print(ex)\r\n self.save()\r\n except KeyError:\r\n print(\"No saved data to save/load. 
Please save some data before loading in data.\")\r\n self.menu_page()", "def save(self, compute_snrs=True):\n if not self.save_mode:\n raise RuntimeError('Need to enable save mode to save')\n\n fn = os.path.join(self.output_dir,\n 'data_' + time_string() + '.h5')\n save_dict(fn=fn, d=self.data)\n if compute_snrs:\n from src.analyzer import DataAnalyzer\n da = DataAnalyzer.fromfilename(fn)\n da.snr_list()\n return fn", "def save(self):\n self.index.saveIndex(c.index_path('hnsw.index'))\n joblib.dump(self.ys, \"%s.ys\" % self.index_file_prefix)", "def save(self):\n filename = os.path.expanduser(\"~/\" + self.name)\n print(filename)\n np.savetxt(filename + \"_left.txt\", self.central)\n np.savetxt(filename + \"_right.txt\", self.boundaries)", "def save(self) -> None:\n self._save_marker = self.get_next()", "def save_states(self, checkpoint):\n raise NotImplementedError()", "def save(self):\n\n self.saver.save(self.sess, self.path + '/tensorflow-model', global_step=self.counter.count)", "def save_trainable_variables (self , sess , savefn):\r\n state = getattr (self , 'state' , {})\r\n utils.train.save_trainable_variables(\r\n sess, savefn, self._scope, **state )", "def save_result(self, results: Dict[str, Dict[str, Any]]) -> None:\n if self.out_dir:\n os.makedirs(self.out_dir, exist_ok=True)\n with open(self.eval_result_file, 'w') as f:\n json.dump(results, f, indent=2)\n else:\n raise ValueError(f'Invalid output dir: {self.out_dir}')\n\n if self.verbose:\n print(f\"======\\nPanoptic nuScenes {self.task} evaluation for {self.eval_set}\")\n print(json.dumps(results, indent=4, sort_keys=False))\n print(\"======\")", "def save_results(self, data, prefix, mode=\"train\"):\n # save predictions\n if mode != \"train\":\n self.save_predictions(prefix, mode)\n #if self.config[\"model\"][\"version\"] != \"IE\":\n #self.save_assignments(prefix, mode)\n #self.visualize_assignments(prefix=prefix, mode=mode)\n\n \"\"\" given sample data \"\"\"\n if data is not None:\n # maintain sample data\n self._set_sample_data(data)\n\n # convert data as Variables\n data = [*self.tensor2variable(data), data[1]]\n\n outputs = self.forward(data[:-1])\n logit_list = outputs[0][0]\n\n # compute loss\n loss = self.loss_fn(outputs[0], data[-2], count_loss=False)\n\n # save results of assignment of model\n if self.config[\"model\"][\"version\"] != \"IE\":\n logits = [logit.data.cpu() for logit in logit_list]\n vis_data = [*self.sample_data, self.criterion.assignments]\n if type(vis_data[0][-1]) == type(list()):\n vis_data[0][-1] = vis_data[0][-1][0]\n if self.use_knowledge_distillation:\n vis_data.append([net_utils.get_data(bout)\n for bout in self.base_outputs])\n\n class_names = [self.itoa[str(key)]\n for key in range(len(self.itoa.keys()))]\n vis_utils.save_mcl_visualization(\n self.config, vis_data, logits, class_names, \\\n self.itow, self.itoa, prefix, \\\n self.use_knowledge_distillation\n )", "def save_agent(self, path):\n # save all parameters needed to reconstruct the agent\n pickle_save(self.save_attrs, path)\n # initialize tensorflow saver\n saver = tf.train.Saver(var_list=self._variables_to_save())\n saver.save(self.sess, path + CHECKPOINT_EXTENSION)", "def saveToFile(self):\n filename = str(self.outputFileName.text())\n\n if not len(filename):\n return\n\n if os.path.exists(filename) and not self.overwriteCheck.isChecked():\n self.mainWindow.displayWarning(\"File already exists: not overwriting\")\n return\n\n # lattice object\n lattice = self.rendererWindow.getCurrentInputState()\n\n # gather vis atoms if 
required\n if self.writeFullLattice:\n visibleAtoms = None\n else:\n visibleAtoms = self.rendererWindow.gatherVisibleAtoms()\n\n # write Lattice\n lattice.writeLattice(filename, visibleAtoms=visibleAtoms)", "def save(self):\n\n if self.ckpt_manager is not None:\n save_path = self.ckpt_manager.save()\n print(\"Saved checkpoint at: {}\".format(save_path))\n else:\n print(\"There is no checkpoint manager supplied for saving the \"\n \"network weights, optimizer, or other trackables.\")\n print(\"Therefore these will not be saved and the training will \"\n \"start from default values in the future.\")\n print(\"Consider using a checkpoint manager to save the network \"\n \"weights and optimizer.\")", "def _save(self, step):\n\n output_path = self.output_path + '/checkpoints/'\n if not os.path.isdir(output_path):\n os.makedirs(output_path)\n self.saver.save(self.session, save_path=output_path,global_step=step)", "def save_values(self):\n # TODO: Add self.prefix and extension\n NetworkTables.saveEntries(self.file.get_filename(), prefix='/vision/' + self.name + '_')", "def save(self, values):", "def save(self, filepath):\n save_ckpt = {\n 'ae': self.state_dict(),\n 'optimizer': self.optimizer.state_dict()\n }\n try:\n torch.save(save_ckpt, os.path.join(filepath, 'ckpt_ae.pth'))\n except:\n print('Cannot save autoencoder.')", "def save(self):\n path = self.get_benchmark_file_path(self._conf.results_dir)\n util.write_json(path, self._all_benchmarks, self.api_version)", "def save_data_to_disk(self):\n Omega_M = self.theta_fid[0]\n for key in self.data.keys():\n np.save(f'./preloaded_data/{Omega_M}_{self.delta_theta[0]}_{key}.npy', self.data[key])", "def save(self, filename=None):\n if filename:\n self.filename = filename\n content = \"\\n\".join([x.dump() for x in self.paragraphs])\n control_file = open(self.filename, \"wb\")\n control_file.write(content.encode(\"utf-8\"))\n control_file.close()", "def write_checkpoint(self):\n self.file_checkpoint_data = open(self.path_checkpoint, \"a+\")\n array_to_write = [str(self.radious), self.type_feature, self.type_filtering, self.h_filterig]\n self.file_checkpoint_data.write(','.join(array_to_write) + \"\\n\")\n self.file_checkpoint_data.flush()", "def save_training(self):\n\n filename = str(hashlib.sha1(str(self.training_data).encode(\"utf-8\"))\n .hexdigest())\n path = \"./training/\" + filename + \".json\"\n\n data = {\n \"states\": self.states,\n \"transitions\": self.transitions,\n \"matrix\": self.matrix.tolist()\n }\n\n with open(path, \"w\") as outfile:\n json.dump(data, outfile)", "def save_plot(self, ):\n pass", "def save_operator(operator,\n file_name=None,\n data_directory=None,\n allow_overwrite=False,\n plain_text=False):\n\n file_path = get_file_path(file_name, data_directory)\n\n if os.path.isfile(file_path) and not allow_overwrite:\n raise OperatorUtilsError(\"Not saved, file already exists.\")\n\n if isinstance(operator, FermionOperator):\n operator_type = \"FermionOperator\"\n elif isinstance(operator, BosonOperator):\n operator_type = \"BosonOperator\"\n elif isinstance(operator, QubitOperator):\n operator_type = \"QubitOperator\"\n elif isinstance(operator, QuadOperator):\n operator_type = \"QuadOperator\"\n elif isinstance(operator, (InteractionOperator, InteractionRDM)):\n raise NotImplementedError('Not yet implemented for '\n 'InteractionOperator or InteractionRDM.')\n else:\n raise TypeError('Operator of invalid type.')\n\n for term in operator.terms:\n if isinstance(operator.terms[term], sympy.Expr):\n raise TypeError('Cannot save 
sympy expressions.')\n\n if plain_text:\n with open(file_path, 'w') as f:\n f.write(operator_type + \":\\n\" + str(operator))\n else:\n tm = operator.terms\n with open(file_path, 'wb') as f:\n marshal.dump(\n (operator_type, dict(zip(tm.keys(), map(complex,\n tm.values())))), f)", "def save(self):\n #--Data file exists?\n filePath = self.path\n if os.path.exists(filePath):\n ins = open(filePath)\n outData = compat.uncpickle(ins)\n ins.close()\n #--Delete some data?\n for key in self.deleted:\n if key in outData:\n del outData[key]\n else:\n outData = {}\n #--Write touched data\n for key in self.changed:\n outData[key] = self.data[key]\n #--Pickle it\n tempPath = filePath+'.tmp'\n cPickle.dump(outData,open(tempPath,'w'))\n renameFile(tempPath,filePath,True)", "def save(self, path):\n individual = self.population.fittest_individual()\n order = [int(l) for l in individual.label_order]\n fitness = individual.fitness\n data = {'name': self.ds.name,\n 'num_labels': len(order),\n 'order': order,\n 'fitness': fitness\n }\n with open(path, 'w') as f:\n json.dump(data, f)", "def startEvaluationMode(self):\n self.saved_dat_ref = self.data_ref", "def save_state(self):\n pass", "def save(self):\n defn_dir = path.dirname(self.definition_filename)\n\n if not path.isdir(defn_dir):\n os.makedirs(defn_dir)\n\n # Force check of stopsignal\n self.stopsignal\n\n with open(self.definition_filename, 'w') as df:\n yaml.safe_dump(self.raw_data, df, default_flow_style=False)", "def save(self, filename):\n with open(filename, 'wb') as f:\n pickle.dump({'wi': self.W_input_to_hidden, 'wo': self.W_hidden_to_output}, f)", "def save_dataset(self):\n if os.path.exists(self.output_path):\n print('Directory already exists. EXITING.')\n sys.exit()\n if not os.path.exists(self.output_path): os.mkdir(self.output_path)\n for window_num, cur_window in enumerate(self.time_windows):\n window_dir = (os.path.join(self.output_path, ('window %s' % str(window_num + 1))))\n if not os.path.exists(window_dir): os.mkdir(window_dir)\n for filepath in cur_window:\n topic = os.path.basename(os.path.dirname(filepath))\n topic_dir = os.path.join(os.path.join(window_dir, topic))\n if not os.path.exists(topic_dir): os.mkdir(topic_dir)\n copy(filepath, topic_dir)\n self.ground_truth.append((len(os.listdir(window_dir))))", "def save_object(self, filename, data):\n with open(filename, 'wb') as outp: # Overwrites any existing file.\n pickle.dump(data, outp, pickle.HIGHEST_PROTOCOL)", "def save (self):\n pass", "def save_end(self):\n data = self.savedata\n self.savedata = None\n return data", "def save_to_checkpoint(self, chkpt):\n chkpt[self.name] = self.state_dict()", "def save(self, checkpoint_path: str):\r\n raise NotImplementedError", "def save(self,\n path,\n save_model=False):\n if save_model:\n self.model.save(path)\n\n h5dict = H5Dict(path)\n self._update_hdf5(h5dict, self.generator_train.command_dict, 'train')\n \n try:\n self._update_hdf5(h5dict, self.generator_val.command_dict, 'val')\n except AttributeError:\n pass", "def save_to_disk(self, file_name = \"vehicle_classifier.pkl\"):\n self.classifier.save_to_disk(file_name)", "def save(self, exp_name=None, exp_id=None, path='ap_output', display=True):\n\n # Create output directory if it doesn't exist\n if path not in listdir():\n makedirs(path)\n\n # Set exp_name\n if exp_name is None:\n if 'log' in self and 'name' in self.log:\n exp_name = self.log['name']\n else:\n exp_name = 'Unnamed'\n\n exp_name = exp_name.replace(\" \", \"_\")\n\n # Set exp_id\n if exp_id is None:\n exp_id = 
_last_exp_id(exp_name, path) + 1\n\n # Create new directory for output\n path = f'{path}/{exp_name}_{exp_id}'\n makedirs(path)\n\n # Save experiment data\n for key, output in self.items():\n\n if isinstance(output, pd.DataFrame):\n output.to_csv(f'{path}/{key}.csv')\n\n elif isinstance(output, DataDict):\n for k, o in output.items():\n\n if isinstance(o, pd.DataFrame):\n o.to_csv(f'{path}/{key}_{k}.csv')\n elif isinstance(o, dict):\n with open(f'{path}/{key}_{k}.json', 'w') as fp:\n json.dump(o, fp, cls=NpEncoder)\n\n else: # Use JSON for other object types\n try:\n with open(f'{path}/{key}.json', 'w') as fp:\n json.dump(output, fp, cls=NpEncoder)\n except TypeError as e:\n print(f\"Warning: Object '{key}' could not be saved. \"\n f\"(Reason: {e})\")\n os.remove(f'{path}/{key}.json')\n\n # TODO Support grids & graphs\n # elif t == nx.Graph:\n # nx.write_graphml(output, f'{path}/{key}.graphml')\n\n if display:\n print(f\"Data saved to {path}\")", "def save_bgn(self):\n self.savedata = ''", "def save_model(self):\n self.pred_net.save((self.save_path / \"iqn_pred_net\").absolute().as_posix())\n self.target_net.save((self.save_path / \"iqn_target_net\").absolute().as_posix())", "def save_result(self):\n np.save(os.path.join(self.outpath, self.image_name + '_run.npy'), {\n 'device' : u.get_gpu_name(),\n 'elapsed': u.sec2time(self.elapsed),\n 'outpath': self.outpath,\n 'history': self.history,\n 'mask' : self.mask,\n 'image' : self.img,\n 'output' : self.out_best,\n 'noise' : self.input_list,\n })\n \n # save the model\n if self.args.savemodel:\n torch.save(self.net.state_dict(),\n os.path.join(self.outpath, self.image_name + '_model.pth'))", "def saveauto(self):\n self.inp.getedge()\n ss=ss=strftime(\"_%Y-%m-%d_%H:%M:%S\", gmtime())\n fn=os.environ['VMEWORKDIR'] +\"/WORK/phases/\"+self.name+ss+self.inp.edge+\"_\"+self.inp.inpnum+\"_\"+self.inp.ctpnum+\".ps\"\n rc=self.c1.postscript(file=fn)\n if rc is not '':\n MywError(errmsg=\"File \"+fn+\" cannot be created.\")\n print \"rc=\",rc,len(rc)\n else:\n print \"File \",fn, \" saved.\"", "def save(self):\n return self.save_as(self.filename)", "def save_data(self, f): \n if not self.sampling:\n self.convert_to_array()\n np.save(f, self.reads)", "def saveData(self):\n\n\n path = QtWidgets.QFileDialog.getSaveFileName(self, 'Save File', os.getcwd())\n\n if path[0] != '':\n\n filepath, filename = os.path.split(path[0])\n\n if os.path.exists(filepath):\n\n self.getCurrentPanda().saveData(path[0])" ]
[ "0.6351816", "0.6287839", "0.6247706", "0.6224278", "0.6199671", "0.6142667", "0.61253536", "0.61243397", "0.61077684", "0.6106945", "0.6079188", "0.6070314", "0.6067908", "0.6066707", "0.60651135", "0.6057868", "0.6051476", "0.6047747", "0.6005017", "0.59850013", "0.5973669", "0.5962021", "0.59389085", "0.5934998", "0.5927165", "0.5910311", "0.5902203", "0.59001696", "0.58610284", "0.58420324", "0.58314025", "0.58229405", "0.58187675", "0.5818691", "0.5809296", "0.5808011", "0.58060664", "0.58024657", "0.58009493", "0.5799466", "0.5799466", "0.57939726", "0.5786149", "0.5781746", "0.57811314", "0.5766387", "0.5758609", "0.5750487", "0.5749695", "0.5730985", "0.57288593", "0.5726805", "0.5722943", "0.57185644", "0.5708688", "0.57081485", "0.56816775", "0.56785715", "0.5675151", "0.5673247", "0.5664386", "0.5662737", "0.56574696", "0.56555486", "0.56549275", "0.56547415", "0.5653248", "0.56479865", "0.56464446", "0.56407046", "0.56334406", "0.5629012", "0.56246454", "0.562448", "0.5622345", "0.5614727", "0.56022125", "0.55953944", "0.5591524", "0.55828863", "0.5581455", "0.5579565", "0.55792165", "0.55782264", "0.55766386", "0.55744797", "0.55709666", "0.5570557", "0.55681443", "0.55646807", "0.55641675", "0.5562418", "0.55567753", "0.5555978", "0.55551016", "0.5551702", "0.55499434", "0.55497247", "0.5548447", "0.55481964", "0.5538424" ]
0.0
-1
Reset environment state, set new random seeds, reset metrics, update the episode counter, etc.
def reset(self, gui=False, test_ind=-1):
    # self.gui = gui
    # if gui:
    #     # save episode to disk
    #     if self._global_frames:
    #         make_video_from_rgb_imgs(self._global_frames, self.output_path, f"episode_global_{self.cur_episode}")
    #     for agent_id, frames in self._agent_frames.items():
    #         if frames:
    #             make_video_from_rgb_imgs(frames, self.output_path, f"episode_{self.cur_episode}_{agent_id}")
    #     # clear frames of previous episode
    #     self._global_frames = []
    #     self._agent_frames = {agent_id: [] for agent_id in self.agent_tags}

    if (self.train_mode):
        seed = self.seed
    elif (test_ind < 0):
        seed = self.seed - 1
    else:
        seed = self.test_seeds[test_ind]
    np.random.seed(seed)
    self.seed += 1
    self.cur_episode += 1
    self.t = 0  # step counter for each episode
    self.rewards = [0]  # to keep track of global rewards
    obs = self.env.reset(done_only=False).cpu().numpy()
    # if self.gui:
    #     self._global_frames.append(self.env.map_to_colors().astype(np.uint8))
    #     for agent_id, agent_obs in obs.items():
    #         self._agent_frames[agent_id].append(agent_obs.astype(np.uint8))
    # obs = list(obs.values())
    obs = self._get_state(obs)  # new
    return obs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _reset_seeds(self) -> None:\n self._seeds = [None for _ in range(self.num_envs)]", "def _hard_reset(self):\n self._reset_specific_envs(np.ones_like(self.episodes_done))\n self._update_other_info()", "def _soft_reset(self):\n self._reset_specific_envs(self.episodes_done)\n self._update_other_info()", "def resetEnv(self):\n obs = self.env.reset()\n self.state = torch.tensor(obs, device=self.device, dtype=torch.float).unsqueeze(0)\n return", "def set_global_seeds(seed):\n \n torch.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)", "def env_init(self, env_info={}):\n self.dealer_sticks = env_info['dealer_sticks']\n self.random = np.random.RandomState(env_info['seed'])\n self.current_state = None", "def _reset(self, env_id: np.ndarray) -> None:", "def reset(self, **kwargs):\n if self._backend_agent:\n self._backend_agent._on_gym_reset_begin(self, **kwargs)\n\n result = self.env.reset(**kwargs)\n if self.steps_done_in_episode > 0 and not self.is_episode_done:\n self.episodes_done += 1\n self.total.episodes_done_inc()\n self.is_episode_done = False\n self.steps_done_in_episode = 0\n\n if self._backend_agent:\n self._backend_agent._on_gym_reset_end(self, result, **kwargs)\n return result", "def reset(self):\n \n if self._config.fix_seed:\n self._init_seed = (self._init_seed + 1) % 2**32 # set_seed requires int\n self.game.set_seed(self._init_seed)\n\n super(ShootEnv, self).reset()\n\n self._killcount = 0.0\n self._ammo = self.game.get_game_variable(GameVariable.AMMO2)\n self._health = self.game.get_game_variable(GameVariable.HEALTH)\n\n return self._get_observation()", "def set_all_random_seeds(self, random_seed):\n np.random.seed(random_seed)\n tf.random.set_seed(random_seed)", "def reset(self) -> None:\n self._rng = random.default_rng(self.seed)", "def reset(self):\n\n # initialize gym env variables\n self.finish = False\n self.curr_step = -1\n self.curr_episode += 1\n\n # initialize target position\n self.target = np.random.uniform(-10.0,10.0,size=(2))\n\n # initialize sheep positions\n if self.fixed_reset:\n init_sheep_pose = np.array([75.0, 75.0])\n self.sheep_poses = (np.random.uniform(-50.0, 50.0, \n size=(self.num_sheep,2))) + init_sheep_pose[None,:]\n else:\n init_sheep_pose = np.random.uniform(-self.init_sheep_root, \n self.init_sheep_root, size=(2))\n self.sheep_poses = (np.random.uniform(-self.init_sheep_range, \n self.init_sheep_range, size=(self.num_sheep,2))) \\\n + init_sheep_pose[None,:]\n self.sheep_com = self.sheep_poses.mean(axis=0)\n\n # get the farthest sheep and radius of the sheep\n dist_to_com = np.linalg.norm((self.sheep_poses - self.sheep_com[None,:]), axis=1)\n self.farthest_sheep = self.sheep_poses[np.argmax(dist_to_com),:]\n self.radius_sheep = np.array([np.max(dist_to_com)])\n\n # update distance to target\n self.target_distance = np.linalg.norm(self.target - self.sheep_com)\n\n # initialize values for reward estimation\n self.init_radius_sheep = self.radius_sheep\n self.init_target_distance = self.target_distance\n\n # initialize dog position\n if self.fixed_reset:\n init_dog_pose = np.array([0.0,75.0])\n else:\n init_theta = np.random.uniform(-np.pi,np.pi)\n init_dog_pose = init_sheep_pose + self.init_dog_distance*np.array([np.cos(init_theta), \n np.sin(init_theta)])\n self.dog_pose = init_dog_pose\n\n # initialize inertia\n self.inertia = np.ones((self.num_sheep, 2))\n\n # initialize episode reward and length\n self.episode_reward = 0\n self.episode_length = 0\n\n # get the state, reward, finish, info\n state = self._get_state()\n \n return 
state", "def seed_random():\n random.seed(0)", "def rngreset(seed=0):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.backends.cudnn.deterministic = True", "def reset(\n self,\n *,\n seed: int | None = None,\n options: dict[str, Any] | None = None,\n ) -> tuple[np.ndarray, AtariEnvStepMetadata]:\n super().reset(seed=seed, options=options)\n del options\n # Gymnasium's new seeding API seeds on reset.\n # This will cause the console to be recreated\n # and loose all previous state, e.g., statistics, etc.\n seeded_with = None\n if seed is not None:\n seeded_with = self.seed(seed)\n\n self.ale.reset_game()\n obs = self._get_obs()\n\n info = self._get_info()\n if seeded_with is not None:\n info[\"seeds\"] = seeded_with\n return obs, info", "def test_env_reset_and_step(self):\n create_env = CreateEnv()\n env = create_env.env\n\n # Assert that the total number of agents matches the sum of the 'n_agents'\n # configuration and the number of planners (1 in this case)\n num_planners = 1\n self.assertEqual(\n len(env.all_agents), create_env.env_config[\"n_agents\"] + num_planners\n )\n\n # Assert that the number of agents created in the world\n # matches the configuration specification\n self.assertEqual(len(env.world.agents), create_env.env_config[\"n_agents\"])\n\n # Assert that the planner's index in the world is 'p'\n self.assertEqual(env.world.planner.idx, \"p\")\n\n obs = env.reset()\n\n # Test whether the observation dictionary keys are created as expected\n self.assertEqual(\n sorted(list(obs.keys())),\n [str(i) for i in range(create_env.env_config[\"n_agents\"])] + [\"p\"],\n )\n\n obs, reward, done, info = env.step({})\n\n # Check that the observation, reward and info keys match\n self.assertEqual(obs.keys(), reward.keys())\n self.assertEqual(obs.keys(), info.keys())\n\n # Assert that __all__ is in done\n assert \"__all__\" in done", "def _reset(self):\n np.random.shuffle(self.id)\n self.episode_step = 0 # Reset episode step counter at the end of every episode\n self._state = self.X_train[self.id[self.episode_step]]\n self._episode_ended = False\n\n return ts.restart(self._state)", "def run(self, seed=None):\n if seed is not None:\n random_seed.set_seed(seed)\n self.reset()", "def __init__(self, env, random_seed=None):\n self.env = env \n self.RandomState = np.random.RandomState(random_seed)", "def reset(self):\n # Sample random state from initial state distribution\n self._cur_state = self._sample_state(self._mdp.I)\n self._prev_state = self._cur_state", "def _set_seed(self) -> None:\r\n random.seed(self.seed)\r\n np.random.seed(self.seed)", "def set_all_seeds(seed):\n os.environ['PYTHONHASHSEED'] = str(seed)\n random.seed(seed)\n torch.manual_seed(seed)\n np.random.seed(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False", "def seed_all(seed):\n\n np.random.seed(seed)\n torch.manual_seed(seed)\n random.seed(seed)", "def seed(self, seed):\n\n random.seed(seed)\n np.random.seed(seed)", "def set_seeds(seed, env=None):\n torch.manual_seed(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n np.random.seed(seed)\n random.seed(seed)\n if env is not None:\n env.seed(seed)", "def reset(self, env):\n self._env = env\n return", "def seed(self, seed=None):\n raise self.gym.seed(seed)", "def reset(ctx, with_testdb):\n ctx.invoke(init, with_testdb=with_testdb)\n ctx.invoke(seed)\n\n return None", "def reset(ctx, with_testdb):\n ctx.invoke(init, 
with_testdb=with_testdb)\n ctx.invoke(seed)\n\n return None", "def simulate(self, environment, seed=0):\n # set the seeds\n np.random.seed(seed)\n environment.seed(seed)\n\n # simulate\n self._train_simulate(environment)", "def reset(self):\n self.agents.reset()\n self._cur_obs, self._cur_lm = self.parallel_env.reset()\n self.agent_cum_rewards = np.zeros((len(self.agents), self.n_states, 1))\n self.agent_contiguous_states = np.full((len(self.agents), self.n_states), True)", "def reset(self):\n self.success = False\n self.i = 0\n if self.monitor:\n self.env = gym.wrappers.Monitor(self.env, \"./mountaincar-monitor\", force=True)\n state = self.env.reset()\n state = self.preprocess_state(state)\n state = np.concatenate([state] * self.action_repeat)\n return state", "def seed():", "def seed(self, seed=None):\r\n if seed is None:\r\n seed = self.default_instance_seed\r\n\r\n seedgen = numpy.random.RandomState(seed)\r\n for old_r, new_r in self.state_updates:\r\n old_r_seed = seedgen.randint(2 ** 30)\r\n old_r.set_value(numpy.random.RandomState(int(old_r_seed)),\r\n borrow=True)", "def _reset(self):\r\n \r\n airgym.reset()\r\n self.stepN = 0\r\n self.episodeN += 1\r\n \r\n self.allLogs = { 'reward': [0] }\r\n self.allLogs['distance'] = [221]\r\n self.allLogs['action'] = [1]\r\n \r\n print(\"\")\r\n \r\n #self.sensors = airgym.getSensorStates()\r\n \r\n # Initial state\r\n self.state = airgym.getScreenDepthVis()\r\n \r\n \r\n return self.state", "def set_seed():\n np.random.seed(1423)", "def reset(self):\n\n self.curr_episode += 1\n self.curr_step = 0\n\n self.action_episode_memory.append([])\n self.rewards.append([])\n\n self.is_finalized = False\n init_state, init_reward = self._take_action(5 * np.random.randn(self.act_dimension))\n self.initial_conditions.append(init_state)\n return init_state", "def set_seed(self, seed: int):\n self.rsimulator.set_seed(seed)\n # Maybe call new game here?", "def reset(self, **kwargs):\n\n # on a reset we set the health back to 120\n self.player_hp = 120\n self.enemy_hp = 120\n\n # reset the environment\n \n observation = self.env.reset(**kwargs)\n\n # we restarted inc the number\n self.num_resets += 1\n\n # the observation\n obs = self.observation(observation)\n self.current_frame_number = 0\n \n # fill up the queue\n for i in range(4):\n self.q.append(obs)\n \n return np.array(list(self.q))", "def reset_env(\n self, key: chex.PRNGKey, params: EnvParams\n ) -> Tuple[chex.Array, EnvState]:\n # Always start with no stock\n # # By defauly, we start on a random weekday\n # Otherwise, with fixed burn-in, would always\n # count return from same weekday\n weekday = jax.lax.cond(\n params.initial_weekday == -1,\n lambda _: jax.random.randint(key, (), 0, 7, dtype=jnp_int),\n lambda _: params.initial_weekday.astype(jnp_int),\n None,\n )\n\n state = EnvState(\n weekday=weekday,\n stock=jnp.zeros(self.max_useful_life - 1, dtype=jnp_int),\n step=0,\n )\n return self.get_obs(state), state", "def set_random_seed():\n np.random.seed(42)", "def reinitialize(self, random_state):\n pass", "def seed(self, seed=None):\r\n if seed is None:\r\n seed = self.default_seed\r\n #backport\r\n #seed = self.default_seed if seed is None else seed\r\n seedgen = numpy.random.RandomState(seed)\r\n for old_r, new_r in self.random_streams.random_state_variables:\r\n old_r_seed = seedgen.randint(2 ** 30)\r\n old_r_container = self.memo[old_r].value\r\n if old_r_container.value is None:\r\n #the cast to int here makes it work on 32bit machines,\r\n #not sure why\r\n old_r_container.value = 
numpy.random.RandomState(\r\n int(old_r_seed))\r\n else:\r\n #the cast to int here makes it work on 32bit machines,\r\n #not sure why\r\n old_r_container.value.seed(int(old_r_seed))", "def setUp(self) -> None:\n self.random = np.random.RandomState(seed=42)", "def random():\n np.random.seed(0)", "def reset_env(\n self, key: chex.PRNGKey, params: EnvParams\n ) -> Tuple[chex.Array, EnvState]:\n # Always start with no stock\n # # By defauly, we start on a random weekday\n # Otherwise, with fixed burn-in, would always\n # count return from same weekday\n weekday = jax.lax.cond(\n params.initial_weekday == -1,\n lambda _: jax.random.randint(key, (), 0, 7, dtype=jnp_int),\n lambda _: params.initial_weekday.astype(jnp_int),\n None,\n )\n\n state = EnvState(\n weekday=weekday,\n stock=self.initial_stock,\n step=0,\n )\n return self.get_obs(state), state", "def reset_from_state(self, state):\n\n # initialize gym env variables\n self.finish = False\n self.curr_step = -1\n self.curr_episode += 1\n\n # initialize target position\n self.target = state[4:6]\n\n # initialize sheep com\n self.sheep_com = state[0:2]\n\n # get the farthest sheep and radius of the sheep\n self.farthest_sheep = state[2:4]\n self.radius_sheep = np.array([state[8]])\n\n # update distance to target\n self.target_distance = np.array([state[9]])\n\n # initialize sheep position\n self.sheep_poses = (np.random.uniform(-0.75*self.radius_sheep, \n 0.75*self.radius_sheep, size=(self.num_sheep,2))) \\\n + self.sheep_com[None,:]\n rnd_ind = np.random.choice(self.num_sheep)\n self.sheep_poses[rnd_ind,:] = state[2:4]\n\n # initialize values for reward estimation\n self.init_radius_sheep = self.radius_sheep\n self.init_target_distance = self.target_distance\n\n # initialize dog position\n init_dog_pose = state[6:8]\n self.dog_pose = init_dog_pose\n\n # initialize inertia\n self.inertia = np.ones((self.num_sheep, 2))\n\n # initialize episode reward and length\n self.episode_reward = 0\n self.episode_length = 0\n\n # get the state, reward, finish, info\n state = self._get_state()\n \n return state", "def reset(self) -> None:\n self.is_run = False\n self.env_step = 0\n if self.resume_from_log:\n self.start_epoch, self.env_step, self.gradient_step = \\\n self.logger.restore_data()\n\n self.last_rew, self.last_len = 0.0, 0\n self.start_time = time.time()\n if self.train_collector is not None:\n self.train_collector.reset_stat()\n\n if self.train_collector.policy != self.policy:\n self.test_in_train = False\n elif self.test_collector is None:\n self.test_in_train = False\n\n if self.test_collector is not None:\n assert self.episode_per_test is not None\n assert not isinstance(self.test_collector, AsyncCollector) # Issue 700\n self.test_collector.reset_stat()\n test_result = test_episode(\n self.policy, self.test_collector, self.test_fn, self.start_epoch,\n self.episode_per_test, self.logger, self.env_step, self.reward_metric\n )\n self.best_epoch = self.start_epoch\n self.best_reward, self.best_reward_std = \\\n test_result[\"rew\"], test_result[\"rew_std\"]\n if self.save_best_fn:\n self.save_best_fn(self.policy)\n\n self.epoch = self.start_epoch\n self.stop_fn_flag = False\n self.iter_num = 0", "def set_rng_seed(seed: Optional[int]) -> None:\n if seed is not None:\n os.environ[\"PYTHONHASHSEED\"] = str(0)\n import random\n random.seed(seed)\n import numpy as np\n np.random.seed(seed)\n from tensorflow.compat.v1 import set_random_seed\n set_random_seed(seed)\n print(f\"Running with seed: {seed}\")", "def seed_all(seed):\n np.random.seed(seed)\n 
random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)", "def seed_all(seed):\n np.random.seed(seed)\n random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)", "def reset(self):\n \n self.env.reset()\n obs, _, done, _ = self.env.step(1)\n if done: \n self.env.reset()\n obs, _, done, _ = self.env.step(2)\n if done: \n self.env.reset()\n \n return obs", "def reset():\n Vessel.reset_instances()", "def seed_everything(seed):\n random.seed(seed)\n os.environ[\"PYTHONHASHSEED\"] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False", "def set_all_random_seeds(random_seed: int, verbose: bool = True):\n random.seed(random_seed)\n np.random.seed(random_seed)\n torch.manual_seed(random_seed)\n if torch.cuda.is_available():\n torch.cuda.manual_seed_all(random_seed)\n if verbose:\n logging.info(\"Setting random seed to: %d\", random_seed)", "def _reset(self): # We are using a virtual function defined in the gym infrastructure.\n self.gazebo.unpauseSim()\n \"\"\"\n why we need to unpauseSim because resetting controllers and for checking the sensors, we need the simulation\n to be running because otherwise we don't have any sensory data and we don't have access to the controller reset\n functions services they won't work and tell you to hit play. => it is very important.\n \"\"\"\n self.controllers_object.reset_controllers()\n self.check_all_sensors_ready()\n self.set_init_pose()\n #initialized robot\n self.gazebo.pauseSim()\n self.gazebo.resetSim()\n self.gazebo.unpauseSim()\n self.controllers_object.reset_controllers()\n self.check_all_sensors_ready()\n self.gazebo.pauseSim()\n self.init_env_variables()\n obs = self._get_obs()\n simplified_obs = self.convert_obs_to_state(obs)\n\n return simplified_obs", "def set_global(self):\n random.setstate(self.py)\n np.random.set_state(self.np)\n torch.set_rng_state(self.torch)", "def test_model_reset_correctly(tmpdir):\n tutils.reset_seed()\n\n model = EvalModelTemplate()\n\n # logger file to get meta\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n )\n\n before_state_dict = deepcopy(model.state_dict())\n\n trainer.tuner.scale_batch_size(model, max_trials=5)\n\n after_state_dict = model.state_dict()\n\n for key in before_state_dict.keys():\n assert torch.all(torch.eq(before_state_dict[key], after_state_dict[key])), \\\n 'Model was not reset correctly after scaling batch size'", "def reset(self):\n self.tracker.reset()\n self.episode += 1\n self.episode_step = 0", "def seed_everything(seed=0):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n torch.backends.cudnn.deterministic = cudnn_deterministic", "def set_seed(seed=42):\n np.random.seed(seed)\n random.seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)", "def update_random_state(self):\n self.random_state = RandomState()", "def fix_seeds(\n seed=90,\n set_system=True,\n set_torch=True,\n set_torch_cudnn=True):\n # set system seed\n if set_system:\n np.random.seed(seed)\n random.seed(seed)\n\n # set torch seed\n if set_torch:\n torch.manual_seed(seed)\n\n # set torch cudnn backend\n if set_torch_cudnn:\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False", "def random_seed(seed):\n state = RandomState()\n random.seed(seed) # alter state\n np.random.seed(seed)\n torch.manual_seed(seed)\n yield\n 
state.set_global()", "def specific_reset(self) -> None:\n self.agent.specific_reset() # reset joints\n new_pos = self.agent.init_xyz\n new_pos[:2] = np.random.uniform(-0.01, 0.01, 2)\n self.agent.set_position(new_pos)\n self.old_potential = self.calculate_task_potential()", "def reset(self):\n self._reset_next_step = False\n self.step_count = 0\n \n self._state = self.state_initializer()\n self._meta_state = self._meta_state_initializer()\n self.task.reset(self._state, self._meta_state)\n self.physics.reset(self._state)\n self.action_space.reset(self._state)\n for rule in self.game_rules:\n rule.reset(self._state, self._meta_state)\n rule.step(self._state, self._meta_state)\n \n return dm_env.restart(self.observation())", "def reset(self, **kwargs):\n self.env.reset(**kwargs)\n noops = self.unwrapped.np_random.randint(1, self.noop_max + 1)\n obs = None\n for _ in range(noops):\n obs, _, done, _ = self.env.step(self.noop_action)\n if done:\n obs = self.env.reset(**kwargs)\n return obs", "def reset(self, **kwargs):\n self.env.reset(**kwargs)\n if self.override_num_noops is not None:\n noops = self.override_num_noops\n else:\n noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) # pylint: disable=E1101\n assert noops > 0\n obs = None\n for _ in range(noops):\n obs, _, done, _ = self.env.step(self.noop_action)\n if done:\n obs = self.env.reset(**kwargs)\n return obs", "def set_seed(args):\n random.seed(args.seed)\n np.random.seed(args.seed)\n # Maybe different op seeds(for dropout) for different procs is better. By:\n # `paddle.seed(args.seed + paddle.distributed.get_rank())`\n paddle.seed(args.seed)", "def reset(self, **kwargs):\n self.env.reset(**kwargs)\n if self.override_num_noops is not None:\n noops = self.override_num_noops\n else:\n noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) #pylint: disable=E1101\n assert noops > 0\n obs = None\n for _ in range(noops):\n obs, _, done, _ = self.env.step(self.noop_action)\n if done:\n obs = self.env.reset(**kwargs)\n return obs", "def reseed(self, seed: Optional[Seed]) -> None:\n if self.random is random:\n self.random = Random()\n\n self.seed = seed\n self.random.seed(self.seed)", "def reset(self, seed = None):\n if seed is None:\n self.seed_mask = hashlittle(str(datetime.now()))\n else:\n self.seed_mask = seed\n self._memomask.clear()", "def set_random_seed(seed):\n random.seed(seed)\n np.random.seed(seed)\n logging.info(f\"Set simulation random seed to: {seed}\")", "def reset_env(self):\n return self.env.reset()", "def reset_states(self):\n self.mean_makespan_baseline.assign(0)\n self.mean_makespan_train.assign(0)\n self.step.assign(0)", "def set_seed(self):\n self.set_scikit_learn_seed()\n self.set_torch_seed()\n self.set_python_random_seed()", "def set_random_seeds(seed: int):\n random.seed(seed)\n np.random.seed(seed)\n torch.random.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n logging.debug(\"SystemLog: Set random seed {}\".format(seed))", "def reset(self):\n seed = copy.deepcopy(self.rng_seed)\n self.rng = check_random_state(seed)\n self.num_generated = 0\n return self", "def set_states(self, states):\n if states is None:\n logging.getLogger('eval').warning(\n 'could not reproduce state, setting unreproducable random seed for all random states')\n self.randomstate.seed(np.random.randint(0, 1000000))\n if hasattr(self, 'random_mask_state'):\n self.random_mask_state.seed(np.random.randint(0, 100000))\n if hasattr(self, 
'deformrandomstate'):\n self.deformrandomstate.seed(np.random.randint(0, 100000))\n else:\n if hasattr(self, 'random_mask_state') and 'random_mask_state' in states:\n self.random_mask_state.set_state(states['random_mask_state'])\n if hasattr(self, 'deformrandomstate') and 'deformrandomstate' in states:\n self.deformrandomstate.set_state(states['deformrandomstate'])\n self.randomstate.set_state(states['randomstate'])", "def reset(\n self,\n seed: int | None = None,\n options: dict | None = None,\n ) -> None:\n raise NotImplementedError", "def reset(self):\n if self.reset_tracker >= self.reset_interval:\n instance = self.sampling_function()\n self.env.use_next_instance(instance=instance)\n return self.env.reset()", "def reset(self):\n # Reset time counter\n self.t = 0\n\n # Reset randomization\n self.randomization.reset()\n\n # Randomize parameters.\n self.parameters = self.randomization.parameter_randomizer.randomize(\n self.parameters, self._random_state\n )\n\n self._reset()\n\n # Randomize simulation. Because sim is recreated in self._reset(),\n # simulation_randomizer.randomize should be called after the _reset.\n self.randomization.simulation_randomizer.randomize(\n self.mujoco_simulation.mj_sim, self._random_state\n )\n\n # reset observer.\n self.observer.reset()\n\n # Reset multi goal tracker for a new episode.\n self.multi_goal_tracker.reset()\n\n # Reset state of goal generation.\n return self.reset_goal_generation(sync_type=SyncType.RESET)", "def reset(self, **kwargs):\n return self.env.reset(**kwargs)", "def reset(self):\n self.tracker.reset()\n self.total_max_q = 0.0\n self.episode_step = 0\n self.episode += 1", "def reset(self, blocking=True):\n ret = super(ReacherEnv, self).reset(blocking=blocking)\n self._episode_steps = 0\n return ret", "def seed_all_rng(seed=None):\n if seed is None:\n seed = (\n os.getpid()\n + int(datetime.now().strftime(\"%S%f\"))\n + int.from_bytes(os.urandom(2), \"big\")\n )\n logger.info(\"Using a generated random seed {}\".format(seed))\n np.random.seed(seed)\n torch.manual_seed(seed)\n random.seed(seed)\n os.environ[\"PYTHONHASHSEED\"] = str(seed)", "def reset(self):\n self.epochs = 0\n # Shuffle the training data\n perm = np.arange(self.num_train)\n np.random.shuffle(perm)\n assert self.num_train == self.train_images.shape[\n 0], 'Error incorrect shuffling mask'\n self.train_images = self.train_images[perm]\n self.train_labels = self.train_labels[perm]\n self.curr_train_index = 0", "def reset():\n teardown_db()\n build()", "def reset(self, rng):\n tp = self.task_params\n g = self.task.env.task.graph\n env = self.task.env\n task = self.task\n init_states, goal_states, dists, paths = [], [], [], []\n for i in range(tp.batch_size):\n s, e, path = g.sample_random_goal(rng, tp.min_dist, tp.max_dist)\n # Compute distance to goal from all nodes.\n dist = g.get_path_distance([e])\n # Compute atleast one path between the source and the goal (to sample\n # demonstrations from).\n \n init_states.append(s)\n goal_states.append(e)\n dists.append(dist)\n paths.append(path)\n \n task.init_states, task.goal_states, task.dists, task.paths = \\\n init_states, goal_states, dists, paths\n task.history_f = []\n _ = env.reset(rng, init_states=init_states, batch_size=tp.batch_size)\n return init_states", "def reset(self):\n obs = self.gym.reset()\n # self.step = 1\n agent_obs = self.featurize(obs[self.gym.training_agent])\n # self.observations_history = [agent_obs]\n return agent_obs", "def reproducible(seed: int = 0) -> None:\n\n os.environ[\"PYTHONHASHSEED\"] = 
\"0\"\n\n np.random.seed(seed)\n python_random.seed(seed)\n tf.random.set_seed(seed)", "def reset():\n _runtime.reset()", "def set_random_seed():\n random.seed(DBG_RAND_SEED)\n numpy.random.seed(DBG_RAND_SEED)", "def set_random_seed(seed):\n np.random.seed(seed)\n tf.set_random_seed(seed)", "def set_seed(seed):\n torch.manual_seed(seed)\n random.seed(seed)\n np.random.seed(seed)", "def reset(self):\n self.rand_start = int(np.random.rand()*25000)+self.offset\n state = np.array(np.zeros(self.obs))\n self.time = self.rand_start\n self.residual = 0\n self.cum_r = 0\n return state", "def reset_env(env, num_active_adv=0):\n if hasattr(env, 'domain_randomization'):\n env.domain_randomization = False\n if num_active_adv > 0:\n env.adversary_range = env.advs_per_strength * env.num_adv_strengths", "def experiment4():\n np.random.seed()\n state['result'] = np.random.rand(1)", "def set_random_seed(self, seed):\n np.random.seed(seed)\n return", "def set_random_seed(self, seed):\n np.random.seed(seed)\n return" ]
[ "0.76082146", "0.74018556", "0.71205187", "0.70204157", "0.6981054", "0.6923713", "0.6894734", "0.68613243", "0.68281037", "0.6792506", "0.67748046", "0.67734873", "0.6745905", "0.6744908", "0.67360026", "0.6713903", "0.6712834", "0.66821176", "0.6635531", "0.65819466", "0.654578", "0.65422875", "0.6533175", "0.6520783", "0.6514913", "0.6505726", "0.6481904", "0.6463939", "0.6463939", "0.6449578", "0.64488107", "0.64019877", "0.64003456", "0.6385877", "0.63786626", "0.6350632", "0.6345502", "0.63443846", "0.63304", "0.6330014", "0.63292", "0.6328502", "0.632413", "0.6304718", "0.63010263", "0.6294368", "0.62942266", "0.62935656", "0.6292121", "0.6290495", "0.6290495", "0.6288453", "0.6287303", "0.628381", "0.6259197", "0.6254924", "0.62319565", "0.6230076", "0.6228399", "0.6226308", "0.6222513", "0.6222119", "0.62198937", "0.6209788", "0.62083393", "0.620251", "0.6198124", "0.6192035", "0.6188033", "0.6187362", "0.61856925", "0.61854935", "0.6180417", "0.6177551", "0.6173095", "0.61726123", "0.6167882", "0.6167802", "0.6167703", "0.61615413", "0.61503077", "0.61441046", "0.6138972", "0.6138541", "0.6136984", "0.6133281", "0.61288786", "0.6123276", "0.6119216", "0.6114168", "0.61125714", "0.61064494", "0.6097338", "0.6088635", "0.6084536", "0.60776675", "0.6077464", "0.6073242", "0.60714644", "0.60714644" ]
0.61540705
80
Not used in SSD.
def terminate(self): return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def use(self):", "def __call__():", "def __call__():", "def __call__():", "def __call__():", "def __call__():", "def __call__(self):\n\t\treturn", "def exo2():", "def __call__(self) -> None:", "def support(self):", "def sth():", "def cx():", "def __call__(self):\n pass", "def __call__(self):\n pass", "def CL(self):", "def access():", "def degibber(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def run(self):", "def __init__():", "def __call__( self ):\n pass", "def d(self):\n pass", "def d(self):\n pass", "def __call__(self):\n raise NotImplementedError", "def plugh():", "def __call__(object):", "def main(self):", "def _prepare(self):", "def _prepare(self):", "def call(self):", "def falcon():", "def task4_1(self):\n\n pass", "def init(self):", "def init(self):", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def task4(self):\n\n pass", "def _hook(self):", "def c(self):\n pass", "def c(self):\n pass", "def main(self):\r\n pass", "def RUN(self):", "def cmd(self):", "def think(self):\n pass", "def mechanism(self):", "def regular(self):", "def handle(self):", "def probe(self):", "def _init(self):", "def run(self): \r\n return", "def DM(self):", "def execute(self):", "def execute(self):", "def execute(self):", "def execute(self):", "def process(self):", "def process(self):", "def process(self):", "def dummy(self):\n pass", "def script(self):", "def __upgrade(self):", "def basic(self):\n pass", "def primary(self):\n ...", "def tell(self):\n ...", "def g():", "def use(self):\n pass", "def prepare(self):", "def initialize(self):", "def initialize(self):", "def initialize(self):", "def initialize(self):", "def main():\n\tpass", "def _regr_basic():" ]
[ "0.6666852", "0.6380146", "0.6380146", "0.6380146", "0.6380146", "0.6380146", "0.6362416", "0.6315402", "0.6302781", "0.62878007", "0.6131016", "0.6083334", "0.6069174", "0.6069174", "0.60661525", "0.60107166", "0.60062444", "0.59841406", "0.59841406", "0.59841406", "0.59841406", "0.59841406", "0.59841406", "0.59841406", "0.59841406", "0.59841406", "0.59841406", "0.59809554", "0.5979567", "0.5972745", "0.5972745", "0.59551513", "0.5954903", "0.5930121", "0.5927158", "0.5918231", "0.5918231", "0.59181476", "0.59114164", "0.59007955", "0.5893443", "0.5893443", "0.5866784", "0.5866784", "0.5866784", "0.5866784", "0.5866784", "0.5866784", "0.5866784", "0.5866784", "0.5866784", "0.5866784", "0.5866784", "0.5866784", "0.5866784", "0.5866784", "0.5866784", "0.5866784", "0.5866784", "0.5866784", "0.5866784", "0.5866784", "0.5866784", "0.5866784", "0.5864316", "0.58405364", "0.58139336", "0.58139336", "0.5809306", "0.58091086", "0.5805407", "0.5802121", "0.57931846", "0.57921034", "0.5787757", "0.57712", "0.57707113", "0.5756248", "0.5752574", "0.5736138", "0.5736138", "0.5736138", "0.5736138", "0.5727361", "0.5727361", "0.5727361", "0.57232356", "0.57009465", "0.5675689", "0.5633624", "0.56315243", "0.56214637", "0.56044155", "0.5602367", "0.5595608", "0.5590271", "0.5590271", "0.5590271", "0.5590271", "0.5588987", "0.5576281" ]
0.0
-1
Sets agents' fingerprints (policies), i.e. distributions over actions given the current state.
def update_fingerprint(self, fp): self.fp = fp
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def step(self):\n\t\tnumpy.random.shuffle(self.agents_list)\n\t\tfor agent in self.agents_list:\n\t\t\tagent.produce()\n\t\tfor agent in self.agents_list:\n\t\t\tagent.charge()\n\t\tfor agent in self.agents_list:\n\t\t\tif agent.strategy == 0: \n\t\t\t\tagent.retribute()\n\t\tfor agent in self.agents_list:\n\t\t\tif agent.strategy == 0: \n\t\t\t\tagent.give()\n\t\tfor agent in self.agents_list:\n\t\t\tagent.consume()\n\t\tfor agent in self.agents_list:\n\t\t\tagent.solve_consumption_deficit()\n\t\tfor site in self.sites:\n\t\t\tsite.recovery()\n\t\tfor agent in self.agents_list:\n\t\t\tagent.sprout()", "def update_all_agent(self):\n for a in self.agents:\n soft_update(a.target_actor, a.actor, self.tau)\n soft_update(a.target_critic, a.critic, self.tau)\n self.num_iteration += 1", "def actions(self, agent_state):\n raise NotImplementedError(\"Don't know what actions are available\")", "def agent_init(self, agent_info):\n\n # First, we get the relevant information from agent_info \n # Note: we use np.random.RandomState(seed) to set the two different RNGs\n # for the planner and the rest of the code\n try:\n self.num_states = agent_info[\"num_states\"]\n self.num_actions = agent_info[\"num_actions\"]\n except:\n print(\"You need to pass both 'num_states' and 'num_actions' in agent_info to initialize the action-value table\")\n self.gamma = agent_info.get(\"discount\", 0.95)\n self.step_size = agent_info.get(\"step_size\", 0.1)\n self.epsilon = agent_info.get(\"epsilon\", 0.1)\n self.planning_steps = agent_info.get(\"planning_steps\", 10)\n self.kappa = agent_info.get(\"kappa\", 0.001)\n\n self.rand_generator = np.random.RandomState(agent_info.get('random_seed', 42))\n self.planning_rand_generator = np.random.RandomState(agent_info.get('planning_random_seed', 42))\n\n # Next, we initialize the attributes required by the agent, e.g., q_values, model, tau, etc.\n # The visitation-counts can be stored as a table as well, like the action values \n self.q_values = np.zeros((self.num_states, self.num_actions))\n self.tau = np.zeros((self.num_states, self.num_actions))\n self.actions = list(range(self.num_actions))\n self.past_action = -1\n self.past_state = -1\n self.model = {}", "def step(self):\n updating_env = {} if self.independent_update else self.env\n for a in self.agents:\n if self.i % a.period == 0:\n action = a(self.env)\n if a.name is not None:\n updating_env[a.name] = action\n if self.independent_update:\n self.env.update(updating_env)\n self.i += 1", "def agent_init(self, agent_info):\n\n # First, we get the relevant information from agent_info \n # NOTE: we use np.random.RandomState(seed) to set the two different RNGs\n # for the planner and the rest of the code\n try:\n self.num_states = agent_info[\"num_states\"]\n self.num_actions = agent_info[\"num_actions\"]\n except:\n print(\"You need to pass both 'num_states' and 'num_actions' in agent_info to initialize the action-value table\")\n self.gamma = agent_info.get(\"discount\", 0.95)\n self.step_size = agent_info.get(\"step_size\", 0.1)\n self.epsilon = agent_info.get(\"epsilon\", 0.1)\n self.planning_steps = agent_info.get(\"planning_steps\", 10)\n\n self.rand_generator = np.random.RandomState(agent_info.get('random_seed', 42))\n self.planning_rand_generator = np.random.RandomState(agent_info.get('planning_random_seed', 42))\n\n # Next, we initialize the attributes required by the agent, e.g., q_values, model, etc.\n # A simple way to implement the model is to have a dictionary of dictionaries, \n # mapping each state to a 
dictionary which maps actions to (reward, next state) tuples.\n self.q_values = np.zeros((self.num_states, self.num_actions))\n self.actions = list(range(self.num_actions))\n self.past_action = -1\n self.past_state = -1\n self.model = {} # model is a dictionary of dictionaries, which maps states to actions to \n # (reward, next_state) tuples", "def policy(agent):", "def actions(self, states, agent_indices):\n return NotImplementedError()", "def set_bus_actions_and_state(self, actions, joint_state):\n self.bus_list[-1].set_action(actions)\n self.bus_list[-1].set_state(joint_state)\n return", "def step(self, state, actions, rewards, next_state, dones):\n # Save experience in replay memory\n memory.add(state, actions, rewards, next_state, dones)\n\n for num_agent, agent in enumerate(self.agents):\n agent.step(num_agent)", "def execute(self):\n # set observations for all agents\n observation = self.environment.get_observation_for_agent()\n for ag in self.environment.agents:\n ag.observation = observation\n # main loop\n while not self.environment.end_episode:\n # each agent choose its action\n self.environment.choose_action()\n # next state\n self.environment.calculate_next_state()\n # is the end of the episode\n self.environment.calculate_end_episode()\n # set observations for all agents\n observation = self.environment.get_observation_for_agent()\n for ag in self.environment.agents:\n ag.observation = observation", "def _init_agents(self):\n self.agents = [Agent(e=0.1, a=0.1, row=self.row, col=self.col) for i in range(2)]", "def _update_distribution(self, trajectories):\n costs = trajectories[\"costs\"].copy()\n actions = trajectories[\"actions\"].copy()\n Q = cost_to_go(costs, self.gamma_seq)\n best_id = np.argmin(Q, axis = 0)[0]\n self.mean_action = (1.0 - self.step_size) * self.mean_action +\\\n self.step_size * actions[best_id]", "def step(self):\n if not self.is_done():\n actions = [ agent.program(self.percept(agent)) for agent in self.agents ]\n for agent, action in zip(self.agents, actions):\n self.execute_action(agent, action)\n\n self.exogenous_change()", "def take_one_step(self):\n\t\tfor i in range(len(self.agents)):\n\t\t\tself.agents[i].action(0)", "def actions(self, state):\n\t\traise NotImplementedError", "def initialize_distribution(states, actions):\n dist = {}\n\n for i in states:\n dist[i] = {}\n for j in actions:\n dist[i][j] = [0.0]\n\n return dist", "def step(self, actions, agent_id=0):\n self._last_state = self._current_state\n\n # TODO\n # action = actions.discrete_actions[0]-1\n action = actions.argmax()\n\n done = 0\n if self._stage == 0: # is fixation\n if action == 0:\n reward = 0.\n else:\n reward = -1.\n self._current_state = 1\n self._stage = 1\n elif self._stage == 1: # is first stage, use prob_transition\n if action == 1 or action == 2:\n if np.random.random() < self._prob_transition[0][action-1]:\n self._current_state = 2\n else:\n self._current_state = 3\n reward = 0.\n else: # pick a next state at random\n reward = -1.\n self._current_state = np.random.random() < 0.5 and 2 or 3\n self._stage = 2\n else: # is second stage, use prob_reward\n # Given an action (arm pulled), sample reward, return\n if action == 1 or action == 2:\n current_prob_rewards = self._prob_reward[self._current_state-2]\n self._best_reward = self._max_reward*np.max(current_prob_rewards)\n thisProb = current_prob_rewards[action-1]\n if np.random.random() < thisProb:\n # print(\"give reward\")\n reward = self._max_reward\n else:\n reward = 0.0\n else:\n reward = -1.\n\n self._total_reward 
+= reward\n self._best_total_reward += self._best_reward\n self._stage = 0\n self._current_state = 0\n self._trial += 1\n self._since_flipped += 1\n # if more than self._min_stable trials since flipping, certain chance of flipping prob rews\n if (self._since_flipped >= self._min_stable) and (np.random.random() <= self._flip_prob):\n self._randomize()\n self._since_flipped = 0\n\n\n self._last_action = np.zeros(self._num_arms)\n self._last_action[action] = 1\n # conditions to end episode\n if self._step >= self._steps_per_ep-1:\n self._state = READY_TO_END_EPISODE\n done = 1\n\n self._step += 1\n self._prev_reward = reward\n\n obs = self._current_state\n reset = done == 1. or self._step == MAX_FRAMES\n\n # print(np.array([[obs]]).shape)\n\n # print(reward, self._stage)\n return np.array([obs]), reward, done, reset", "def agent_start(self,thisObs): \n action={'vol':0,'price':0}\n \n \"\"\"Changes for Boltzman Exploration\"\"\"\n #choice=self.pick_action_from_dist()\n #action_bin=self.prob_dist_action[choice]\n #action=self.unbin_action(action_bin,thisObs)\n \n \"\"\"Changes for epsilon greedy method\"\"\"\n action= self.return_random_action(thisObs)\n \n self.lastAction=action\n self.lastObs=thisObs\n return action", "def act(self):\n self.features = self.next_features\n self.choose_random = np.random.choice(2,p=(1-self.epsilon,self.epsilon)) # Chooses whether to explore or exploit with probability 1-self.epsilon\n # Selects the best action index in current state\n if self.choose_random:\n self.chosenA = np.random.choice(4)\n else:\n self.chosenA = self.argmaxQsa(self.features)\n # Records reward for printing and performs action\n self.action = self.idx2act[self.chosenA]\n # Execute the action and get the received reward signal\n self.reward = self.move(self.action)\n self.total_reward += self.reward\n # IMPORTANT NOTE:\n # 'action' must be one of the values in the actions set,\n # i.e. 
Action.LEFT, Action.RIGHT, Action.ACCELERATE or Action.BRAKE\n # Do not use plain integers between 0 - 3 as it will not work", "def actions(self, state):\n raise NotImplementedError # Override this!", "def update(self, signals):\n raise NotImplementedError('Agent is an abstract base class')", "def update_instigator_state(self, state: InstigatorState):", "def __setstate__(self, state: dict) -> None: # pragma: no cover\n self.__dict__.update(state)\n self.rFp = {}\n self.wFp = {}\n self.Fp = ChainMap(self.rFp, self.wFp)\n self.open(mode=self.mode)", "def step(self, action):\n self.steps += 1\n self.robots[0].setAction(action)\n for i in range(self.num_agents):\n if i != 0 and self.policies[i:i+1]: # self.policies[0] is dummy\n self.robots[i].setAction(self.policies[i](self.robots[i].getObservation()))\n # rewards = [ -1.0 * self.num_foods / self.max_steps for _ in range(self.num_agents) ] # so agent needs to eat foods quickly\n rewards = [ 0.0 for _ in range(self.num_agents) ]\n for i in range(self.BULLET_STEPS):\n p.stepSimulation()\n rewards = [ rewards[i]+self._getReward(self.robots[i]) for i in range(self.num_agents) ]\n self.episode_rewards = [ self.episode_rewards[i]+rewards[i] for i in range(self.num_agents) ]\n obs = self.robots[0].getObservation()\n done = self._isDone()\n info = { 'steps': self.steps }\n if done:\n # TODO\n info['episode'] = { 'r': self.episode_rewards[0], 'l': self.steps, 'r_all': self.episode_rewards }\n # print(self.episode_rewards, self.steps)\n return obs, rewards[0], done, info", "def step(self):\n if not self.is_done():\n actions = []\n for agent in self.agents:\n if agent.alive:\n actions.append(agent.program(self.percept(agent)))\n else:\n actions.append(\"\")\n for (agent, action) in zip(self.agents, actions):\n self.execute_action(agent, action)\n self.exogenous_change()", "def step(self):\n if not self.is_done():\n actions = []\n for agent in self.agents:\n if agent.alive:\n actions.append(agent.program(self.percept(agent)))\n else:\n actions.append(\"\")\n for (agent, action) in zip(self.agents, actions):\n self.execute_action(agent, action)\n self.exogenous_change()", "def initialize(self):\n self.candidate_disease_list = []\n self.candidate_symptom_list = []\n self.agent_action = {\n \"turn\":None,\n \"action\":None,\n \"request_slots\":{},\n \"inform_slots\":{},\n \"explicit_inform_slots\":{},\n \"implicit_inform_slots\":{},\n \"speaker\":\"agent\"\n }", "def execute(self, agent: Agent, state: SimState) -> None:\n pass", "def trigger(self, state, updated_vars):\n for evidence_var in state.get_evidence().get_variables():\n if evidence_var.startswith('R(') and evidence_var.endswith(')'):\n actual_action = Assignment.create_from_string(evidence_var[2:])\n actual_utility = state.get_evidence.get_value(evidence_var).get_double()\n\n if actual_action.get_variables() in self.previous_states.keys():\n previous_state = self.previous_states[actual_action.get_variables()]\n self.learn_from_feedback(previous_state, actual_action, actual_utility)\n\n state.clear_evidence(evidence_var)\n\n if len(state.get_action_node_ids()) != 0:\n try:\n self.previous_states[state.get_action_node_ids()] = copy(state)\n except Exception as e:\n self.log.warning(\"cannot copy state: \" + str(e))", "def action(self, gstate, actions=None):\n raise NotImplementedError", "def step(self, states, actions, rewards, next_states, dones):\n for a in range(self.agents_count):\n # save for each agent\n self.memory.add(states[a], actions[a], rewards[a], next_states[a], dones[a])\n\n # Learn, 
if enough samples are available in memory\n if len(self.memory) > self.batch_size:\n experiences = self.memory.sample()\n self.learn(experiences)", "def act(self, state: State) -> Distribution:\n return self._gen_behaviour(self._gen_policy_params(state))", "def simulate(params,n_states,n_trials,env = \"rich\", policy=\"softmax\",\\\n D=0.5, mod = \"constant\",thresh = 0, k=1,rnd_seeds = None, V0=0.0, full=False,\n rmag = 1, lmag = 0):\n\n\tdef calc_D(state):\n\t\t\"\"\"\n\t\tcalculates D for the current trial and returns\n\t\tthe updated state tracker for D and respective betas\n\n\t\tD represents dopamine levels (equivalent of rho in OpAL)\n\t\tScales between 0 and 1, with 1 high level of DA\n\t\t\"\"\"\n\t\tif t < thresh:\n\t\t\tstate.D_g[t] = 0.5\n\t\t\tstate.D_n[t] = 0.5\n\t\telse:\n\t\t\tif mod == \"constant\":\n\t\t\t\tstate.D_g[t] = D\n\t\t\t\tstate.D_n[t] = 1-D\n\t\t\tif mod == \"value\":\n\t\t\t\t# NOTE: if rmag and lmag is 1/0, can just use V\n\t\t\t\t# average of two actions\n\t\t\t\tV = np.mean(1/2*(state.QG[t,:] - state.QN[t,:])) # state average(?) \n\t\t\t\tV = 1/(1 + np.exp(-V*k)) # translate between 0 and 1\n\t\t\t\tstate.D_g[t] = V \n\t\t\t\tstate.D_n[t] = 1 - V\n\t\treturn state\n\n\n\tdef generate_state():\n\t\t\"\"\"\n\t\tGet appropriate reward probabilities and magnitudes\n\t\tfor the specified environment type\n\t\t\"\"\"\n\n\t\tprobs = calc_probs(env)\n\t\tn_options = len(probs)\n\n\t\t# feedback for agent\n\t\tr_mag = np.zeros(n_options) + rmag\n\t\tl_mag = np.zeros(n_options) + lmag\n\n\t\tnew_state = Bogacz(n_trials, n_options, probs, r_mag, l_mag, V0=V0)\n\t\treturn new_state\n\n\n\t# learning rate, damping, decay, softmax temp\n\talpha_a, epsilon, lbda, beta = params\n\tstates = []\n\n\t# do the thing\n\tfor s in np.arange(n_states):\n\n\t\t# check if random seed provided\n\t\tif rnd_seeds is not None:\n\t\t\trandom.seed(rnd_seeds[s])\n\t\t\tnp.random.seed(rnd_seeds[s])\n\n\t\tstate = generate_state()\n\t\tfor t in range(n_trials):\n\n\t\t\tstate.idx = t\n\t\t\tstate=calc_D(state)\t\t\t\t\t# get D\n\t\t\tstate.policy_softmax(beta)\n\t\t\tstate.act(alpha_a, epsilon, lbda)\t# update \n\n\t\t\tif full:\n\t\t\t\tstate.update_other_actions(alpha_a, epsilon, lbda)\n\n\t\tstates.append(state)\t\t\t\t\t# save sim\n\n\treturn states", "def __init__(self, robot, human_policy, initial_world_state,\n num_theta = 2, num_ingredients = 3, reward_set = [((0,2,1),0), ((1,1,2),1)],\n gamma = 0.95):\n self.robot = robot\n self.human_policy = human_policy\n self.num_ingredients = num_ingredients\n self.reward_set = reward_set\n self.gamma = gamma\n self.world_state = initial_world_state\n self.theta_set = list(range(num_theta))\n self.allStates = self.getAllStates()\n #self.allObservations = self.getAllObservations()", "def update(self, samples, agent_number):\n\n # need to transpose each element of the samples\n # to flip obs[parallel_agent][agent_number] to\n # obs[agent_number][parallel_agent]\n obs, obs_full, action, reward, next_obs, next_obs_full, done = samples\n \n agent = self.maddpg_agent[agent_number]\n agent.critic_optimizer.zero_grad()\n\n #critic loss = batch mean of (y- Q(s,a) from target network)^2\n #y = reward of this timestep + discount * Q(st+1,at+1) from target network\n target_actions = self.target_act(next_obs)\n \n with torch.no_grad():\n q_next = agent.target_critic(next_obs_full, target_actions.view(-1, 4))\n\n y = reward[:,agent_number].view(-1, 1) + self.discount_factor * q_next * (1 - done[:, agent_number].view(-1, 1))\n q = agent.critic(obs_full, 
action.view(-1, 4))\n\n huber_loss = torch.nn.SmoothL1Loss()\n critic_loss = huber_loss(q, y.detach())\n critic_loss.backward()\n torch.nn.utils.clip_grad_norm_(agent.critic.parameters(), 0.5)\n agent.critic_optimizer.step()\n\n #update actor network using policy gradient\n agent.actor_optimizer.zero_grad()\n # make input to agent\n # detach the other agents to save computation\n # saves some time for computing derivative\n \n agent_obs = obs[:, agent_number]\n agent_actions = agent.actor(agent_obs)\n q_input = action.clone()\n q_input[:, agent_number] = agent_actions\n\n # get the policy gradient\n actor_loss = -agent.critic(obs_full, q_input.view(-1, 4)).mean()\n actor_loss.backward()\n torch.nn.utils.clip_grad_norm_(agent.actor.parameters(),0.5)\n agent.actor_optimizer.step()\n\n al = actor_loss.cpu().detach().item()\n cl = critic_loss.cpu().detach().item()\n \n return al, cl", "def update(self):\n startstate = self.state\n goalstates =self.env.getGoalStates()\n inputs = self.env.sense(self)\n self.action_sequence = self.drive(goalstates,inputs)\n action = self.choose_action() # Choose an action\n self.state = self.env.act(self,action) \n return", "def onActionTaken(self, agent):\n\n pass", "def step(self, actions):\r\n # Run actions\r\n actions = [np.argmax((action_scores+.0001) * mask) for action_scores, mask in zip(actions, self.get_avail_actions())]\r\n reward, terminated, info = self.env.step(actions)\r\n\r\n # Get updated state\r\n self.state = self.env.get_state()\r\n\r\n # Return arrays for each agent\r\n reward_n = [reward / self.n for _ in range(self.n)]\r\n terminated_n = [terminated for _ in range(self.n)]\r\n info_n = [info for _ in range(self.n)]\r\n observation_n = self.env.get_obs()\r\n\r\n return observation_n, reward_n, terminated_n, info_n", "def update(self):\n\n # get states, actions, rewards and total timesteps from memory\n states, actions, R, T = self.memory.get()\n n_ep = len(R)\n\n # compute value estimates for the states\n v = self.critic(states)\n\n # compute advantages (using GAE) and rewards to go\n A, rtg = utils.gae_rtg((R, v, T), self.gam, self.lam)\n\n # store the initial version of both the policy and the log probs of the\n # actions for later comparison with the future versions (needed for PPO)\n policy_old = copy.deepcopy(self.policy)\n log_probs_old = policy_old(states).log_prob(actions)\n\n # sample from a batch of experiences\n # (\"_\" subscript indicates \"sampled from\")\n for (v_, A_, rtg_, log_probs_old_), i in utils.sample_batch((v, A, rtg, log_probs_old), self.batch_size, self.policy_updates):\n log_probs_ = self.policy(states).log_prob(actions)[i]\n\n # estimate ratio between the new log probs and the old ones\n r_ = torch.exp(log_probs_ - log_probs_old_)\n\n l_1 = r_ * A_\n l_2 = torch.clamp(r_, 1-self.eps, 1+self.eps) * A_\n\n # TODO: implement entropy\n # TODO: merge policy and critic\n\n # surragate loss function for PPO\n l_clip = -torch.mean(torch.min(l_1, l_2))\n\n # update the policy\n self.policy_optimizer.zero_grad()\n l_clip.backward(retain_graph=True)\n self.policy_optimizer.step()\n\n # sample a batch of value estimates and the corresponding rewards to go\n # to update the value function.\n for (v_, rtg_), _ in utils.sample_batch((v, rtg), self.batch_size, self.v_updates):\n # compute the loss\n critic_loss = F.mse_loss(v_, rtg_)\n\n # update the critic\n self.critic_optimizer.zero_grad()\n critic_loss.backward(retain_graph=True)\n self.critic_optimizer.step()\n\n # clear the memory. 
PPO is an On-Policy method so we don't need these\n # memories anymore\n self.memory.clear()\n\n # return the loss of the value function for display\n return F.mse_loss(v, rtg)", "def start(self, agents):\n self.current_state = self.startState()\n self.agents = agents\n for i,agent in enumerate(self.agents):\n agent.setPlayerId(i)\n\n return self.current_state", "def onActionChosen(self, agent, action):\n\n pass", "def _child_set_up(self):\n # Set up acquisition optimisation\n self._set_up_acq_opt()\n self.method_name = 'GP-' + str(self.options.acq)", "def agent_start(self, state):\n self.sum_rewards = 0\n self.episode_steps = 0\n self.last_state = np.array(state)\n self.last_action = self.policy(self.last_state)\n return self.last_action", "def factions(self, factions):\n\n self._factions = factions", "def set_agents(self, agents):\n if self.single_agent_mode:\n raise ValueError(\n \"Setting agent in single agent mode or human mode is not allowed.\"\n )\n\n self.agents = agents\n # If at least one agent needs raw data, we set self.allow_raw_data = True\n for agent in self.agents:\n if agent.use_raw:\n self.allow_raw_data = True\n break", "def __setstate__(self, state):\n for i, j in state.items():\n setattr(self, i, j)\n self.describer_model = _load_model(self.name)", "def __setstate__(self, state):\n for i, j in state.items():\n setattr(self, i, j)\n self.describer_model = _load_model(self.name)", "def actions(self, actions):\n\n self._actions = actions", "def actions(self, actions):\n\n self._actions = actions", "def take_actions(self, actions: MultiAgentDict):\n\n # 1. - 4.\n wage_increases, demand = self.parse_actions(actions)\n wages = {agent.agent_id: agent.wage * (1 + wage_increases[agent.agent_id]) for agent in self.agents.values()}\n self.clear_labor_market(wages)\n self.clear_goods_market(demand)\n\n # 5. - 7.\n self.clear_dividends(self.firm.profit)\n self.clear_capital_market()\n\n return wage_increases, demand", "def registerInitialState(self, gameState):\n\n ''' \n Make sure you do not delete the following line. If you would like to\n use Manhattan distances instead of maze distances in order to save\n on initialization time, please take a look at\n CaptureAgent.registerInitialState in captureAgents.py. \n '''\n CaptureAgent.registerInitialState(self, gameState)\n self.opponents = self.getOpponents(gameState)\n self.distributions = []\n self.legalPositions = [p for p in gameState.getWalls().asList(False) if p[1] > 1]\n print self.legalPositions\n\n #initializing beleif distribution of opponents\n for i in range(0, gameState.getNumAgents()):\n if i in self.opponents:\n beliefs = util.Counter()\n for p in self.legalPositions: beliefs[p] = 1.0\n beliefs.normalize()\n self.distributions.append(beliefs)\n else:\n self.distributions.append(None)\n\n\n ''' \n Your initialization code goes here, if you need any.\n '''", "def step(self, actions): # actions is a list,\n\n assert len(actions) == len(self.agents), \"Number of actions (\" + str(\n len(actions)) + \") does not match number of agents (\" + str(self.n_agents) + \")\"\n\n # Process movement based on real states (not belief)\n\n\n rewards = [0.] 
* self.n_agents\n\n reward = 0.\n\n\n nextcells = [None] * self.n_agents\n rand_nums = self.rng.uniform(size=self.n_agents)\n\n for i in range(self.n_agents):\n\n currcell = self.tocellcoord[self.agents[i].state]\n if isinstance(actions,int):\n act = actions\n else:\n act = actions[i]\n direction = self.directions[act]\n\n if rand_nums[i] > 1/3: # pick action as intended\n if self.occupancy[tuple(currcell + direction)] == 0:\n nextcells[i] = self.tocellnum[tuple(currcell+direction)]\n else:\n nextcells[i] = self.tocellnum[tuple(currcell)] # wall collision\n # rewards[i] += self.collision_penalty\n\n else: # pick random action, except one initially intended\n adj_cells = self.adjacent_to(currcell) # returns list of tuples\n adj_cells.remove(tuple(currcell+direction))\n\n index = self.rng.choice(range(len(adj_cells)))\n new_cell = adj_cells[i]\n\n if self.occupancy[new_cell] == 0:\n nextcells[i] = self.tocellnum[new_cell]\n else:\n nextcells[i] = self.tocellnum[tuple(currcell)] # wall collision\n # rewards[i] += self.collision_penalty\n\n\n # check for inter-agent collisions:\n collisions = [c for c, count in Counter(nextcells).items() if count > 1]\n while(len(collisions) != 0): # While loop needed to handle edge cases\n for i in range(len(nextcells)):\n if nextcells[i] in collisions:\n nextcells[i] = self.agents[i].state # agent collided with another, so no movement\n\n\n collisions = [c for c, count in Counter(nextcells).items() if count > 1]\n\n\n for i in range(self.n_agents):\n if nextcells[i] == self.agents[i].state: # A collision happened for this agent\n rewards[i] += self.collision_penalty\n else:\n s = nextcells[i] # movement is valid\n self.agents[i].state = s\n if s in self.goals and s not in self.discovered_goals:\n rewards[i] += self.goal_reward\n self.discovered_goals.append(s)\n #rewards[i] += broadcasts[i]*self.broadcast_penalty\n\n\n self.currstate = tuple(nextcells)\n\n\n\n reward = np.sum(rewards)\n\n self.step_count += 1\n\n\n # If all goals were discovered, end episode\n done = len(self.discovered_goals) == len(self.goals)\n\n \n return reward, self.currstate, done, None", "def agent_set(bus):\n # TODO\n pass", "def percept(self, agent):\n abstract", "def main():\n agent = Agent()\n env = init_env()\n for i in range(1000):\n agent.start()\n state, reward = env.reset()\n while not env.terminal:\n action = agent.step(state, reward)\n state, reward = env.update(action)\n agent.end(reward)", "def load_agents(self, agents):\n self.agents = agents", "def reset(self):\n self.agents.reset()\n self._cur_obs, self._cur_lm = self.parallel_env.reset()\n self.agent_cum_rewards = np.zeros((len(self.agents), self.n_states, 1))\n self.agent_contiguous_states = np.full((len(self.agents), self.n_states), True)", "def act(self, state):\n # Append the state to the short term memory (ie. 
History)\n self._history.append(state)\n\n # If policy requires agent to explore, sample random action\n if self._explorer.is_exploring(self._num_actions_taken):\n action = self._explorer(self.nb_actions)\n else:\n # Use the network to output the best action\n env_with_history = self._history.value\n q_values = self._action_value_net.eval(\n # Append batch axis with only one sample to evaluate\n env_with_history.reshape((1,) + env_with_history.shape)\n )\n\n self._episode_q_means.append(np.mean(q_values))\n self._episode_q_stddev.append(np.std(q_values))\n\n # Return the value maximizing the expected reward\n action = q_values.argmax()\n\n # Keep track of interval action counter\n self._num_actions_taken += 1\n return action", "def _initialize_chaotic_map(self, agents: List[Agent]) -> None:\n\n for i, agent in enumerate(agents):\n if i == 0:\n for j in range(agent.n_variables):\n agent.position[j] = r.generate_uniform_random_number(\n size=agent.n_dimensions\n )\n else:\n for j in range(agent.n_variables):\n # Calculates its position using logistic chaotic map (eq. 18)\n agent.position[j] = (\n self.eta\n * agents[i - 1].position[j]\n * (1 - agents[i - 1].position[j])\n )", "def __setstate__(self, state):\n params, weights = state\n #self.set_params(**params)\n #self.ready()\n self._set_weights(weights)", "def __setstate__(self, state):\n params, weights = state\n #self.set_params(**params)\n #self.ready()\n self._set_weights(weights)", "def update1(self, state, action, nextState, reward):\n #print \"update1 in ApproximateQAgent\"\n \"*** YOUR CODE HERE ***\"\n ##################################################################################################################################Eric Did Stuff\n actionList = nextState.getLegalActions(self.index)\n\n\n #print \"Action List\", actionList\n\n\n\n\n weights = self.getWeights()\n\n features = self.featExtractor.getFeatures(state, action, self)\n #self.myFeats = features\n if self.index == 0:\n print \"FEATURES: \",features\n value = self.computeValueFromQValues(nextState)\n qValue = self.getQValue(state,action)\n #print \"value\", value, \"qValue\", qValue\n for feature in features:\n if len(actionList) != 0:\n weights[feature] = weights[feature] + self.alpha * (reward + self.discount * value - qValue) * features[feature]\n else:\n weights[feature] = weights[feature] + self.alpha * (reward - qValue) * features[feature]\n #print \"feature\", feature, \"weights\", weights[feature]\n #print \"weights\", weights\n\n #util.raiseNotDefined()", "def __init__(self, state_size, action_size, num_agents, seed, fc1=400, fc2=300, update_times=10,\n weight_decay=1.e-5):\n self.state_size = state_size\n self.action_size = action_size\n self.seed = random.seed(seed)\n self.n_seed = np.random.seed(seed)\n self.num_agents = num_agents\n self.update_times = update_times\n self.n_step = 0\n self.TAU = 1e-3\n\n self.noise = []\n for i in range(num_agents):\n self.noise.append(rm.OrnsteinUhlenbeckProcess(size=(action_size,), std=LinearSchedule(0.4, 0, 2000)))\n\n # critic local and target network (Q-Learning)\n self.critic_local = Critic(state_size, action_size, fc1, fc2, seed).to(device)\n\n self.critic_target = Critic(state_size, action_size, fc1, fc2, seed).to(device)\n self.critic_target.load_state_dict(self.critic_local.state_dict())\n\n # actor local and target network (Policy gradient)\n self.actor_local = Actor(state_size, action_size, fc1, fc2, seed).to(device)\n self.actor_target = Actor(state_size, action_size, fc1, fc2, seed).to(device)\n 
self.actor_target.load_state_dict(self.actor_local.state_dict())\n\n # optimizer for critic and actor network\n self.optimizer_critic = optim.Adam(self.critic_local.parameters(), lr=CRITIC_LR, weight_decay=1.e-5)\n self.optimizer_actor = optim.Adam(self.actor_local.parameters(), lr=ACTOR_LR)\n\n # Replay memory\n self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, seed)\n\n # Initialize time step (for updating every UPDATE_EVERY steps)\n self.t_step = 0\n self.a_step = 0", "def agent_step(self, reward, state):\n self.sum_rewards += reward\n self.episode_steps += 1\n\n # Make state an array of shape (1, state_dim) to add a batch dimension and\n # to later match the get_action_values() and get_TD_update() functions\n state = np.array(state)\n\n # Select action\n action = self.policy(state)\n \n # Append new experience to replay buffer\n self.replay_buffer.append(self.last_state, self.last_action, reward, 0, state)\n \n # Perform replay steps:\n if self.replay_buffer.size() > self.replay_buffer.minibatch_size:\n self.network_target.load_state_dict(self.network.state_dict())\n for _ in range(self.num_replay):\n # Get sample experiences from the replay buffer\n experiences = self.replay_buffer.sample() \n self.optimize_network(experiences)\n \n # Update the last state and last action.\n self.last_state = state\n self.last_action = action\n \n return action", "def training(self, dataset, repeat=1, gamma=1.0, learning_rate=0.1, model='3yo'):\n for _ in range(repeat):\n for episode in dataset:\n # 1- Get the data stored inside the dataset\n image_index = episode[0] # image of the object\n label_index = episode[1] # label given by the informant\n informant_index = episode[2] # a integer representing the informant\n informant_action = episode[3] # 0=reject, 1=accept\n\n # 2- The agent take an action (with softmax) considering is current state-action table\n # [0=cup, 1=book, 2=ball]\n col = (image_index * self.tot_images) + label_index\n action_array = self.actor_matrix[:, col]\n action_distribution = self._softmax(action_array)\n child_action = np.random.choice(self.tot_actions,\n 1,\n p=action_distribution) # select the action through softmax\n\n # 3- (External) New state and reward obtained from the environment\n # u_t = self.critic_vector[0, col] # previous state\n # New state is estimated, in this simple case nothing happen\n # because the next state is terminal\n # u_t1 = u_t # Only in this example they are the same\n\n # 4- (Intrinsic) The informant_reputation is updated:\n # agent_action, agent_confidence, informant_action, reward\n # informant_vector: 0=unreliable, 1=reliable\n # do_actions_agree: False, True\n # Estimating child_confidence\n distance = np.absolute(action_distribution[0] - action_distribution[1])\n child_confidence_distribution = [1 - distance, distance] # non-knowledgeable, knowledgeable\n child_confidence = np.random.choice(2, 1, p=child_confidence_distribution)\n # Check if child and informant agree\n if (child_action == informant_action):\n do_actions_agree = True\n else:\n do_actions_agree = False\n # Increment the counter in the informant_vector.\n # Here we update the counter distribtuion only if\n # the child is confident, because it is only in that\n # case that the child can say if the informant is\n # reliable or not.\n if (do_actions_agree == False and child_confidence == 1):\n self.informant_vector[informant_index][0] += 1 # unreliable\n elif (do_actions_agree == True and child_confidence == 1):\n self.informant_vector[informant_index][1] += 1 # 
reliable\n elif (do_actions_agree == False and child_confidence == 0):\n self.informant_vector[informant_index][1] += 0 # reliable\n self.informant_vector[informant_index][0] += 0 # unreliable\n elif (do_actions_agree == True and child_confidence == 0):\n self.informant_vector[informant_index][1] += 0 # reliable\n self.informant_vector[informant_index][0] += 0 # unreliable\n else:\n raise ValueError(\"ERROR: anomaly in the IF condition for informant_vector update\")\n # Using the informant_vector given as input it estimates the reputation of the informant\n informant_reputation_distribution = np.true_divide(self.informant_vector[informant_index],\n np.sum(self.informant_vector[informant_index]))\n informant_reputation = np.random.choice(2, 1, p=informant_reputation_distribution)\n\n # 5- (Intrinsic) The Cost is estimated:\n # current_state, agent_action, agent_confidence, informant_action, informant_reputation\n # child_confidence: 0=non-knowledgeable, 1=knowledgeable\n # informant_reputation: 0=non-knowledgeable, 1=knowledgeable\n # action: 0=reject, 1=accept\n # informant_action: 0=reject, 1=accept\n cost = self._return_cost(child_confidence,\n informant_reputation,\n child_action,\n informant_action,\n value=model)\n\n # 6- The utility table is updated using: previous_state, current_state, cost, reward\n # Updating the critic using Temporal Differencing Learning\n # In this simple case there is not a u_t1 state.\n # The current state is considered terminal.\n # We can delete the term (gamma*u_t1)-u_t and considering\n # only (reward-cost) as utility of the state (see Russel Norvig).\n reward = 0 # only for intrinsic learning reward=0\n delta = (reward - cost) # + (gamma*u_t1) - u_t\n self.critic_vector[0, col] += learning_rate * delta\n\n # 7- The actor table is updated using the delta from the critic\n # Update the ACTOR using the delta\n self.actor_matrix[child_action, col] += learning_rate * delta # the current action\n self.actor_matrix[1 - child_action, col] -= learning_rate * delta # the opposite action", "def fetch_initial_states(self):\n for agent_id, agent_obj in self.__registered_agents.items():\n # given the agent's capabilities, get everything the agent can perceive\n state = self.__get_agent_state(agent_obj)\n\n # filter other things from the agent state\n filtered_agent_state = agent_obj.filter_observations(state)\n\n # save the current agent's state for the API\n api.add_state(agent_id=agent_id, state=filtered_agent_state,\n agent_inheritence_chain=agent_obj.class_inheritance,\n world_settings=api.MATRX_info)\n\n # add god state\n api.add_state(agent_id=\"god\", state=self.__get_complete_state(), agent_inheritence_chain=\"god\",\n world_settings=api.MATRX_info)\n\n # initialize the message manager\n self.message_manager.agents = self.__registered_agents.keys()\n self.message_manager.teams = self.__teams\n\n # make the information of this tick available via the API, after all\n # agents have been updated\n api.next_tick()", "def update_qvals(self, state, action, reward):\n self.qvals[(state, action)] = 0", "def reset_agent_locations(self):\n self.transitions_left = self.T-1\n self.x_agent = np.repeat(self.xT.reshape(1, self.dimensions), self.n_agents, axis=0)", "def __setstate__(self,state):\n self.__dict__.update(state)\n self.KDTreeFinder = spatial.KDTree(self.featureVals)", "def learn(self):\n for a in self.agents:\n a.learn()", "def act(self, observation, reward, done):\r\n\r\n # Choosing action randomly in proportion with number of views.\r\n prob = self.organic_views 
/ sum(self.organic_views)\r\n action = choice(self.config.num_products, p = prob)\r\n\r\n return {\r\n **super().act(observation, reward, done),\r\n **{\r\n 'a': action,\r\n 'ps': prob[action]\r\n }\r\n }", "def __setstate__(self, state):\n params, theta = state\n self.set_params(**params)\n self.ready()\n self._set_weights(theta)", "def run_one_step(self):\n # Get the current state, action and initialise the reward\n state = copy.copy(self.env.get_state())\n action = self.agent.get_action(state)\n reward = 0.0\n # Check if the environment has reached a terminal state\n if self.env.check_terminal() is False:\n # Save the initial state and action to an 'experience'\n latest_experience = Experience(copy.copy(state), copy.copy(action))\n # Update the environment using the chosne action\n self.env.update(action)\n # Get the reward to attribute to the agent and save to the experience to save\n reward = self.env.get_reward()\n latest_experience.reward = copy.copy(reward)\n # Get the updated state\n state = self.env.get_state()\n if self.env.check_terminal() is False:\n # If the new state isn't terminal, save the next action and the 'done' flag to the experience\n action = self.agent.get_action(state)\n latest_experience.done = False\n else:\n # If the new state is terminal, save a dummy action and the 'done' flag to the experience\n action = 0.0\n latest_experience.done = True\n latest_experience.next_state = copy.copy(state)\n latest_experience.next_action = copy.copy(action)\n # Update the history with the latest experience\n self.agent.update_history(copy.copy(latest_experience))\n # Update the agents policy using a batch of experiences chosen from the history\n self.agent.update_policy_batch(max(1, self.batch_size))\n self.count += 1\n # Update the target network if appropriate\n if self.update_target_rate is not None:\n if self.count % self.update_target_rate == 0:\n self.agent.policy.learner.update_target_network()\n else:\n # If the environment is in a terminal state, record this and perform a policy update\n latest_experience = Experience(copy.copy(state), copy.copy(action))\n latest_experience.reward = 0.0\n latest_experience.next_state = copy.copy(state)\n latest_experience.next_action = 0.0\n latest_experience.done = True\n self.agent.update_history(copy.copy(latest_experience))\n self.agent.update_policy_batch(max(1, self.batch_size))\n self.count = 0\n return reward", "def store(self, state, action, reward, obs_state, done):\n\n self.states.append(state)\n self.actions.append(action)\n self.rewards.append(reward)\n self.obs_states.append(obs_state)\n self.dones.append(done)", "def run(self, agent_host):\n S, A, R = deque(), deque(), deque()\n global shot\n shot = 0\n while shot < 5:\n ##update total arrow shot\n self.totalCount+=1\n\n ##update accuracy number for every 10000 arrows\n if(self.totalCount%10000==0):\n self.phasesOnTarget.append(self.phasesTemp)\n self.phasesTemp=0\n\n\n s0 = self.get_zombie_state(agent_host)\n a0= self.choose_action(s0)\n if a0[2] == 'shoot':\n shot += 1\n self.shootCount+=1\n r0 = self.act(agent_host, a0)\n\n ##update arrow numbers for different angles\n if(a0[2]=='shoot'):\n self.arrowAngleCount[tuple((a0[0],a0[1]))]+=1\n\n ##update arrow on target quantity\n if(r0>0):\n self.totalOnTarget+=1\n self.phasesTemp+=1\n\n ## update arrow hit the target on different angles\n if(a0[2]=='shoot'):\n self.arrowAngleOn[tuple((a0[0],a0[1]))]+=1\n\n\n ##update reward\n S.append(s0)\n A.append(a0)\n R.append(r0)\n\n ##update reward list\n self.reward.append(r0)\n 
print(s0,a0,r0)\n while len(S) >= 1:\n self.update_q_table(S, A, R)\n S.popleft()\n A.popleft()\n R.popleft()\n agent_host.sendCommand('quit')", "def __init__(self, state_size, action_size, args,\n agent_count = 1,\n l2_decay = 0.0001):\n\n self.framework = \"D4PG\"\n self.device = args.device\n self.eval = args.eval\n\n self.actor_learn_rate = args.actor_learn_rate\n self.critic_learn_rate = args.critic_learn_rate\n self.gamma = args.gamma\n self.rollout = args.rollout\n self.num_atoms = args.num_atoms\n self.vmin = args.vmin\n self.vmax = args.vmax\n self.atoms = torch.linspace(self.vmin,\n self.vmax,\n self.num_atoms).to(self.device)\n self.atoms = self.atoms.unsqueeze(0)\n\n # Initialize ACTOR networks #\n self.actor = ActorNet(args.layer_sizes,\n state_size,\n action_size).to(self.device)\n self.actor_target = ActorNet(args.layer_sizes,\n state_size,\n action_size).to(self.device)\n self.actor_optim = optim.Adam(self.actor.parameters(),\n lr=self.actor_learn_rate,\n weight_decay=l2_decay)\n\n # Initialize CRITIC networks #\n c_input_size = state_size * agent_count\n c_action_size = action_size * agent_count\n self.critic = CriticNet(args.layer_sizes,\n c_input_size,\n c_action_size,\n self.num_atoms).to(self.device)\n self.critic_target = CriticNet(args.layer_sizes,\n c_input_size,\n c_action_size,\n self.num_atoms).to(self.device)\n self.critic_optim = optim.Adam(self.critic.parameters(),\n lr=self.critic_learn_rate,\n weight_decay=l2_decay)", "def update(self, samples, agent_number, logger):\n\n # need to transpose each element of the samples\n # to flip obs[parallel_agent][agent_number] to\n # obs[agent_number][parallel_agent]\n obs, obs_full, action, reward, next_obs, next_obs_full, done = map(transpose_to_tensor, samples)\n\n obs_full = torch.stack(obs_full)\n next_obs_full = torch.stack(next_obs_full)\n \n agent = self.maddpg_agent[agent_number]\n agent.critic_optimizer.zero_grad()\n\n #critic loss = batch mean of (y- Q(s,a) from target network)^2\n #y = reward of this timestep + discount * Q(st+1,at+1) from target network\n target_actions = self.target_act(next_obs)\n target_actions = torch.cat(target_actions, dim=1)\n \n target_critic_input = torch.cat((next_obs_full.t(),target_actions), dim=1).to(device)\n \n with torch.no_grad():\n q_next = agent.target_critic(target_critic_input)\n \n y = reward[agent_number].view(-1, 1) + self.discount_factor * q_next * (1 - done[agent_number].view(-1, 1))\n action = torch.cat(action, dim=1)\n critic_input = torch.cat((obs_full.t(), action), dim=1).to(device)\n q = agent.critic(critic_input)\n\n huber_loss = torch.nn.SmoothL1Loss()\n critic_loss = huber_loss(q, y.detach())\n critic_loss.backward()\n #torch.nn.utils.clip_grad_norm_(agent.critic.parameters(), 0.5)\n agent.critic_optimizer.step()\n\n #update actor network using policy gradient\n agent.actor_optimizer.zero_grad()\n # make input to agent\n # detach the other agents to save computation\n # saves some time for computing derivative\n q_input = [ self.maddpg_agent[i].actor(ob) if i == agent_number \\\n else self.maddpg_agent[i].actor(ob).detach()\n for i, ob in enumerate(obs) ]\n \n q_input = torch.cat(q_input, dim=1)\n # combine all the actions and observations for input to critic\n # many of the obs are redundant, and obs[1] contains all useful information already\n q_input2 = torch.cat((obs_full.t(), q_input), dim=1)\n \n # get the policy gradient\n actor_loss = -agent.critic(q_input2).mean()\n actor_loss.backward()\n 
#torch.nn.utils.clip_grad_norm_(agent.actor.parameters(),0.5)\n agent.actor_optimizer.step()\n\n al = actor_loss.cpu().detach().item()\n cl = critic_loss.cpu().detach().item()\n logger.add_scalars('agent%i/losses' % agent_number,\n {'critic loss': cl,\n 'actor_loss': al},\n self.iter)", "def queue_agents(self, agents):\n logger.info('Preparing agents...')\n for agent in tqdm(agents):\n self.data['agent_trip_types'][agent.id] = agent.public\n ev = self.route_agent(agent)\n if ev is not None:\n self.queue(*ev)", "def step(self, action):\n obs = self.gym.get_observations()\n all_actions = self.gym.act(obs)\n all_actions.insert(self.gym.training_agent, action)\n state, reward, terminal, info = self.gym.step(all_actions)\n agent_state = self.featurize(state[self.gym.training_agent])\n\n # agent_state_history = self.make_observation(agent_state, self.step)\n agent_reward = reward[self.gym.training_agent]\n\n # self.step += 1\n return agent_state, agent_reward, terminal, info", "def storeState(self):\n\n self.action_history[self.trial] = self.action\n self.ball_history[self.trial] = self.ballcolor", "def actor_critic_f(env, estimator_policy, estimator_value, num_episodes, discount_factor=1.0):\n\n # Keeps track of useful statistics\n stats = plotting.EpisodeStats(\n episode_lengths=np.zeros(num_episodes),\n episode_rewards=np.zeros(num_episodes)) \n\n Transition = collections.namedtuple(\"Transition\", [\"state\", \"action\", \"reward\", \"next_state\", \"done\"])\n\n max_return = 0\n\n for i in range(num_episodes):\n # Reset the environment and pick the fisrst action\n\n # set initial state (x, y, theta, x', y', theta')\n state = np.array([+7, 10, 0, 0, 20, 1.5], dtype=np.float32).reshape(6, 1)\n # state = np.array([+9, 1, 0, 0, 20, 0], dtype=np.float32).reshape(6, 1)\n action = np.array([0, 0], dtype=np.float32).reshape(2, 1)\n reward = 0\n env._reset(state)\n\n total_return = 0\n\n # One step in the environment\n for t in itertools.count():\n print \"{}-#{:03d} \".format(t, i+1),\n if t > 1000:\n break\n\n env._render({\n \"max_return\": max_return,\n \"total_return\": total_return\n })\n\n # Take a step\n mdp_state = form_mdp_state(state, action, reward)\n action = estimator_policy.predict(mdp_state)\n # action[0, 0] = 10\n # action[1, 0] = 1.9\n next_state, reward, done, _ = env.step(action)\n\n if total_return + reward < 0:\n reward = -500\n\n # Update statistics (minus 1 reward per step)\n total_return += reward\n\n if total_return > max_return:\n max_return = total_return\n\n # Calculate TD Target\n next_mdp_state = form_mdp_state(next_state, action, reward)\n value = estimator_value.predict(mdp_state)\n value_next = estimator_value.predict(next_mdp_state)\n td_target = reward + discount_factor * value_next\n td_error = td_target - value\n\n # Update the value estimator\n estimator_value.update(mdp_state, td_target)\n\n # Update the policy estimator (use td-error as advantage estimate)\n estimator_policy.update(mdp_state, td_error, action)\n\n # Print out which step we're on, useful for debugging.\n print \"action = [{:.2f}, {:.2f}]\".format(action[0,0], action[1,0]),\n print \"{:9.3f} (\\33[93m{:9.3f}\\33[0m)\".format(reward, total_return),\n print \"td_target (value) = {:5.2f} + {:5.2f} * {:5.2f} = {:5.2f}, value = {:5.2f}, td_error (policy) = {:5.2f}\".format(\n reward, discount_factor, value_next, td_target, value, td_error)\n\n if done or total_return < 0:\n break\n\n state = next_state\n \n stats.episode_rewards[i] = total_return\n stats.episode_lengths[i] = t\n\n return stats", 
"def updateState(self):\n\t\tif len(self.__state_history) != len(self.__reward):\n\t\t\traise Exception(\"State|Action tuples must be of the same length as Reward list\")\n\n\t\tsar = [(sa[0], sa[1], r) for (sa, r) in zip(self.__state_history, self.__reward)]\n\n\t\tself.policy.updatePolicyWithStateHistory(sar)", "def set_state(self,params):\n self.update_emccd_bias(params['emccd_bias'])\n self.update_num_images(params['num_images'])\n self.make_rois_from_lists(params['roi_coords'],params['thresholds'])\n try: # add things here that don't exist in old state files (different try/except for each)\n self.copy_im_threshs = params['copy_im_threshs']\n except KeyError:\n self.copy_im_threshs = [None for _ in range(self.num_images)]", "def set_defaults(self, agents):\n for a in agents:\n for k, v in a.get_defaults().items():\n self.env[k] = v", "def __init__(self, \n action_size=2, \n seed=42, \n n_agents=2,\n state_size=24,\n buffer_size=10000,\n batch_size=256,\n gamma=0.99,\n noise_start=1.0,\n noise_decay=1.0):\n self.action_size = action_size\n self.seed = seed\n self.n_agents = n_agents\n self.state_size = state_size\n self.buffer_size = buffer_size\n self.batch_size = batch_size\n self.gamma = gamma\n self.noise_weight = noise_start\n self.noise_decay = noise_decay\n \n self.enable_noise = True\n\n # instantiate agents with respective actor and critic\n models = [ActorCriticWrapper(num_agents=self.n_agents) for _ in range(self.n_agents)]\n self.agents = [DDPGAgent(i, models[i]) for i in range(self.n_agents)]\n \n # instantiate shared replay buffer\n self.memory = ReplayBuffer(self.action_size, self.buffer_size, self.batch_size, self.seed)", "def agent_step(self, reward, state):\n prev_val= self.state[self.prevAction]\n self.state[self.prevAction]=prev_val+self.alpha*(reward-prev_val)\n val=max(self.state)\n index=self.state.index(val)\n self.prevAction=index\n i=random.uniform(0,1)\n if i < 1-self.prob:\n self.prevAction=index\n return index\n else:\n index=random.randint(0,self.num_bandits-1)\n self.prevAction=index", "def _advance_by_action(game, agents, action):\n getLogger(__name__).debug(\"Agent {} action {}\".format(game.current_agent_id, action))\n agent_id_for_action = game.current_agent_id\n\n game.take_action(action)\n for agent in agents:\n agent.take_action(action, agent.agent_id == agent_id_for_action)", "def set_state(canvas, state):\n for key, value in state.items():\n set_attribute(canvas, key, value)", "def step(self, states, actions, rewards, next_states, dones):\n \n states = states.reshape(1, -1)\n next_states = next_states.reshape(1, -1)\n self.memory.add(states, actions, rewards, next_states, dones)\n\n # for each agent, sample experiences from the shared buffer and learn\n if len(self.memory) > self.batch_size:\n experiences = [self.memory.sample() for _ in range(self.n_agents)]\n self.learn(experiences, self.gamma)", "def __init__(self, env, actor=None, critic=None, weights=None, warmup_actor=1, warmup_critic=1, gamma=0.99):\n self.env = env\n nactions = np.product(self.env.action_shape)\n actor = self._actor() if actor is None else actor\n critic, action_input = self._critic() if critic is None else critic\n membuf = SequentialMemory(int(1E5), window_length=1)\n random = OrnsteinUhlenbeckProcess(size=nactions, theta=0.15, mu=0.0, sigma=0.3)\n self.agent = DDPGAgent(nb_actions=nactions,\n actor=actor,\n critic=critic,\n critic_action_input=action_input,\n memory=membuf,\n nb_steps_warmup_critic=warmup_critic,\n nb_steps_warmup_actor=warmup_actor,\n 
random_process=random,\n gamma=gamma,\n target_model_update=1E-3)\n self.agent.compile(keras.optimizers.Adam(lr=0.001, clipnorm=1.0), metrics=['mae', 'accuracy'])\n\n # Potentially load the Agent's weights from disk\n if weights is not None:\n basename, ext = os.path.splitext(weights)\n cweights = basename + \"_critic\" + ext\n aweights = basename + \"_actor\" + ext\n if not os.path.isfile(cweights):\n raise ValueError(\"Could not find file\", cweights)\n elif not os.path.isfile(aweights):\n raise ValueError(\"Could not find file\", aweights)\n else:\n self.agent.load_weights(weights)", "def configure( self, sensors, actuators, archiver=None ):\n self.__sensors = sensors\n self.__actuators = actuators\n self.__archiver = archiver\n self.__configured = True", "def assign_agents(particle,self):\n\n self.models[particle].state2agents(self.states[particle])\n\n return self.models[particle]", "def act(self, current_state, setpoint):\n s = self.last_s # Refers only to theta\n a = self.last_a # Refers only the last action taken\n s_p = self.map_state(\n current_state['theta'], current_state['theta_dot'])\n\n # Update the q-table based on the Bellman Equation\n r = self.reward_func(current_state)\n q_predict = self.q_table[s + (a,)]\n\n if not self.reset_env:\n q_target = r + self.params['gamma'] * np.max(self.q_table[s_p])\n else:\n q_target = r\n print(str(r) + ' Reset')\n self.reset_env = False\n # self.q_table[40,1]=90\n\n self.q_table[s+(a,)] += self.params['learn_rate'] * \\\n (q_target - q_predict)\n # print(self.params['epsilon'])\n # Take the next action\n if (np.random.uniform() < self.params['epsilon']) or (np.max(np.abs(self.q_table[s_p])) < 1e-3):\n ac = np.random.randint(self.num_actions)\n self.dist[ac] += 1\n else:\n ac = np.argmax(self.q_table[s_p])\n\n #ac = 0\n\n self.last_a = ac\n self.last_s = s_p\n if self.cont_decay == 1000:\n self.params['epsilon'] *= self.params['decay_rate']\n self.cont_decay = 0\n else:\n self.cont_decay += 1\n u = self.map_force(ac)\n f_u = self.F_cum + u\n print(self.params)\n print(self.q_table[30:51])\n return f_u", "def reset(self, *agents):\n # initialize the state to [0, 0, ..., 0] (length D+1) + [1, 1]\n for i in range(len(agents)):\n D_state = np.hstack((np.zeros(shape=(agents[i].D + 1)), [1, 1]))\n if i == 0:\n self.state = D_state\n else:\n self.state = np.hstack((self.state, D_state))\n\n self.k = 1\n\n # price\n self.S = np.zeros(shape=(self.N,))\n self.S[ind(self.k)] = self.initial_market_price\n self.S_tilde = np.zeros(shape=(self.N,))\n self.S_tilde[ind(self.k)] = self.initial_market_price\n\n for agent in agents:\n agent.reset()\n\n return self.state", "def __call__(self, state, observation, agent_info=None, history=None):\n B = observation.n_elems()\n\n _mean, _var = self.model(observation[\"frame\"])\n _id = torch.eye(self.action_dim).unsqueeze(0).repeat(B, 1, 1)\n\n distribution = torch.distributions.Normal(_mean, _var)\n action_sampled = distribution.sample()\n action_max = _mean\n smask = (\n agent_info[\"stochastic\"].float().unsqueeze(-1).repeat(1, self.action_dim)\n )\n action = action_sampled * smask + (1.0 - smask) * action_max\n\n agent_do = DictTensor({\"action\": action, \"mean\": _mean, \"std\": _var})\n state = DictTensor({})\n return agent_do, state", "def GAStep(self):\n\n self.updateMatingPool()\n self.newGeneration()", "def step(self):\n #_increment timers\n for agent in self.agents:\n agent.tick()\n\n # choose agent pair\n agentA, agentB = self.choose()\n\n # interact\n agentA.step(agentB)\n agentB.step(agentA)\n\n # 
log results\n self.logger.log(agentA, agentB)\n\n # increment counters\n self.steps += 1\n self.time += 1", "def update_policy(self):\n # this is update_policy \n # sample batch of 32 from the memory\n batch_of_samples = self.replay_memory.sample(batch_size=32)\n current_state_samples = batch_of_samples['current_state_samples']\n next_state_samples = batch_of_samples['next_state_samples']\n #print type(current_state_samples[0])\n #print current_state_samples\n\n # fetch stuff we need from samples 32*84*84*4\n current_state_images = np.zeros([1, 84, 84, 4])\n #print current_state_samples\n current_state_images[0,...] = np.dstack([sample.state for sample in current_state_samples])\n\n next_state_images = np.zeros([1, 84, 84, 4])\n next_state_images[0,...] = np.dstack([sample.state for sample in next_state_samples])\n\n # preprocess\n current_state_images = self.preprocessor.process_batch(current_state_images)\n next_state_images = self.preprocessor.process_batch(next_state_images)\n # print \"current_state_images {} max {} \".format(current_state_images.shape, np.max(current_state_images))\n #print current_state_images.shape\n q_current = self.q_network.predict(current_state_images,batch_size=self.batch_size) # 32*num_actions\n q_next = self.q_network.predict(next_state_images,batch_size=self.batch_size)\n\n # targets\n y_targets_all = q_current #1*num_actions\n #print y_targets_all.shape # [1,6]\n idx = 0 \n last_sample = current_state_samples[-1]\n if last_sample.is_terminal:\n y_targets_all[idx, last_sample.action] = last_sample.reward\n else:\n if self.mode == 'vanilla':\n y_targets_all[idx, last_sample.action] = np.float32(last_sample.reward) + self.gamma*np.max(q_next[idx])\n if self.mode == 'double': \n y_targets_all[idx, last_sample.action] = np.float32(last_sample.reward) + self.gamma*q_next[idx, np.argmax(q_current[idx])] \n\n loss = self.q_network.train_on_batch(current_state_images, np.float32(y_targets_all))\n\n with tf.name_scope('summaries'):\n self.tf_log_scaler(tag='train_loss', value=loss, step=self.iter_ctr)\n\n if not (self.iter_ctr % self.log_loss_every_nth):\n self.dump_train_loss(loss)\n\n # if (self.iter_ctr > (self.num_burn_in+1)) and not(self.iter_ctr%self.target_update_freq):\n # # copy weights\n # print \"Iter {} Updating target Q network\".format(self.iter_ctr)\n # self.target_q_network.set_weights(self.q_network.get_weights())\n # [self.target_q_network.trainable_weights[i].assign(self.q_network.trainable_weights[i]) \\\n # for i in range(len(self.target_q_network.trainable_weights))]", "def agent_init(self):\n pass", "def execute(self, agent: Agent, state: SimState) -> None:\n if agent.state() is not AgentState.INFECTIVE:\n return\n\n if np.random.random() < state.remove_prob():\n if np.random.random() < state.lethality():\n agent.set_state(AgentState.DEAD)\n else:\n agent.set_state(AgentState.IMMUNE)\n else:\n agent.update_sick_days()" ]
[ "0.57742363", "0.5736501", "0.5717755", "0.5712141", "0.5538278", "0.5537794", "0.5531449", "0.55293477", "0.55289406", "0.5468341", "0.5435091", "0.53988713", "0.5367964", "0.53655124", "0.53448045", "0.5333574", "0.5322495", "0.53154963", "0.5305376", "0.52951205", "0.52815765", "0.5263189", "0.52477175", "0.5244877", "0.52348083", "0.5209047", "0.5209047", "0.5195909", "0.5192983", "0.5191551", "0.51909286", "0.51830477", "0.517007", "0.51663667", "0.5160613", "0.5152443", "0.5130768", "0.5122538", "0.5120934", "0.51192683", "0.51162666", "0.5110788", "0.5103025", "0.51021415", "0.50986624", "0.50956035", "0.50715363", "0.50715363", "0.5062546", "0.5062546", "0.50529534", "0.5030907", "0.50255567", "0.502027", "0.50155705", "0.5008725", "0.50070286", "0.50034475", "0.49962622", "0.4992199", "0.498532", "0.498532", "0.49792084", "0.49766633", "0.49648422", "0.4960946", "0.49604562", "0.49506614", "0.4948144", "0.494808", "0.49426654", "0.49419048", "0.49388063", "0.49372238", "0.49370733", "0.49342138", "0.49265283", "0.49162948", "0.49124706", "0.49077561", "0.49066722", "0.49043953", "0.4897182", "0.48961705", "0.48960736", "0.4896059", "0.48953944", "0.4891157", "0.48905307", "0.48887923", "0.48867804", "0.48861048", "0.48856404", "0.48769322", "0.48765036", "0.4874289", "0.48652598", "0.48622566", "0.48562512", "0.48537162", "0.4851921" ]
0.0
-1
! resources object of Resources class contain resources from config file options object of MergeOptions class contain merge options from config file str_name default value same as the class name "SynsetsSUMOMerger2"
def __init__(self, resources, options, str_name = 'SynsetsSUMOMerger2'): super(SynsetsSUMOMerger2, self).__init__(resources, options, str_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def resources(self):", "def register_resources(self, resources):\n from tw.api import merge_resources\n merge_resources(self.request_local.resources, resources)", "def MergeLogic(self) -> str:", "def _merge_resource(self, resource, desired, unmanaged):\n unmanaged_resource = unmanaged[resource] # this always exists\n desired_resource = desired.get(resource)\n if desired_resource is None:\n desired_data = {}\n else:\n desired_data = desired_resource.data\n\n # determine if any changes occurred after merging\n if unmanaged_resource.merge(desired_data):\n return unmanaged_resource\n return None", "def mergeConfig(self):\n config = \\\n \"from Configuration.DataProcessing.Merge import mergeProcess\\nprocess = mergeProcess(\\n \"\n config += \",\".join(self.merge_inputs)\n config += \",\\n\"\n config += \" output_file = \\\"%s\\\",\\n\" % os.path.basename(self.lfn)\n config += \" output_lfn = \\\"%s\\\"\\n) \" % self.lfn\n return config", "def merge(): #Status: WIP\r\n pass", "def copyResource(game, channel, packageName, sdkDir, decompileDir, operations, name, pluginInfo=None):\n\n if operations != None:\n for child in operations:\n if child['type'] == 'mergeManifest':\n manifestFrom = utils_file.getFullPath(os.path.join(sdkDir, child['from']))\n manifestFromTemp = manifestFrom\n manifestTo = utils_file.getFullPath(os.path.join(decompileDir, child['to']))\n\n if 'orientation' in game:\n if game['orientation'] == 'portrait':\n manifestFrom = manifestFrom[:-4] + \"_portrait.xml\"\n else:\n manifestFrom = manifestFrom[:-4] + \"_landscape.xml\"\n\n if not os.path.exists(manifestFrom):\n manifestFrom = manifestFromTemp\n\n utils_log.info(\"The sdk manifest file is %s\", manifestFrom)\n\n # merge into xml\n bRet = mergeManifest(channel, manifestTo, manifestFrom)\n if bRet:\n utils_log.info(\"merge manifest file success.\")\n else:\n utils_log.error(\"merge manifest file failed.\")\n return 1\n\n elif child['type'] == 'copyRes':\n\n if child['from'] == None or child['to'] == None:\n utils_log.error(\"the sdk config file error. 'copyRes' need 'from' and 'to'.sdk name:%s\", name)\n return 1\n\n copyFrom = utils_file.getFullPath(os.path.join(sdkDir, child['from']))\n copyTo = utils_file.getFullPath(os.path.join(decompileDir, child['to']))\n\n if child['to'] == 'lib':\n copyLibs(game, copyFrom, copyTo, decompileDir)\n else:\n copyResToApk(copyFrom, copyTo)\n\n elif child['type'] == 'script' and pluginInfo != None:\n # now only third-plugin support script\n if child['from'] == None:\n utils_log.error(\"the sdk config file is error. 'script' need 'from' attrib to specify script.py\")\n return 1\n\n scriptName = child['from']\n utils_log.info(\"now to execute plugin script. 
name:%s\", scriptName)\n doScript(channel, pluginInfo, decompileDir, packageName, sdkDir, scriptName)\n\n return 0", "def createMergedConfigFile(self):\n # Read config data\n if os.path.isfile(self.config_file):\n with open(self.config_file, 'r') as stream:\n try:\n cfg = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n print(exc)\n if debug:\n print(\"Using Config file: \" + self.config_file)\n else:\n if debug:\n print(\"Config file does not exist: \" + self.config_file)\n exit(1)\n\n # If project namespace was not in the config file, set a default\n if (cfg is not None\n and 'generic' in cfg\n and 'project_namespace' in cfg['generic']\n and cfg['generic']['project_namespace'] is not None\n and len(cfg['generic']['project_namespace']) > 0):\n if debug:\n print(\"Using specified namespace\")\n else:\n conf_dir = os.path.dirname(self.config_file)\n cmd = \"cd \" + conf_dir + ' && basename `git rev-parse --show-toplevel`'\n try:\n result_bytes = subprocess.check_output(cmd,\n timeout=300,\n shell=True)\n project_namespace = result_bytes.decode('UTF-8').rstrip()\n if debug:\n print(\"Derived namespace from git: \" + project_namespace)\n except subprocess.CalledProcessError as e:\n if debug:\n print(\"Error deriving project namespace from git: \", e.output)\n sys.exit(1)\n # Insert the project_namespace into the config data\n if cfg is None:\n cfg = {}\n if 'generic' not in cfg:\n cfg['generic'] = {}\n cfg['generic']['project_namespace'] = project_namespace\n\n # Confirm project namespace\n if debug:\n print(\"Project Namespace: \" + cfg['generic']['project_namespace'])\n\n # Read overrides\n override_file_data = {}\n if os.path.isfile(self.override_file):\n with open(self.override_file, 'r') as stream:\n try:\n override_file_data = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n print(exc)\n\n # Created merged data\n self.config_data = cfg\n # print(\"Applying override_file_data: \" + str(override_file_data))\n if override_file_data is not None:\n self.config_data = merge(self.config_data, override_file_data)\n\n # Ensure parent directory for merged file exists\n directory = Path(self.merged_file).parent\n if not os.path.exists(directory):\n os.makedirs(directory)\n # Created merged file\n with open(self.merged_file, 'w') as out_file:\n yaml.dump(self.config_data, out_file)", "def build(self) -> Optional[Bundle]:\n # Prepare STIX2 bundle objects with author.\n bundle_objects = [self.author]\n\n # Add object marking definitions to bundle.\n bundle_objects.extend(self.object_markings)\n\n # Create intrusion sets and add to bundle.\n intrusion_sets = self._create_intrusion_sets()\n bundle_objects.extend(intrusion_sets)\n\n # Create sectors and add to bundle.\n sectors = self._create_sectors()\n bundle_objects.extend(sectors)\n\n # Intrusion sets target sectors and add to bundle.\n intrusion_sets_target_sectors = self._create_targets_relationships(\n intrusion_sets, sectors\n )\n bundle_objects.extend(intrusion_sets_target_sectors)\n\n # Create locations and add to bundle.\n locations = self._create_locations()\n bundle_objects.extend(locations)\n\n # Intrusion sets target locations and add to bundle.\n intrusion_sets_target_locations = self._create_targets_relationships(\n intrusion_sets, locations\n )\n bundle_objects.extend(intrusion_sets_target_locations)\n\n # Create observations.\n observations = self._create_ioc_observations()\n\n # Get observables and add to bundle.\n observables = [o.observable for o in observations if o.observable is not None]\n 
bundle_objects.extend(observables)\n\n # Get indicators, create YARA indicators and to bundle.\n indicators = [o.indicator for o in observations if o.indicator is not None]\n indicators.extend(self._create_yara_indicators())\n bundle_objects.extend(indicators)\n\n # Get observation relationships and add to bundle.\n indicators_based_on_observables = [\n o.relationship for o in observations if o.relationship is not None\n ]\n bundle_objects.extend(indicators_based_on_observables)\n\n # Indicator indicates entities, add to bundle.\n indicator_indicates = intrusion_sets\n\n indicator_indicates_entities = self._create_indicates_relationships(\n indicators, indicator_indicates\n )\n bundle_objects.extend(indicator_indicates_entities)\n\n # Create object references for the report.\n object_refs = create_object_refs(\n intrusion_sets,\n sectors,\n intrusion_sets_target_sectors,\n locations,\n intrusion_sets_target_locations,\n observables,\n indicators,\n indicators_based_on_observables,\n indicator_indicates_entities,\n )\n\n # TODO: Ignore reports without any references or not?\n # Hack, the report must have at least on object reference.\n if not object_refs:\n dummy_object = self._create_dummy_object()\n\n bundle_objects.append(dummy_object)\n object_refs.append(dummy_object)\n\n # Create report and add to bundle.\n report = self._create_report(object_refs)\n bundle_objects.append(report)\n\n # XXX: Without allow_custom=True the observable with the custom property\n # will cause an unexpected property (x_opencti_score) error.\n return Bundle(objects=bundle_objects, allow_custom=True)", "def merge(self, skel):\n return Skeleton.simple_merge((self, skel)).consolidate()", "def merge_spec(self):\n from django_swagger_utils.spec_client.merge_spec import MergeSpec\n merge_spec = MergeSpec(self.paths['api_spec_dir'], self.paths['base_dir'])\n merge_spec.merge()", "def merge(self, ref, *args):\n return self.cmd('merge', ref, *args)", "def merge(self, obj):\n pass", "def resourceManager(*args, nameFilter: AnyStr=\"\", saveAs: List[AnyStr, AnyStr]=None,\n **kwargs)->None:\n pass", "def resources(self, resources):\n self._resources = resources", "def _resolve_duplicates(self) -> None:\n resource_ids_resources: DefaultDict[str, List[Resource]] = defaultdict(list)\n for resource in self.resources:\n resource_ids_resources[resource.resource_id].append(resource)\n merged_resources: List[Resource] = []\n for resource_id, resources in resource_ids_resources.items():\n if len(resources) > 1:\n merged_resource = ResourceSpec.merge_resources(\n resource_id=resource_id, resources=resources\n )\n merged_resources.append(merged_resource)\n for merged_resource in merged_resources:\n self.resources = [\n resource\n for resource in self.resources\n if resource.resource_id != merged_resource.resource_id\n ]\n self.resources.append(merged_resource)", "def _load_resources(self):\n puts = (getattr(self, 'project', None) or self).puts\n for resource_type, resource_cls in six.iteritems(AVAILABLE_RESOURCES):\n for name in self.settings.get(resource_type, {}):\n extra = {\n 'project': getattr(self, 'project', None) or self,\n 'app': self if hasattr(self, 'project') else None,\n }\n\n with indent(4 if hasattr(self, 'project') else 2):\n puts(colored.green(u\"✓ {}:{}\".format(resource_type, name)))\n\n self._resources[resource_type].append(\n resource_cls.factory(\n name=name,\n settings=self.settings.get(resource_type, {})[name],\n **extra\n )\n )", "def _merge(self):\n raise NotImplementedError", "def 
_build_resources_template(self, output_filename=\"{}_r.json\"):\n\n template = self._base_troposphere_template()\n\n for resource_type, resource_cls in six.iteritems(AVAILABLE_RESOURCES):\n resource_cls.register_type_resources_template(self, template)\n for r in self.get_resources(resource_type):\n r.register_resources_template(template)\n\n template = utils.fix_troposphere_references(template)\n\n if template and template.resources:\n output_filename = output_filename.format(self._get_next_build_sequence_id())\n self.puts(colored.cyan(output_filename))\n with open(os.path.join(self.build_path, output_filename), 'w') as f:\n f.write(template.to_json())", "def merge(self):\n rdr = Reader(self.config)\n rdr.read_string(utils.paste(single_line=False))\n if len(rdr.get_entry_collection().entries) == 0:\n self.visual.error(\"Zero items extracted from the collection to merge.\")\n return\n eids = []\n for entry in rdr.get_entry_collection().entries.values():\n self.entry_collection.add_new_entry(entry)\n eids.append(entry.ID)\n self.selector.update_reference(self.reference_entry_id_list)\n # select them\n res = self.selector.select_by_id(eids)\n if res is None:\n self.visual.error(\"Failed to select merged entry!\")\n self.visual.log(\"Merged new entr{}:\".format(\"y\" if len(res) == 1 else \"ies\"))\n self.show_entries()", "def combine_data(self, object, additional_data):\n object[\"ancestors\"] = additional_data[\"ancestors\"] if self.cartographer_client else []\n object[\"position\"] = additional_data.get(\"order\", 0) if additional_data else 0\n object = super(ResourceMerger, self).combine_data(object, additional_data)\n return combine_references(object)", "def __init__(self):\r\n self.label = \"mergeAreas\"\r\n self.description = \"Merges dark targets feature classes into single \\\r\n acquisition swathes by day.\"\r\n self.canRunInBackground = False", "def is_merged(self):\r\n url = '{0}/merge'.format(self.get_url())\r\n\r\n return http.Request('GET', url), resource.parse_boolean", "def resource_map(self):", "def getResourceReplacers(self):\n replacers = {}\n replacerDir = os.path.join(self.dir,'Replacers')\n if not os.path.exists(replacerDir):\n return replacers\n if 'mosh.resourceReplacer.applied' not in settings:\n settings['mosh.resourceReplacer.applied'] = []\n for name in os.listdir(replacerDir):\n path = os.path.join(replacerDir,name)\n if os.path.isdir(path):\n replacers[name] = ResourceReplacer(replacerDir,name)\n return replacers", "def merge():\n click.echo(\"Not implemented yet. 
In the future, this command will be used for merging models.\")\n sys.exit(-2)", "def get_resources(self):\n res = set()\n res.update(self.get_inputs())\n res.update(self.get_outputs())\n return res", "def createResourceSims(self):\n if self.game.myEmpire['viewResources'] == 0:\n return\n import anwp.sims\n # remove old sims if any\n self.removeResourceSims()\n # create resource sims\n self.resourceSims = []\n for systemID, systemDict in self.game.allSystems.iteritems():\n if systemDict['myEmpireID'] == self.game.myEmpireID:\n # create resource sims representing resources on system\n i = 0\n for attr in ['AL', 'EC', 'IA']:\n if systemDict[attr] > 0:\n # system produces this resource create sim\n name = string.lower(attr[-2:])\n imageFileName = '%smini_%s.png' % (self.game.app.genImagePath, name)\n \n # create sim\n sim = ResourceEntity(self, anwp.sims.categories.StaticCategory(imageFileName, 'resource'))\n \n # add sim to world\n self.resourceSims.append(sim)\n x = systemDict['x'] - 15\n y = systemDict['y'] - 45 - 20*i\n facing = 0\n speed = 0\n force = 1\n self.world.addToWorld(sim, x, y, facing, speed, force)\n i += 1\n \n # create resource sims representing resources being generated\n i = 0\n for attr in ['prodAL', 'prodEC', 'prodIA', 'prodCR']:\n if systemDict[attr] > 0:\n # system produces this resource create sim\n name = string.lower(attr[-2:])\n imageFileName = '%smini_%s_gen.png' % (self.game.app.genImagePath, name)\n \n # create sim\n sim = ResourceEntity(self, anwp.sims.categories.StaticCategory(imageFileName, 'resource'))\n \n # add sim to world\n self.resourceSims.append(sim)\n x = systemDict['x'] + 15\n y = systemDict['y'] - 45 - 20*i\n facing = 0\n speed = 0\n force = 1\n self.world.addToWorld(sim, x, y, facing, speed, force)\n i += 1", "def merge(self, filename = None, format = 'srt'):\n \n return self.download(filename, format, True)", "def merge_asset(self, other):\n for asset in other.asset:\n asset_name = asset.get(\"name\")\n asset_type = asset.tag\n # Avoids duplication\n pattern = \"./{}[@name='{}']\".format(asset_type, asset_name)\n if self.asset.find(pattern) is None:\n self.asset.append(asset)", "def build_resource(self, *args, **kwargs):\r\n r = {}\r\n for current_resource in self.resources:\r\n item = self._get_resource(\r\n repo=self.current_repo, owner=self.owner, \r\n resource=current_resource, **kwargs\r\n )\r\n if not item: continue\r\n r[current_resource] = item\r\n\r\n return r", "def resources(self, resources):\n\n self._resources = resources", "def resources(self, resources):\n\n self._resources = resources", "def resources(self, resources):\n\n self._resources = resources", "def resources(self, resources):\n\n self._resources = resources", "def import_terminology(Name=None, MergeStrategy=None, Description=None, TerminologyData=None, EncryptionKey=None):\n pass", "def test_merge(self):\r\n filename = os.path.join(CONFIGURATION.source_messages_dir, random_name())\r\n generate.merge(CONFIGURATION.source_locale, target=filename)\r\n self.assertTrue(os.path.exists(filename))\r\n os.remove(filename)", "def augment(self, resources):\n return self.source.augment(resources)", "def js_merge(self):\n if self.merge:\n js = \"\"\n for file_name in self.file_names:\n try:\n js += jsmin(open(file_name, newline=\"\\n\").read())\n except FileNotFoundError:\n print(f\"The file {file_name} could not be found\")\n self.js = jsmin(js)\n\n else:\n for file_name in self.file_names:\n js = jsmin(open(file_name, newline=\"\\n\").read())\n open(file_name, 'w', 
newline=\"\\n\").write(js)", "def merge_objects(self, mujoco_objects):\n self.n_objects = len(mujoco_objects)\n self.mujoco_objects = mujoco_objects\n self.objects = [] # xml manifestation\n self.max_horizontal_radius = 0\n for obj_name, obj_mjcf in mujoco_objects.items():\n self.merge_asset(obj_mjcf)\n # Load object\n obj = obj_mjcf.get_collision(name=obj_name, site=True)\n obj.append(new_joint(name=obj_name, type=\"free\", damping=\"0.0005\"))\n self.objects.append(obj)\n self.worldbody.append(obj)\n\n self.max_horizontal_radius = max(\n self.max_horizontal_radius, obj_mjcf.get_horizontal_radius()\n )", "def resource_prefix(self):", "def pop_resources(self):\n resources = self.request_local.resources\n self.request_local.resources = {}\n # deal with aggregated resources\n if resources and \"head\" in resources:\n # This is lazy, because we otherwise run\n # into circular import issues\n if self.aggregation_config is not None:\n self._setup_aggregation_mapping()\n\n\n if self.aggregated_js_mapping:\n self._replace_resources_with_aggregates(resources,\n self.aggregated_js_mapping,\n JSLink,\n )\n if self.aggregated_css_mapping:\n self._replace_resources_with_aggregates(resources,\n self.aggregated_css_mapping,\n CSSLink,\n )\n return resources", "def merge_docs(self):", "def _build_resources_repr(self, resources):\n if resources:\n result = \", \".join(\"{} (r{})\".format(r.name, r.revision) for r in resources)\n else:\n result = \"-\"\n return result", "def resources(self):\n return self.__resources", "def __init__(self):\r\n\t\tself.label = \"Linked Data Single No Functional Property Merge\"\r\n\t\tself.description = \"\"\"The related seperated tables from Linked Data Location Entities Property Enrichment Tool have multivalue for each wikidata location because the coresponding property is not functional property. 
\r\n\t\tThis Tool helps user to merge these multivalue to a single record and add it to original feature class sttribute table by using merge rules which are specified by users.\"\"\"\r\n\t\tself.canRunInBackground = False", "def _merge_rois(mer_path, label_list):\n class_list = []\n class_list.append(label_list[0])\n for test_fn in label_list[1:]:\n test_label = mne.read_label(test_fn)\n i = 0\n belong = False\n while (i < len(class_list)) and (belong is False):\n fn_lost = '/home/uais/data/freesurfer/subjects/fsaverage/dSPM_conf_stc/STC_ROI/merge/LLrt,new_4-rh.label' \n #if class_list[i] == fn_lost:\n # import pdb\n # pdb.set_trace()\n class_label = mne.read_label(class_list[i])\n label_name = class_label.name\n if test_label.hemi != class_label.hemi:\n i = i + 1\n continue\n overlapped = len(np.intersect1d(test_label.vertices,\n class_label.vertices))\n if overlapped > 0:\n com_label = test_label + class_label\n pre_test = test_label.name.split('_')[0]\n pre_class = class_label.name.split('_')[0]\n # label_name = pre_class + '_%s-%s' %(pre_test,class_label.name.split('-')[-1])\n if pre_test != pre_class:\n pre_class += ',%s' % pre_test\n pre_class = list(set(pre_class.split(',')))\n new_pre = ''\n for pre in pre_class[:-1]:\n new_pre += '%s,' % pre\n new_pre += pre_class[-1]\n label_name = '%s_' % (new_pre) + class_label.name.split('_')[-1]\n os.remove(class_list[i])\n os.remove(test_fn)\n fn_newlabel = mer_path + '%s.label' %label_name\n if os.path.isfile(fn_newlabel):\n fn_newlabel = fn_newlabel[:fn_newlabel.rfind('-')] + ',new-%s' % fn_newlabel.split('-')[-1]\n mne.write_label(fn_newlabel, com_label)\n class_list[i] = fn_newlabel\n belong = True\n i = i + 1\n if belong is False:\n class_list.append(test_fn)\n return len(class_list)", "def __init__(self,resources):\n OeskObject.__init__(self,resources[0][0][0],resources[0][0][1])\n self.checkAndParse(resources)\n self.addAttribute('PRIORITY', hex(0))", "def merge_files(locale, fail_if_missing=True):\r\n for target, sources in CONFIGURATION.generate_merge.items():\r\n merge(locale, target, sources, fail_if_missing)", "def build(self, resource_name: str) -> str: # pragma: no cover\n raise NotImplementedError", "def tag_resources_with_options(\n self,\n request: dds_20151201_models.TagResourcesRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.TagResourcesResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.region_id):\n query['RegionId'] = request.region_id\n if not UtilClient.is_unset(request.resource_group_id):\n query['ResourceGroupId'] = request.resource_group_id\n if not UtilClient.is_unset(request.resource_id):\n query['ResourceId'] = request.resource_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.resource_type):\n query['ResourceType'] = request.resource_type\n if not UtilClient.is_unset(request.tag):\n query['Tag'] = request.tag\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='TagResources',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n 
auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.TagResourcesResponse(),\n self.call_api(params, req, runtime)\n )", "def create_resource_object():\n\n # Create two objects of different users and same center code\n Resource.objects.create(status=0, title='Recurso de teste (BR1.1)', \n link='http://bvsalud.org', originator='BIREME',\n created_by_id=1, cooperative_center_code='BR1.1')\n \n Resource.objects.create(status=0, title='Recurso de teste (BR1.1)', \n link='http://bvsalud.org', originator='BIREME',\n created_by_id=2, cooperative_center_code='BR1.1')\n\n # Create one object of diffent center code\n Resource.objects.create(status=0, title='Recurso de teste (PY3.1)', \n link='http://bvsalud.org', originator='BIREME',\n created_by_id=3, cooperative_center_code='PY3.1')\n\n\n # add descriptor and thematic area for resource pk 1\n object_ct = ContentType.objects.get_for_model(Resource)\n descriptor = Descriptor.objects.create(object_id=1, content_type=object_ct, text='descritor 1')\n keyword = Keyword.objects.create(object_id=1, content_type=object_ct, text='keyword 1')\n thematic = ResourceThematic.objects.create(object_id=1, content_type=object_ct, thematic_area_id=1)", "def _resource_library_rules_objects(self):\n env_name = self._env_name()\n objs_name = self._objs_name()\n\n objs = []\n res_srcs = self.data['res_srcs']\n res_objects = {}\n path = self.path\n for src in res_srcs:\n base_src_name = self._regular_variable_name(os.path.basename(src))\n src_name = base_src_name + '_' + self.name + '_res'\n if src_name not in res_objects:\n res_objects[src_name] = (\n '%s_%s_object' % (\n base_src_name,\n self._regular_variable_name(self.name)))\n target_path = os.path.join(self.build_path,\n path,\n '%s.objs' % self.name,\n base_src_name)\n self._write_rule(\n '%s = %s.SharedObject(target=\"%s\" + top_env[\"OBJSUFFIX\"]'\n ', source=\"%s\")' % (res_objects[src_name],\n env_name,\n target_path,\n src))\n objs.append(res_objects[src_name])\n self._write_rule('%s = [%s]' % (objs_name, ','.join(objs)))", "def merge_assets(self, other):\n for asset in other.asset:\n if find_elements(root=self.asset, tags=asset.tag,\n attribs={\"name\": asset.get(\"name\")}, return_first=True) is None:\n self.asset.append(asset)", "def resources(self, value):\n self._resource_objects = value", "def initResonance(resonance, doMerge=True):\n from ccpnmr.analysis.core.MoleculeBasic import DEFAULT_ISOTOPES\n \n if resonance.isDeleted:\n return\n \n #print 'initResonance', makeResonanceGuiName(resonance)\n resonanceSet = resonance.resonanceSet\n \n if resonance.name == 'r%d' % resonance.serial:\n resonance.setName(None)\n \n if resonanceSet:\n atomSets = tuple(resonanceSet.atomSets)\n atom = atomSets[0].findFirstAtom()\n element = atom.chemAtom.elementSymbol\n if resonance.isotopeCode[-len(element):] != element:\n msg = 'Resonance %d isotope-assignment mismatch: Resetting isotope' \n print msg % resonance.serial\n \n resonance.isotopeCode = DEFAULT_ISOTOPES.get(element, 'unknown')\n getBoundResonances(resonance, recalculate=True, contribs=None) \n \n spinSystem = findSpinSystem(resonance)\n if spinSystem:\n if resonance.resonanceGroup: # same as the spin system\n residue = atom.residue\n \n if spinSystem.residue is not residue:\n #assignSpinSystemResidue(spinSystem,residue)\n \n if doMerge:\n ccpCode = residue.molResidue.ccpCode\n spinSystems = list(resonance.nmrProject.findAllResonanceGroups(residue=residue))\n N = len(spinSystems)\n 
msg = 'There are %d separate %d%s spin systems. Merge together?'\n if (N > 1) and showOkCancel('Confirm', msg % (N,residue.seqCode,ccpCode)):\n assignSpinSystemResidue(spinSystem,residue) \n \n for spinSystem1 in spinSystems[1:]:\n mergeSpinSystems(spinSystem1,spinSystems[0])\n \n else:\n assignSpinSystemResidue(spinSystem,residue) \n \n else:\n assignSpinSystemResidue(spinSystem,residue) \n \n else:\n addSpinSystemResonance(spinSystem, resonance)\n\n assignResonanceType(resonance, atomSets)\n \n else:\n updateResonanceAnnotation(resonance)\n\n if not resonance.shifts:\n if resonance.peakDimContribs:\n for contrib in resonance.peakDimContribs:\n peakDim = contrib.peakDim\n experiment = peakDim.peak.peakList.dataSource.experiment\n \n if experiment.shiftList is None:\n shiftList = resonance.nmrProject.findFirstMeasurementList(className='ShiftList')\n if shiftList is None:\n shiftList = resonance.nmrProject.newShiftList(unit='ppm')\n experiment.setShiftList( shiftList )\n \n updateResonShift(resonance,peakDim)", "def create_shared_resources(resources_or_config: Union[dict, SharedResources] = None) -> SharedResources:\n if resources_or_config is None:\n return SharedResources()\n elif isinstance(resources_or_config, SharedResources):\n return resources_or_config\n else:\n return SharedResources(config=resources_or_config)", "def load_specs(self,merge_method=None):\n\n\t\timport copy\n\t\tspecs_files = glob.glob('./calcs/specs/meta*yaml')\n\t\tallspecs = []\n\t\tmerge_method = self.merge_method if not merge_method else merge_method\n\t\tfor fn in specs_files:\n\t\t\twith open(fn) as fp: \n\t\t\t\tif (merge_method != 'override_factory' or \n\t\t\t\t\tnot re.match('^meta\\.factory\\.',os.path.basename(fn))):\n\t\t\t\t\tallspecs.append(yaml.load(fp.read()))\n\t\t#---if we are overriding factory then we change to careful after filtering out the factory\n\t\tmerge_method = 'careful' if merge_method=='override_factory' else merge_method\n\t\tif merge_method=='strict':\n\t\t\tspecs = allspecs.pop(0)\n\t\t\tfor spec in allspecs:\n\t\t\t\tfor key,val in spec.items():\n\t\t\t\t\tif key not in specs: specs[key] = copy.deepcopy(val)\n\t\t\t\t\telse: raise Exception('\\n[ERROR] redundant key %s in more than one meta file'%key)\n\t\telif merge_method=='careful':\n\t\t\t#---! recurse only ONE level down in case e.g. calculations is defined in two places but there\n\t\t\t#...! 
...are no overlaps, then this will merge the dictionaries at the top level\n\t\t\tspecs = allspecs.pop(0)\n\t\t\tfor spec in allspecs:\n\t\t\t\tfor topkey,topval in spec.items():\n\t\t\t\t\tif topkey not in specs: specs[topkey] = copy.deepcopy(topval)\n\t\t\t\t\telse: \n\t\t\t\t\t\tfor key,val in topval.items():\n\t\t\t\t\t\t\tif key not in specs[topkey]: specs[topkey][key] = val\n\t\t\t\t\t\t\telse: \n\t\t\t\t\t\t\t\timport pdb;pdb.set_trace()\n\t\t\t\t\t\t\t\traise Exception(\n\t\t\t\t\t\t\t\t('[ERROR] performing careful merge in the top-level specs dictionary \"%s\" '+\n\t\t\t\t\t\t\t\t' but there is already a child key \"%s\"')%(topkey,key))\n\t\telse: raise Exception('\\n[ERROR] unclear meta specs merge method %s'%merge_method)\n\t\treturn specs", "def has_merge(self) -> Optional[str]:\n return self.source_name is not None", "def get_resources():\r\n global __res\r\n if __res == None:\r\n __init_resources()\r\n return __res", "def get_resources():\r\n global __res\r\n if __res == None:\r\n __init_resources()\r\n return __res", "def get_resources():\r\n global __res\r\n if __res == None:\r\n __init_resources()\r\n return __res", "def merge(self):\n try:\n self.save()\n except Exception as e:\n existing = self.session.Sample.find_by_name(self.name)\n if existing:\n if self.sample_type_id == existing.sample_type_id:\n existing.update_properties(self.properties)\n existing.description = self.description\n existing.project = self.project\n existing.save()\n self.reload(existing.dump())\n return True\n else:\n raise e\n else:\n raise e\n return False", "def MakeResource(resource_list, output_list=None):\n content = {'resources': resource_list}\n if output_list:\n content['outputs'] = output_list\n return yaml.dump(content)", "def has_merge(self) -> Optional[str]:\n return None", "def create_resources(self) -> List[ResourceDescription]:\r\n return self.resources", "def __init__(__self__,\n resource_name: str,\n args: BundleArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def common_resources(cls) -> Optional[Tuple[Any, ...]]:\n return None", "def mergeResonances(resonanceB, resonanceA):\n\n from ccpnmr.analysis.core.MoleculeBasic import getResidueMapping\n \n if resonanceB is resonanceA:\n return resonanceA\n\n if resonanceB.isDeleted:\n return resonanceA\n\n if resonanceA.isDeleted:\n return resonanceB\n \n removeAssignmentNotifiers()\n \n isotopeA = resonanceA.isotopeCode\n isotopeB = resonanceB.isotopeCode\n \n if isotopeA and isotopeB:\n if isotopeA != isotopeB:\n showWarning('Resonance Merge Failure',\n 'Attempt to merge resonances with different isotope codes')\n setupAssignmentNotifiers()\n return \n \n mappings = []\n resonanceSet = resonanceB.resonanceSet\n if resonanceSet:\n atomSets = resonanceSet.atomSets\n residue = resonanceSet.findFirstAtomSet().findFirstAtom().residue\n serials = [atomSet.serial for atomSet in atomSets]\n serials.sort()\n residueMapping = getResidueMapping(residue)\n for atomSetMapping in residueMapping.atomSetMappings:\n serials2 = list(atomSetMapping.atomSetSerials)\n serials2.sort()\n if serials2 == serials:\n mappings.append([atomSetMapping, atomSets])\n \n # attributes where we have object.resonance\n controlData = {'findFirstMeasurement':('shiftDifferences', 'hExchRates',\n 'hExchProtections', 'shiftAnisotropies',\n 't1s', 't1Rhos', 't2s'),\n 'findFirstDerivedData':('pkas',),\n 'findFirstPeakDimContrib':('peakDimContribs',)\n }\n for funcName in controlData:\n for attrName in controlData[funcName]:\n for objectA in 
list(resonanceA.__dict__.get(attrName)):\n objectB = getattr(objectA.parent, funcName)(resonance=resonanceB)\n if objectB is not None:\n objectA = mergeObjects(objectB, objectA)\n \n # attributes where we have object.resonances\n controlData = {'findFirstMeasurement':('jCouplings',\n 'noes', 'rdcs', 'dipolarRelaxations'),\n 'findFirstDerivedData':('isotropicS2s', 'spectralDensities',\n 'datums'),\n 'findFirstPeakDimContribN':('peakDimContribNs',)\n }\n for funcName in controlData:\n for attrName in controlData[funcName]:\n for objectA in list(resonanceA.__dict__.get(attrName)):\n testKey = set(objectA.__dict__['resonances'])\n testKey.remove(resonanceA)\n testKey.add(resonanceB)\n testKey = frozenset(testKey)\n objectB = getattr(objectA.parent, funcName)(resonances=testKey)\n \n if objectB is not None:\n objectA = mergeObjects(objectB, objectA)\n \n resonanceA.setCovalentlyBound([])\n resonanceB.setCovalentlyBound([])\n \n # merge shifts in the same shiftlist\n # NB must be done after other measurements \n for shiftA in resonanceA.shifts:\n for shiftB in resonanceB.shifts:\n if shiftA.parentList is shiftB.parentList:\n shiftA = mergeObjects(shiftB,shiftA)\n\n # Get rid of duplicate appData\n for appData in resonanceA.applicationData:\n matchAppData = resonanceB.findFirstApplicationData(application=appData.application,\n keyword=appData.keyword)\n if matchAppData:\n resonanceB.removeApplicationData(matchAppData)\n \n mergeObjects(resonanceB, resonanceA)\n \n # Must be after resonance merge, so that links to peaks are properly set\n for shiftA in resonanceA.shifts:\n averageShiftValue(shiftA)\n \n # Assign names will be merged, but if assigned we only want the correct ones \n if resonanceA.resonanceSet:\n assignNames = []\n for atomSet in resonanceA.resonanceSet.atomSets:\n assignNames.append( atomSet.name )\n \n resonanceA.setAssignNames(assignNames) \n \n for atomSetMapping, atomSets in mappings:\n updateAtomSetMapping(atomSetMapping, atomSets)\n \n getBoundResonances(resonanceA, recalculate=True)\n updateResonanceAnnotation(resonanceA)\n \n setupAssignmentNotifiers()\n \n return resonanceA", "def merge(self, args, prompt=True):\n # Keep a copy of the args for an action\n self.cli_args = args\n if not self.has_section('iscore'):\n self.add_section('iscore')\n\n if args.iscore_url:\n self.set('iscore', 'base_url', args.iscore_url)\n\n if args.api_version:\n self.set('iscore', 'api_version', args.api_version)\n\n if hasattr(args, 'save') and args.save:\n self.set('iscore', 'force_save', 'yes')\n\n self.credentials = None\n if args.api_token:\n self.api_token = args.api_token\n elif self.has_option('iscore', 'api_token'):\n self.api_token = self.get('iscore', 'api_token')\n elif prompt:\n print(\"Enter your IScorE API Token (leave blank to use your credentials)\")\n self.api_token = input(\"> \")\n\n if not self.api_token:\n print(\"Please login using your IScorE credentials\")\n username = input(\"Username: \")\n password = getpass.getpass()\n self.credentials = (username, password)", "def parse_resources(self, soup):\n for res in soup.find_all('res'):\n if 'customlangpack' in res['id'].lower():\n self.find_langpack_path(res)\n else:\n rid = remove_xml(res['id'])\n self.resources[rid] = path_format(self.properties.substitute(res['src']))", "def _add_resource_descriptions_to_pools(self, meta_list):\r\n if not meta_list:\r\n return\r\n\r\n for meta in meta_list:\r\n getattr(resources, meta.resource_type).add(meta)", "def __init__(self, \r\n initial_concept_uri=None, \r\n lang=None, \r\n 
broader=True, \r\n narrower=False, \r\n verbose=False,\r\n refresh=False):\r\n def get_cached_skos_option_dict():\r\n '''\r\n Helper function to retrieve cached skos_option_dict\r\n '''\r\n cached_skos_option_dict_path = os.path.join(self.cache_dir, 'skos_options.yaml')\r\n try:\r\n cached_skos_option_dict_file = open(cached_skos_option_dict_path, 'r')\r\n cached_skos_option_dict = yaml.load(cached_skos_option_dict_file)\r\n cached_skos_option_dict_file.close()\r\n except:\r\n cached_skos_option_dict = {}\r\n \r\n return cached_skos_option_dict\r\n \r\n # Start of constructor\r\n assert narrower or broader, 'Need at least one of \"broader\" or \"narrower\" set to True in order to build concept trees'\r\n \r\n self.fcache_dir = os.path.join(tempfile.gettempdir(), 'concept_hierarchy')\r\n \r\n self.lang = lang or 'en'\r\n self.narrower = narrower\r\n self.broader = broader\r\n self.verbose = verbose\r\n \r\n self.skos_option_dict = {'altLabels': True, \r\n 'narrower': narrower, \r\n 'broader': broader,\r\n 'lang': lang\r\n } \r\n \r\n # Force refresh if SKOS options have changed\r\n self.refresh = refresh or (self.skos_option_dict != get_cached_skos_option_dict()) \r\n\r\n self.concept_fetcher = ConceptFetcher(self.skos_option_dict)\r\n \r\n self.concept_registry = {}\r\n \r\n if self.refresh:\r\n if self.verbose:\r\n print 'Refreshing disk cache'\r\n else:\r\n self.load() \r\n \r\n if initial_concept_uri:\r\n self.get_concept_from_uri(initial_concept_uri) # Build tree around initial URI if specified\r", "def get_resources():\n global __res\n if __res == None:\n __init_resources()\n return __res", "def get_resources():\n global __res\n if __res == None:\n __init_resources()\n return __res", "def get_resources():\n global __res\n if __res == None:\n __init_resources()\n return __res", "def __init__(self):\r\n\t\tself.label = \"Linked Data Batch No Functional Property Merge\"\r\n\t\tself.description = \"\"\"The related seperated tables from Linked Data Location Entities Property Enrichment Tool have multivalue for each wikidata location because the coresponding property is not functional property. 
\r\n\t\tThis Tool helps user to merge these multivalue to a single record and add it to original feature class sttribute table by using merge rules which are specified by users.\"\"\"\r\n\t\tself.canRunInBackground = False", "def on_merge(self, to_be_merged, merge_result, context):\n pass", "def merge(*args):\n return _libsbml.Unit_merge(*args)", "def bundle_outputs(self):\n pass", "def stand_by(self):\n\n if self.gpus is None:\n self['gpus'] = [0]\n\n # setup logging\n setup_logging(self.get_log_folder(), self.logging)\n\n # # prepare workspace\n # self.get_workspace_dir()\n\n # pretrained\n if \"pretrained\" in self:\n name = self.pretrained.name\n resources = self.pretrained.family.get(name)\n assert isinstance(resources, dict), ValueError(f\"Could not get the resource with name: {name}\")\n cache_dir = self.pretrained.cache_dir\n if cache_dir is None:\n cache_path = default_download_dir(name)\n else:\n cache_path = Path(cache_dir)\n cache_path = cache_path / name\n if not cache_path.exists():\n maybe_create_dir(str(cache_path))\n\n for k, item in resources.items():\n url = item.url\n suffix = item.suffix\n to_insert_paths = item.to_insert_paths\n to_replaces = item.to_replaces\n others = item.others\n # when specify urls of resource\n if url.startswith(\"http\"):\n filename = url.split(\"/\")[-1]\n file_path = cache_path / filename\n if not file_path.exists():\n try:\n maybe_download(url, str(cache_path))\n except Exception as e:\n logger.error(f\"Download from {url} failure!\", exc_info=True)\n raise e\n # assigning filename to the corresponding config variable.\n if to_insert_paths and file_path.exists():\n for to_insert_path in to_insert_paths:\n self.cascade_set(to_insert_path, str(file_path))\n # replace corresponding variable with file content\n if to_replaces and file_path.exists():\n replace_content = load_from_file(str(file_path))\n for to_replace in to_replaces:\n self.cascade_set(to_replace, replace_content)\n else: # when specify paths of resource which have downloaded.\n maybe_filename = Path(url)\n if maybe_filename.is_dir():\n if suffix:\n file_path = maybe_filename / suffix\n else:\n file_path = maybe_filename\n else:\n file_path = maybe_filename\n # assigning filename to the corresponding config variable.\n if to_insert_paths:\n for to_insert_path in to_insert_paths:\n self.cascade_set(to_insert_path, str(file_path))\n # replace corresponding variable with file content\n if to_replaces:\n replace_content = load_from_file(str(file_path))\n for to_replace in to_replaces:\n self.cascade_set(to_replace, replace_content)\n logging.info(f\"Prepare resource {k} from {url}, whose path is {str(file_path)}\")\n # others config\n if others:\n for _k, v in others.items():\n value = v.get(\"value\")\n other_to_replaces = v.get(\"to_replaces\")\n for other_to_replace in other_to_replaces:\n self.cascade_set(other_to_replace, value)\n\n # replace placeholder\n self._replace_placeholder(self)", "def render_merged(self, context):\r\n\r\n output, files, filter = self.resolve(context)\r\n\r\n # make paths absolute\r\n output_path = _abspath(output)\r\n source_paths = [_abspath(s) for s in files]\r\n\r\n # check if the asset should be (re)created\r\n if not os.path.exists(output_path):\r\n if not settings.ASSETS_AUTO_CREATE:\r\n # render the sources after all\r\n return self.render_sources(context)\r\n else:\r\n update_needed = True\r\n else:\r\n update_needed = get_updater()(output_path, source_paths)\r\n\r\n if update_needed:\r\n create_merged(source_paths, output_path, filter)\r\n 
last_modified = os.stat(output_path).st_mtime\r\n # TODO: do asset tracking here\r\n #get_tracker()()\r\n\r\n # modify the output url for expire header handling\r\n if settings.ASSETS_EXPIRE == 'querystring':\r\n outputfile = \"%s?%d\" % (output, last_modified)\r\n elif settings.ASSETS_EXPIRE == 'filename':\r\n name = output.rsplit('.', 1)\r\n if len(name) > 1: return \"%s.%d.%s\" % (name[0], last_modified, name[1])\r\n else: outputfile = \"%s.%d\" % (name, last_modified)\r\n elif not settings.ASSETS_EXPIRE:\r\n outputfile = output\r\n else:\r\n raise ValueError('Unknown value for ASSETS_EXPIRE option: %s' %\r\n settings.ASSETS_EXPIRE)\r\n\r\n context.update({'ASSET_URL': _absurl(outputfile)})\r\n try:\r\n result = self.childnodes.render(context)\r\n finally:\r\n context.pop()\r\n return result", "def _update_dataset(lc, geno, dataset, delete_resources=False):\n package_update_required = False\n if not _dataset_match(geno, dataset):\n dataset.update(_dataset_fields(geno))\n package_update_required = True\n\n chromos = dict(\n (chromo['resource_name'], chromo) for chromo in geno['resources'])\n\n # migrate recombinant1 datasets which had no resource\n # name to identify resource\n if (len(chromos) == 1 and len(dataset['resources']) == 1\n and dataset['resources'][0]['name'] == 'data'):\n dataset['resources'][0]['name'] = geno['resources'][0]['resource_name']\n package_update_required = True\n\n # collect updated resources\n out_resources = []\n for resource in dataset['resources']:\n if resource['name'] not in chromos:\n if not delete_resources:\n out_resources.append(resource)\n continue\n\n r = chromos.pop(resource['name'])\n\n if not _resource_match(r, resource):\n resource.update(_resource_fields(r))\n package_update_required = True\n\n out_resources.append(resource)\n\n # missing resources\n if chromos:\n out_resources.extend(\n # dummy url for old ckan compatibility reasons\n dict(_resource_fields(chromo), url='http://')\n for chromo in chromos.values())\n package_update_required = True\n\n if (package_update_required or\n len(out_resources) != len(dataset['resources'])):\n dataset['resources'] = out_resources\n dataset = lc.call_action('package_update', dataset)\n\n return dataset", "def merge_metadata(self):\n\n # Load merge metadata if necessary\n if not self._merges:\n self._merges = VersionedProperty(self.url, opts[\"prop\"])\n self._merges.load(self)\n\n return self._merges", "def merge(self, obj):\n mlist = self.selected_handles()\n \n if len(mlist) != 2:\n msg = _(\"Cannot merge citations.\")\n msg2 = _(\"Exactly two citations must be selected to perform a \"\n \"merge. A second citation can be selected by holding \"\n \"down the control key while clicking on the desired \"\n \"citation.\")\n ErrorDialog(msg, msg2)\n else:\n citation1 = self.dbstate.db.get_citation_from_handle(mlist[0])\n citation2 = self.dbstate.db.get_citation_from_handle(mlist[1])\n if not citation1.get_reference_handle() == \\\n citation2.get_reference_handle(): \n msg = _(\"Cannot merge citations.\")\n msg2 = _(\"The two selected citations must have the same \"\n \"source to perform a merge. 
If you want to merge \"\n \"these two citations, then you must merge the \"\n \"sources first.\")\n ErrorDialog(msg, msg2)\n else:\n MergeCitation(self.dbstate, self.uistate, mlist[0], mlist[1])", "def setOptManagerResources(o): # pylint: disable=global-statement\n # pylint: disable=global-statement\n global OPT_MANAGER_RESOURCES_PGAAS\n OPT_MANAGER_RESOURCES_PGAAS = \"{}/pgaas\".format(o)", "def load(self):\n self.suite.load()\n self.resource_map = {}\n dirlist = os.listdir(self.resources)\n for resource_name in (name for name in dirlist\n if os.path.isfile(os.path.join(self.resources,name)) and\n os.path.splitext(name)[1].lower() == '.fbr'):\n try:\n f = open(os.path.join(self.resources,resource_name),'rU')\n expr = f.read()\n d = eval(expr)\n resource_id = os.path.splitext(resource_name)[0].lower()\n d['id'] = resource_id\n kind = d['kind']\n del d['kind']\n self.resource_map[resource_id] = Resource.create(kind,**d)\n finally:\n f.close()", "def change_merged(self, event):\n pass", "def __add__(self, other):\n if self.xml.find('mosromgrmeta') is None or isinstance(other, RunningOrderControl):\n return other.merge(self)\n raise MosCompletedMergeError(\"Cannot merge completed MOS file\")", "def create(self):\n\n if len(self.filenames) != len(self.download_links):\n print(\"Must have the same amount off file names than download links\", file=sys.stderr)\n return None\n\n resources = []\n\n #Creating the resource dict\n for i in range(len(self.filenames)):\n resources.append(\n {\n \"id\": self.ids[i],\n \"description\":\"\",\n \"filename\":self.filenames[i],\n \"download_link\":self.download_links[i]\n }\n )\n\n\n #The JSON\n data = {\n \"dataset\":{\n \"project\":self.project,\n \"version\":self.version,\n \"description\":self.description,\n \"project_link\":self.project_link,\n \"data_path\": self.data_path,\n \"metadata\": self.metadata,\n \"files_type\":self.file_type,\n \"protocole\":self.protocole,\n \"resources\":resources,\n \"data_representation\":self.data_representation\n }\n }\n with open(self.dataset_path, \"w\") as json_file:\n json_file.write(json.dumps(data))", "def _build_pre_resources_template(self, output_filename=\"{}_pr_r.json\"):\n template = actions.ActionsTemplate()\n\n for resource_type, resource_cls in six.iteritems(AVAILABLE_RESOURCES):\n resource_cls.register_type_pre_resources_template(self, template)\n for r in self.get_resources(resource_type):\n r.register_pre_resources_template(template)\n\n if template:\n output_filename = output_filename.format(self._get_next_build_sequence_id())\n self.puts(colored.cyan(output_filename))\n with open(os.path.join(self.build_path, output_filename), 'w') as f:\n f.write(template.to_json(indent=4))", "def __init__(self, config: GenedescConfigParser, species: str, go_relations: List[str] = None,\n do_relations: List[str] = None, use_cache: bool = False):\n self.config = config\n raw_files_source = config.get_wb_raw_file_sources()\n cache_location = config.get_cache_dir()\n release_version = config.get_wb_release()\n organisms_info = config.get_wb_organisms_info()\n project_id = organisms_info[species][\"project_id\"]\n self.sister_sp_fullname = \"\"\n if \"main_sister_species\" in organisms_info[species] and \"full_name\" in \\\n organisms_info[organisms_info[species][\"main_sister_species\"]]:\n self.sister_sp_fullname = organisms_info[organisms_info[species][\"main_sister_species\"]][\"full_name\"]\n self.orth_fullnames = \"\"\n if \"ortholog\" in organisms_info[species] and all([\"full_name\" in organisms_info[ortholog_sp] 
for ortholog_sp in\n organisms_info[species][\"ortholog\"]]):\n self.orth_fullnames = [organisms_info[ortholog_sp][\"full_name\"] for ortholog_sp in\n organisms_info[species][\"ortholog\"]]\n expression_cluster_anatomy_prefix = organisms_info[species][\"ec_anatomy_prefix\"] if \\\n \"ec_anatomy_prefix\" in organisms_info[species] else None\n expression_cluster_molreg_prefix = organisms_info[species][\"ec_molreg_prefix\"] if \\\n \"ec_molreg_prefix\" in organisms_info[species] else None\n expression_cluster_genereg_prefix = organisms_info[species][\"ec_genereg_prefix\"] if \\\n \"ec_genereg_prefix\" in organisms_info[species] else None\n super().__init__(go_relations=go_relations, do_relations=do_relations, use_cache=use_cache)\n self.gene_data_cache_path = os.path.join(cache_location, \"wormbase\", release_version, \"species\", species,\n project_id, \"annotation\", species + '.' + project_id +\n '.' + release_version + \".geneIDs.txt.gz\")\n self.gene_data_url = raw_files_source + '/' + release_version + '/species/' + species + '/' + project_id + \\\n '/annotation/' + species + '.' + project_id + '.' + release_version + '.geneIDs.txt.gz'\n self.go_ontology_cache_path = os.path.join(cache_location, \"wormbase\", release_version, \"ONTOLOGY\",\n \"gene_ontology.\" + release_version + \".obo\")\n self.go_ontology_url = raw_files_source + '/' + release_version + '/ONTOLOGY/gene_ontology.' + \\\n release_version + '.obo'\n self.go_associations_cache_path = os.path.join(cache_location, \"wormbase\", release_version, \"species\", species,\n project_id, \"annotation\", species + \".\" + project_id + \".\" + release_version +\n \".gene_association.wb.gz\")\n self.go_associations_url = raw_files_source + '/' + release_version + '/species/' + species + '/' + project_id + \\\n '/annotation/' + species + '.' + project_id + '.' + release_version + \".gene_association.wb.gz\"\n self.do_ontology_url = raw_files_source + '/' + release_version + '/ONTOLOGY/disease_ontology.' + \\\n release_version + '.obo'\n self.do_ontology_cache_path = os.path.join(cache_location, \"wormbase\", release_version, \"ONTOLOGY\",\n \"disease_ontology.\" + release_version + \".obo\")\n self.do_associations_cache_path = os.path.join(cache_location, \"wormbase\", release_version, \"ONTOLOGY\",\n \"disease_associations.by_orthology.\" + release_version +\n \".tsv.txt\")\n self.do_associations_url = raw_files_source + '/' + release_version + \\\n '/ONTOLOGY/disease_association.by_orthology.' + release_version + '.tsv.txt'\n self.do_associations_new_cache_path = os.path.join(cache_location, \"wormbase\", release_version, 'ONTOLOGY',\n 'disease_association.' + release_version + '.daf.txt')\n self.do_associations_new_url = raw_files_source + '/' + release_version + '/ONTOLOGY/disease_association.' + \\\n release_version + '.daf.txt'\n self.orthology_url = raw_files_source + '/' + release_version + '/species/' + species + '/' + project_id + \\\n '/annotation/' + species + '.' + project_id + '.' + release_version + '.orthologs.txt.gz'\n self.orthology_cache_path = os.path.join(cache_location, \"wormbase\", release_version, \"species\", species,\n project_id, \"annotation\", species + '.' + project_id + '.' +\n release_version + \".orthologs.txt.gz\")\n self.orthologs = defaultdict(lambda: defaultdict(list))\n self.protein_domain_url = raw_files_source + '/' + release_version + '/species/' + species + '/' + \\\n project_id + '/annotation/' + species + '.' + project_id + '.' 
+ release_version + \\\n '.protein_domains.csv.gz'\n self.protein_domain_cache_path = os.path.join(cache_location, \"wormbase\", release_version, \"species\", species,\n project_id, \"annotation\", species + '.' + project_id +\n '.' + release_version + \".protein_domains.csv.gz\")\n self.protein_domains = defaultdict(list)\n self.expression_ontology_cache_path = os.path.join(cache_location, \"wormbase\", release_version, \"ONTOLOGY\",\n \"anatomy_ontology.\" + release_version + \".obo\")\n self.expression_ontology_url = raw_files_source + '/' + release_version + '/ONTOLOGY/anatomy_ontology.' + \\\n release_version + '.obo'\n self.expression_associations_cache_path = os.path.join(cache_location, \"wormbase\", release_version, \"ONTOLOGY\",\n \"anatomy_association.\" + release_version + \".wb\")\n self.expression_associations_url = raw_files_source + '/' + release_version + \\\n '/ONTOLOGY/anatomy_association.' + release_version + '.wb'\n self.expression_cluster_anatomy_url = self._get_expression_cluster_url(\n prefix=expression_cluster_anatomy_prefix, ec_type=\"anatomy\", release_version=release_version)\n self.expression_cluster_anatomy_cache_path = self._get_expression_cluster_cache_path(\n prefix=expression_cluster_anatomy_prefix, ec_type=\"anatomy\", release_version=release_version,\n cache_location=cache_location)\n self.expression_cluster_anatomy_data = defaultdict(list) if self.expression_cluster_anatomy_url else None\n self.expression_cluster_molreg_url = self._get_expression_cluster_url(\n prefix=expression_cluster_molreg_prefix, ec_type=\"molReg\", release_version=release_version)\n self.expression_cluster_molreg_cache_path = self._get_expression_cluster_cache_path(\n prefix=expression_cluster_molreg_prefix, ec_type=\"molReg\", release_version=release_version,\n cache_location=cache_location)\n self.expression_cluster_molreg_data = defaultdict(list) if self.expression_cluster_molreg_url else None\n self.expression_cluster_genereg_url = self._get_expression_cluster_url(\n prefix=expression_cluster_genereg_prefix, ec_type=\"geneReg\", release_version=release_version)\n self.expression_cluster_genereg_cache_path = self._get_expression_cluster_cache_path(\n prefix=expression_cluster_genereg_prefix, ec_type=\"geneReg\", release_version=release_version,\n cache_location=cache_location)\n self.expression_cluster_genereg_data = defaultdict(list) if self.expression_cluster_genereg_url else None", "def merge_bam_files(self, inputs, output, sample_id, rg_id=None,\n platform='illumina', library='A', sort_order=\"readname\"):\n if len(inputs) > 1:\n if sort_order == \"readname\":\n sort_options = \"-n\"\n else:\n sort_options = \"\"\n \n header_file = p.as_temp(\"%s.header\" % output)\n\n with open(header_file, \"w\") as header:\n for ix, input_file in enumerate(inputs):\n # TODO use pysam here\n in_header = pysam.Samfile(input_file,'rb',check_header=False, check_sq=False).text\n RG_lines = filter(lambda x: x.startswith(\"@RG\"), in_header.split(\"\\n\"))\n if len(RG_lines) == 1:\n rg_id = re.findall(\"ID:([a-zA-Z0-9_\\-\\.]*)\", RG_lines[0])[0]\n else:\n rg_id = re.sub(\"\\.bam$\", \"\", os.path.basename(input_file))\n header.write(\"@RG\\tID:%s\\tPU:%s\\tDS:%s\\tLB:%s\\tPL:%s\\tSM:%s\\n\" % (rg_id, rg_id, input_file, library, platform, sample_id))\n merge_options = \"-h %s\" % (header_file)\n\n self.cmd(\"{samtools} merge \\\n {sort_options} \\\n {merge_options} \\\n {output_bam} {input_bam_list}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n sort_options=sort_options,\n 
merge_options=merge_options,\n output_bam=output,\n input_bam_list=\" \".join(inputs),\n ),\n shell=True)\n else:\n # TODO use pysam here\n input_file = inputs[0]\n in_header = pysam.Samfile(input_file,'rb',check_header=False, check_sq=False).text\n RG_lines = filter(lambda x: x.startswith(\"@RG\"), in_header.split(\"\\n\"))\n if len(RG_lines) == 1:\n rg_id = re.findall(\"ID:([a-zA-Z0-9_\\-\\.]*)\", RG_lines[0])[0]\n else:\n rg_id = re.sub(\"\\.bam$\", \"\", os.path.basename(input_file))\n with open(p.as_temp(\"%s.header\" % output), \"w\") as header:\n header.write(\"@RG\\tID:%s\\tPU:%s\\tDS:%s\\tLB:%s\\tPL:%s\\tSM:%s\\n\" % (rg_id, rg_id, input_file, library, platform, sample_id))\n \n self.cmd(\"{picard}/AddOrReplaceReadGroups.jar \\\n INPUT={in_bam} \\\n OUTPUT={out_bam} \\\n QUIET=false \\\n VALIDATION_STRINGENCY=LENIENT\\\n COMPRESSION_LEVEL=5 \\\n RGID={rg_id} \\\n RGSM={sample_id} \\\n RGPU={rg_id} \\\n RGLB=A \\\n RGPL=illumina \\\n RGDS={in_bam}\"\n .format(\n picard=self.cmds[\"picard\"],\n in_bam=inputs[0],\n out_bam=output,\n sample_id=sample_id,\n rg_id=rg_id,\n ),\n shell=True)", "def write_merge_script(s,inputs=[]):\n assert len(inputs)>0\n # hadd determines if we are merging main histograms file, or unfolding files\n hadd = True if s.jobtype == \"MRG\" else False\n s.jobfile = os.path.join(s.submitdir, 'merge_wasym.sh' if hadd else 'munfold_wasym.sh')\n s.outROOT = ('root_' if hadd else 'unfold_')+s.tag+\".root\"\n s.outROOTpath = os.path.join('results','ana_wasym',s.outROOT)\n pre = 'merge' if hadd else 'munfold'\n s.outOU = os.path.join(s.submitdir, pre+'_wasym.out.log')\n s.outER = os.path.join(s.submitdir, pre+'_wasym.err.log')\n s.outLOG = os.path.join(s.submitdir, pre+'_wasym.log.log')\n flist = 'wasym.root.list' if hadd else 'wasym.unfold.list'\n s.outputs += [flist]\n f = open(s.jobfile, \"w\")\n print >>f, SH_PRE%(s.fdic[0],s.fdic[1])\n print >>f,'RMODE=merge'\n print >>f,'nexpected=%d'%len(inputs)\n print >>f,'ntot=0'\n print >>f,'rm -f ${ROOTDIR}/%s ; touch ${ROOTDIR}/%s;'%(flist,flist)\n for fin in inputs:\n fname = fin if hadd else '%s.unfold'%fin\n print >>f,'f=\"${RESDIR}/%s.root\"'%fname\n print >>f,'st=`xrd uct3-xrd.mwt2.org existfile $f`'\n print >>f,'if [ \"$st\" == \"The file exists.\" ]; then'\n # xrootd files: reduce cache size, since hadd is stupid and will eat 100% of RAM\n print >>f,'echo ${RESHOST}/$f?cachesz=1000000 >> ${ROOTDIR}/%s'%flist\n print >>f,'((ntot++))'\n print >>f,'else'\n print >>f,'echo ERROR: failed to locate file $f'\n print >>f,'fi'\n print >>f,'if [ \"$ntot\" -eq \"$nexpected\" ]; then echo \"ALL DONE\"; else echo \"ERROR: missing `expr $nexpected - $ntot` files\"; echo exit 202; exit 202; fi'\n print >>f,'if [ \"$ntot\" -eq \"0\" ]; then echo \"ERROR: no files to merge\"; echo exit 203; exit 203; fi'\n print >>f,\"\"\"\n# a special version of hadd that adds files in chunks of 20\nfunction hadd2() {\n local per\n per=30 #20\n fin=$1\n opts=$2\n fout=$3\n shift\n n=`cat $fin | wc -l`\n ngrp=`expr $n / $per`\n nrem=`expr $n % $per`\n if [ \\\"$nrem\\\" == \\\"0\\\" ]; then ngrp=`expr $ngrp - 1`; fi\n for igrp in `seq 0 $ngrp`; do\n\timin=`expr $per \\* $igrp`\n\timax=`expr $per \\* $igrp + $per`\n\tif [ \\\"$imax\\\" -gt \\\"$n\\\" ]; then imax=`expr $per \\* $igrp + $nrem`; fi\n\t# offset by 1\n\timin=`expr $imin + 1`\n\timax=`expr $imax`\n\tidel=`expr $imax - $imin + 1`\n\techo \\\"===== Part $igrp / $ngrp : $imin to $imax\\\"\n\techo hadd ${opts} \\\"${fout}.TMPHADD_${igrp}.root\\\" `cat $fin | head -n $imax | tail -n 
$idel`\n\thadd ${opts} \\\"${fout}.TMPHADD_${igrp}.root\\\" `cat $fin | head -n $imax | tail -n $idel`\n\tst=$?\n\tif [ \\\"$st\\\" != \\\"0\\\" ]; then\n\t echo \\\"ERROR: merge step $igrp failed. Bailing out...\\\"\n\t return $st\n\tfi\n done\n # remove opts to speed up the last step and prevent creation of additional ntuple cycles;2\n echo hadd ${fout} ${fout}.TMPHADD_*root*\n hadd ${fout} ${fout}.TMPHADD_*root*\n st=$?\n rm -f ${fout}.TMPHADD_*root*\n return $st\n}\n \"\"\"\n if False:\n if hadd:\n print >>f, 'echo hadd -O %s `cat ${ROOTDIR}/%s`'%(s.outROOTpath,flist)\n print >>f, 'hadd -O %s `cat ${ROOTDIR}/%s`'%(s.outROOTpath,flist)\n else:\n print >>f, 'echo hadd -T %s `cat ${ROOTDIR}/%s`'%(s.outROOTpath,flist)\n print >>f, 'hadd -T %s `cat ${ROOTDIR}/%s`'%(s.outROOTpath,flist)\n else:\n print >>f, 'hadd2 ${ROOTDIR}/%s \"%s\" %s'%(flist,\"-O\" if hadd else \"-T\",s.outROOTpath)\n print >>f, \"status=$?\"\n print >>f, SH_POST\n f.close()\n os.system('chmod +x %s'%s.jobfile)\n s.write_submit_script()\n return True", "def __init__(self, resources=None): # noqa: E501\n self.openapi_types = {\n 'resources': List[WorkspaceResourceEntity]\n }\n\n self.attribute_map = {\n 'resources': 'resources'\n }\n\n self._resources = resources", "def test_load_response_descriptor_projects_release_release_resource_spaces(self):\n pass", "def merge(self, obj):\n mlist = self.selected_handles()\n\n if len(mlist) != 2:\n msg = _(\"Cannot merge media objects.\")\n msg2 = _(\"Exactly two media objects must be selected to perform a \"\n \"merge. A second object can be selected by holding down the \"\n \"control key while clicking on the desired object.\")\n ErrorDialog(msg, msg2)\n else:\n MergeMedia(self.dbstate, self.uistate, mlist[0], mlist[1])", "def resource(self, n):\n\n cfg = self.read()\n\n for res in cfg.get('Resources', []):\n res_name = res.get('Resource')\n\n if res_name == n:\n return ConfigResource(res)", "def CreateResources(self, manifests, region):\n resource_dict = manifest_util.ParseDeployConfig(self.messages, manifests,\n region)\n msg_template = 'Created Cloud Deploy resource: {}.'\n # Create delivery pipeline first.\n # In case user has both types of pipeline definition in the same\n # config file.\n pipelines = resource_dict[manifest_util.DELIVERY_PIPELINE_KIND_V1BETA1]\n if pipelines:\n operation_dict = {}\n for resource in pipelines:\n operation_dict[resource.name] = self.CreateDeliveryPipeline(resource)\n self.operation_client.CheckOperationStatus(operation_dict, msg_template)\n # In case user has both types of target definition in the same\n # config file.\n targets = resource_dict[manifest_util.TARGET_KIND_V1BETA1]\n if targets:\n operation_dict = {}\n for resource in targets:\n operation_dict[resource.name] = target_util.PatchTarget(resource)\n self.operation_client.CheckOperationStatus(operation_dict, msg_template)\n # Create automation resource.\n automations = resource_dict[manifest_util.AUTOMATION_KIND]\n operation_dict = {}\n for resource in automations:\n operation_dict[resource.name] = automation_util.PatchAutomation(resource)\n self.operation_client.CheckOperationStatus(operation_dict, msg_template)\n # Create custom target type resource.\n custom_target_types = resource_dict[manifest_util.CUSTOM_TARGET_TYPE_KIND]\n operation_dict = {}\n for resource in custom_target_types:\n operation_dict[resource.name] = (\n custom_target_type_util.PatchCustomTargetType(resource)\n )\n self.operation_client.CheckOperationStatus(operation_dict, msg_template)", "def 
test_merge_repl(self):\n ars = self.ar[2009][11]['general']\n ars2 = awstats_reader.AwstatsReader(test_file_dir,\n 'joshuakugler.com')[2009][11]['general']\n self.assertEqual(ars.merge(ars2, 'LastLine', 'signature'), '')" ]
[ "0.5547208", "0.54064465", "0.5403034", "0.53563666", "0.53249174", "0.5320973", "0.53159565", "0.5253915", "0.5152775", "0.51470643", "0.5145946", "0.5095782", "0.50935775", "0.5056793", "0.501863", "0.50170356", "0.49941415", "0.4964399", "0.49395525", "0.4936977", "0.49245661", "0.49073938", "0.48995975", "0.48994577", "0.48935193", "0.48932666", "0.4886419", "0.48360097", "0.48348394", "0.48240554", "0.48230165", "0.4821076", "0.4821076", "0.4821076", "0.4821076", "0.48138437", "0.48064637", "0.4797145", "0.47907194", "0.47876963", "0.47850648", "0.4770838", "0.47618756", "0.47613925", "0.47597164", "0.47474575", "0.47396702", "0.4739533", "0.4723089", "0.47220472", "0.47213975", "0.4717753", "0.47170317", "0.47126296", "0.4712448", "0.47090828", "0.47086453", "0.4708525", "0.47062054", "0.4700874", "0.4700874", "0.4700874", "0.46984398", "0.46965238", "0.4690443", "0.4687691", "0.4680793", "0.46802104", "0.46800116", "0.46784824", "0.46721035", "0.46710357", "0.46511957", "0.4644209", "0.4644209", "0.4644209", "0.46440327", "0.46365276", "0.4628818", "0.45957413", "0.45956963", "0.45900232", "0.45887", "0.4587564", "0.45858663", "0.45844764", "0.4582263", "0.45758015", "0.45709392", "0.45700675", "0.4560697", "0.45588234", "0.45558962", "0.4552935", "0.45512265", "0.4551101", "0.45503062", "0.45483342", "0.45449498", "0.4540639" ]
0.82576424
0
Create a dictionary based on the PLWN-to-SUMO ontology mapping file. The dictionary format and the PLWN-to-SUMO mapping file format are presented below.
def get_plwn2sumo_dict(self):
    if not os.path.exists(self.resources().mapping_sumo_file()):
        raise IOError(
            "%s file not found!" % \
            self.resources().mapping_sumo_file()
        )
    plwn2sumo_dict = defaultdict(set)
    with open(self.resources().mapping_sumo_file()) as sumofile:
        next(sumofile)
        for line in sumofile:
            synset_id = int(line.strip().split(';')[0])
            sumo = line.strip().split(';')[-2]
            plwn2sumo_dict[sumo].add(synset_id)
    return plwn2sumo_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mapping_stratum(download_files =True):\r\n # get code description _index \r\n ix_= AGSO_PROPERTIES['props_codes'].index('name')\r\n def mfunc_(d): \r\n \"\"\" Set individual layer in dict of properties \"\"\"\r\n _p= {c: k.lower() if c not in ('code', 'label', 'name') else k \r\n for c, k in zip(AGSO_PROPERTIES['props_codes'], d) }\r\n id_= d[ix_].replace('/', '_').replace(\r\n ' ', '_').replace('\"', '').replace(\"'\", '').lower()\r\n return id_, _p \r\n rock_and_structural_props =list()\r\n for agso_data in tuple(set_agso_properties(download_files)): \r\n # remove the header of the property file\r\n rock_and_structural_props.append(\r\n dict(map( lambda x: mfunc_(x), agso_data[1:])))\r\n \r\n return tuple(rock_and_structural_props)", "def map_rule4(self):\n odml.terminology.terminologies['map'] = parse(\"\"\"\n S1[T1]\n - P2\n S2[T2]\n - P1\n S3[T3]\n - P1\n - P2\n - P3\n \"\"\")", "def _makeimap(self):\n self.map_[\"source\"] = \"nasa\"\n self.map_[\"instrument\"] = \"goes\"\n self.map_[\"physobs\"] = \"irradiance\"\n self.map_[\"provider\"] = \"sdac\"", "def makeMapping(globalMap):\n \n from memops.xml.Implementation import bool2str, str2bool\n\n # Set up top level dictionaries\n loadMaps = globalMap.get('loadMaps')\n mapsByGuid = globalMap.get('mapsByGuid')\n\n abstractTypes = globalMap.get('ANAP').get('abstractTypes')\n exolinks = globalMap.get('ANAP').get('exolinks')\n\n # DataType GraphicsHandlerType\n currentMap = {}\n abstractTypes['GraphicsHandlerType'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-10-03-11:26:03_00001'] = currentMap\n loadMaps['ANAP.GraphicsHandlerType'] = currentMap\n currentMap['tag'] = 'ANAP.GraphicsHandlerType'\n currentMap['type'] = 'simple'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-03-11:26:03_00001'\n currentMap['toStr'] = 'text'\n currentMap['cnvrt'] = 'text'\n\n # Class AnalysisProfile\n currentMap = {}\n abstractTypes['AnalysisProfile'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00004'] = currentMap\n loadMaps['ANAP.AnalysisProfile'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00004'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'analysisProfiles'\n currentMap['isTop'] = True\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.AnalysisProfile\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute AnalysisProfile.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute AnalysisProfile.bgColor\n currentMap = {}\n contentMap['bgColor'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00031'] = currentMap\n loadMaps['ANAP.AnalysisProfile.bgColor'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.bgColor'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00031'\n currentMap['name'] = 'bgColor'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['default'] = '#FFFFFF'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00007')\n\n # Attribute AnalysisProfile.createdBy\n contentMap['createdBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00002__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute AnalysisProfile.fgColor\n currentMap = {}\n contentMap['fgColor'] = 
currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00032'] = currentMap\n loadMaps['ANAP.AnalysisProfile.fgColor'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.fgColor'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00032'\n currentMap['name'] = 'fgColor'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['default'] = '#000000'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00007')\n\n # Attribute AnalysisProfile.font\n currentMap = {}\n contentMap['font'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00030'] = currentMap\n loadMaps['ANAP.AnalysisProfile.font'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.font'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00030'\n currentMap['name'] = 'font'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute AnalysisProfile.graphicsHandler\n currentMap = {}\n contentMap['graphicsHandler'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00040'] = currentMap\n loadMaps['ANAP.AnalysisProfile.graphicsHandler'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.graphicsHandler'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00040'\n currentMap['name'] = 'graphicsHandler'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = 'Tk'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-10-03-11:26:03_00001')\n\n # Attribute AnalysisProfile.guid\n contentMap['guid'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:26_00002')\n\n # Attribute AnalysisProfile.isModifiable\n contentMap['isModifiable'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-17-14:16:26_00010__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute AnalysisProfile.lastUnlockedBy\n contentMap['lastUnlockedBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00003__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute AnalysisProfile.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00029'] = currentMap\n loadMaps['ANAP.AnalysisProfile.name'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00029'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute AnalysisProfile.panView\n currentMap = {}\n contentMap['panView'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00036'] = currentMap\n loadMaps['ANAP.AnalysisProfile.panView'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.panView'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00036'\n currentMap['name'] = 'panView'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = True\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute AnalysisProfile.sendBugReports\n currentMap = {}\n contentMap['sendBugReports'] = 
currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00004'] = currentMap\n loadMaps['ANAP.AnalysisProfile.sendBugReports'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.sendBugReports'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00004'\n currentMap['name'] = 'sendBugReports'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = 'maybe'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2010-11-17-16:21:33_00001')\n\n # Attribute AnalysisProfile.transientDialogs\n currentMap = {}\n contentMap['transientDialogs'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00037'] = currentMap\n loadMaps['ANAP.AnalysisProfile.transientDialogs'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.transientDialogs'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00037'\n currentMap['name'] = 'transientDialogs'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = True\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute AnalysisProfile.transientWindows\n currentMap = {}\n contentMap['transientWindows'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00038'] = currentMap\n loadMaps['ANAP.AnalysisProfile.transientWindows'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.transientWindows'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00038'\n currentMap['name'] = 'transientWindows'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = False\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute AnalysisProfile.twoCharShortcuts\n currentMap = {}\n contentMap['twoCharShortcuts'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00039'] = currentMap\n loadMaps['ANAP.AnalysisProfile.twoCharShortcuts'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.twoCharShortcuts'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00039'\n currentMap['name'] = 'twoCharShortcuts'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = False\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute AnalysisProfile.useCrosshair\n currentMap = {}\n contentMap['useCrosshair'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00034'] = currentMap\n loadMaps['ANAP.AnalysisProfile.useCrosshair'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.useCrosshair'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00034'\n currentMap['name'] = 'useCrosshair'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = True\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute AnalysisProfile.useGlobalShortcuts\n currentMap = {}\n contentMap['useGlobalShortcuts'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00035'] = currentMap\n loadMaps['ANAP.AnalysisProfile.useGlobalShortcuts'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.useGlobalShortcuts'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00035'\n currentMap['name'] = 'useGlobalShortcuts'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n 
currentMap['default'] = False\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute AnalysisProfile.userEmail\n currentMap = {}\n contentMap['userEmail'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00003'] = currentMap\n loadMaps['ANAP.AnalysisProfile.userEmail'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.userEmail'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00003'\n currentMap['name'] = 'userEmail'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-09-12-18:31:28_00003')\n\n # Attribute AnalysisProfile.userName\n currentMap = {}\n contentMap['userName'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00001'] = currentMap\n loadMaps['ANAP.AnalysisProfile.userName'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.userName'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00001'\n currentMap['name'] = 'userName'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute AnalysisProfile.userOrganisation\n currentMap = {}\n contentMap['userOrganisation'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00002'] = currentMap\n loadMaps['ANAP.AnalysisProfile.userOrganisation'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.userOrganisation'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2010-11-17-16:21:37_00002'\n currentMap['name'] = 'userOrganisation'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute AnalysisProfile.webBrowser\n currentMap = {}\n contentMap['webBrowser'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00033'] = currentMap\n loadMaps['ANAP.AnalysisProfile.webBrowser'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.webBrowser'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00033'\n currentMap['name'] = 'webBrowser'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Role AnalysisProfile.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role AnalysisProfile.colorSchemes\n currentMap = {}\n contentMap['colorSchemes'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00026'] = currentMap\n loadMaps['ANAP.AnalysisProfile.colorSchemes'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.colorSchemes'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00026'\n currentMap['name'] = 'colorSchemes'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('ANAP').get('abstractTypes')\n\n # Role AnalysisProfile.macros\n currentMap = {}\n contentMap['macros'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00022'] = currentMap\n loadMaps['ANAP.AnalysisProfile.macros'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.macros'\n currentMap['type'] = 'child'\n currentMap['guid'] = 
'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00022'\n currentMap['name'] = 'macros'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('ANAP').get('abstractTypes')\n\n # Role AnalysisProfile.marksColor\n currentMap = {}\n contentMap['marksColor'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00028'] = currentMap\n loadMaps['ANAP.AnalysisProfile.marksColor'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.marksColor'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00028'\n currentMap['name'] = 'marksColor'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['implSkip'] = True\n currentMap['copyOverride'] = True\n\n # Role AnalysisProfile.refExpProfiles\n currentMap = {}\n contentMap['refExpProfiles'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00024'] = currentMap\n loadMaps['ANAP.AnalysisProfile.refExpProfiles'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.refExpProfiles'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00024'\n currentMap['name'] = 'refExpProfiles'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('ANAP').get('abstractTypes')\n\n # Role AnalysisProfile.residueProfiles\n currentMap = {}\n contentMap['residueProfiles'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00020'] = currentMap\n loadMaps['ANAP.AnalysisProfile.residueProfiles'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.residueProfiles'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00020'\n currentMap['name'] = 'residueProfiles'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('ANAP').get('abstractTypes')\n\n # Role AnalysisProfile.rulersColor\n currentMap = {}\n contentMap['rulersColor'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00027'] = currentMap\n loadMaps['ANAP.AnalysisProfile.rulersColor'] = currentMap\n currentMap['tag'] = 'ANAP.AnalysisProfile.rulersColor'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00027'\n currentMap['name'] = 'rulersColor'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['implSkip'] = True\n currentMap['copyOverride'] = True\n # End of AnalysisProfile\n\n currentMap = abstractTypes.get('AnalysisProfile')\n aList = ['createdBy', 'graphicsHandler', 'guid', 'isModifiable', 'lastUnlockedBy', 'name', 'panView', 'sendBugReports', 'transientDialogs', 'transientWindows', 'twoCharShortcuts', 'useCrosshair', 'useGlobalShortcuts', 'userEmail', 'webBrowser']\n currentMap['headerAttrs'] = aList\n aList = ['bgColor', 'fgColor', 'font', 'userName', 'userOrganisation', 'marksColor', 'rulersColor']\n currentMap['simpleAttrs'] = aList\n aList = ['residueProfiles', 'refExpProfiles', 'macros', 'colorSchemes', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['colorSchemes', 'macros', 'refExpProfiles', 'residueProfiles']\n currentMap['children'] = aList\n\n # Class ColorScheme\n currentMap = {}\n abstractTypes['ColorScheme'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-08-11:06:22_00002'] = currentMap\n 
loadMaps['ANAP.ColorScheme'] = currentMap\n currentMap['tag'] = 'ANAP.ColorScheme'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-08-11:06:22_00002'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'colorSchemes'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.ColorScheme\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute ColorScheme.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute ColorScheme.colors\n currentMap = {}\n contentMap['colors'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00043'] = currentMap\n loadMaps['ANAP.ColorScheme.colors'] = currentMap\n currentMap['tag'] = 'ANAP.ColorScheme.colors'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00043'\n currentMap['name'] = 'colors'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00007')\n\n # Attribute ColorScheme.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00007'] = currentMap\n loadMaps['ANAP.ColorScheme.name'] = currentMap\n currentMap['tag'] = 'ANAP.ColorScheme.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00007'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Role ColorScheme.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of ColorScheme\n\n currentMap = abstractTypes.get('ColorScheme')\n aList = ['colors', 'name']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class Macro\n currentMap = {}\n abstractTypes['Macro'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-08-11:06:22_00001'] = currentMap\n loadMaps['ANAP.Macro'] = currentMap\n currentMap['tag'] = 'ANAP.Macro'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-08-11:06:22_00001'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'macros'\n currentMap['objkey'] = 'serial'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.Macro\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute Macro.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute Macro.details\n currentMap = {}\n contentMap['details'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00005'] = currentMap\n loadMaps['ANAP.Macro.details'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.details'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00005'\n currentMap['name'] = 'details'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00036')\n\n # Attribute Macro.function\n currentMap = {}\n contentMap['function'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00002'] = currentMap\n loadMaps['ANAP.Macro.function'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.function'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 
'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00002'\n currentMap['name'] = 'function'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute Macro.isInMenu\n currentMap = {}\n contentMap['isInMenu'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-29-13:48:16_00005'] = currentMap\n loadMaps['ANAP.Macro.isInMenu'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.isInMenu'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-29-13:48:16_00005'\n currentMap['name'] = 'isInMenu'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = False\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute Macro.isInMouseMenu\n currentMap = {}\n contentMap['isInMouseMenu'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-29-13:48:16_00006'] = currentMap\n loadMaps['ANAP.Macro.isInMouseMenu'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.isInMouseMenu'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-29-13:48:16_00006'\n currentMap['name'] = 'isInMouseMenu'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = False\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute Macro.module\n currentMap = {}\n contentMap['module'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00003'] = currentMap\n loadMaps['ANAP.Macro.module'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.module'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00003'\n currentMap['name'] = 'module'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute Macro.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:10_00001'] = currentMap\n loadMaps['ANAP.Macro.name'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:10_00001'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute Macro.ordering\n currentMap = {}\n contentMap['ordering'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00004'] = currentMap\n loadMaps['ANAP.Macro.ordering'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.ordering'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00004'\n currentMap['name'] = 'ordering'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['proc'] = 'direct'\n currentMap['default'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute Macro.path\n currentMap = {}\n contentMap['path'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00001'] = currentMap\n loadMaps['ANAP.Macro.path'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.path'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00001'\n currentMap['name'] = 'path'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = 
mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00003')\n\n # Attribute Macro.serial\n currentMap = {}\n contentMap['serial'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:09_00001'] = currentMap\n loadMaps['ANAP.Macro.serial'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.serial'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:09_00001'\n currentMap['name'] = 'serial'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute Macro.shortcut\n currentMap = {}\n contentMap['shortcut'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00006'] = currentMap\n loadMaps['ANAP.Macro.shortcut'] = currentMap\n currentMap['tag'] = 'ANAP.Macro.shortcut'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-15:11:12_00006'\n currentMap['name'] = 'shortcut'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Role Macro.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of Macro\n\n currentMap = abstractTypes.get('Macro')\n aList = ['function', 'isInMenu', 'isInMouseMenu', 'module', 'ordering', 'serial', 'shortcut']\n currentMap['headerAttrs'] = aList\n aList = ['details', 'name', 'path']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class RefExpProfile\n currentMap = {}\n abstractTypes['RefExpProfile'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00006'] = currentMap\n loadMaps['ANAP.RefExpProfile'] = currentMap\n currentMap['tag'] = 'ANAP.RefExpProfile'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00006'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'refExpProfiles'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.RefExpProfile\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute RefExpProfile.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute RefExpProfile.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00046'] = currentMap\n loadMaps['ANAP.RefExpProfile.name'] = currentMap\n currentMap['tag'] = 'ANAP.RefExpProfile.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00046'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute RefExpProfile.peakSymbolColors\n currentMap = {}\n contentMap['peakSymbolColors'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00048'] = currentMap\n loadMaps['ANAP.RefExpProfile.peakSymbolColors'] = currentMap\n currentMap['tag'] = 'ANAP.RefExpProfile.peakSymbolColors'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00048'\n currentMap['name'] = 'peakSymbolColors'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00007')\n\n # Attribute RefExpProfile.peakTextColors\n currentMap = {}\n contentMap['peakTextColors'] = 
currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00049'] = currentMap\n loadMaps['ANAP.RefExpProfile.peakTextColors'] = currentMap\n currentMap['tag'] = 'ANAP.RefExpProfile.peakTextColors'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00049'\n currentMap['name'] = 'peakTextColors'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00007')\n\n # Attribute RefExpProfile.refExpNames\n currentMap = {}\n contentMap['refExpNames'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00047'] = currentMap\n loadMaps['ANAP.RefExpProfile.refExpNames'] = currentMap\n currentMap['tag'] = 'ANAP.RefExpProfile.refExpNames'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00047'\n currentMap['name'] = 'refExpNames'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Role RefExpProfile.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role RefExpProfile.negColorSchemes\n currentMap = {}\n contentMap['negColorSchemes'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00045'] = currentMap\n loadMaps['ANAP.RefExpProfile.negColorSchemes'] = currentMap\n currentMap['tag'] = 'ANAP.RefExpProfile.negColorSchemes'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00045'\n currentMap['name'] = 'negColorSchemes'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = True\n\n # Role RefExpProfile.posColorSchemes\n currentMap = {}\n contentMap['posColorSchemes'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00041'] = currentMap\n loadMaps['ANAP.RefExpProfile.posColorSchemes'] = currentMap\n currentMap['tag'] = 'ANAP.RefExpProfile.posColorSchemes'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00041'\n currentMap['name'] = 'posColorSchemes'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = True\n # End of RefExpProfile\n\n currentMap = abstractTypes.get('RefExpProfile')\n aList = ['name']\n currentMap['headerAttrs'] = aList\n aList = ['peakSymbolColors', 'peakTextColors', 'refExpNames', 'negColorSchemes', 'posColorSchemes']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class ResidueProfile\n currentMap = {}\n abstractTypes['ResidueProfile'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00005'] = currentMap\n loadMaps['ANAP.ResidueProfile'] = currentMap\n currentMap['tag'] = 'ANAP.ResidueProfile'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00005'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'residueProfiles'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.ResidueProfile\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute ResidueProfile.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute ResidueProfile.ccpCode\n currentMap = {}\n contentMap['ccpCode'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00051'] = currentMap\n loadMaps['ANAP.ResidueProfile.ccpCode'] = currentMap\n 
currentMap['tag'] = 'ANAP.ResidueProfile.ccpCode'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00051'\n currentMap['name'] = 'ccpCode'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-09-12-18:31:28_00003')\n\n # Attribute ResidueProfile.guiName\n currentMap = {}\n contentMap['guiName'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00052'] = currentMap\n loadMaps['ANAP.ResidueProfile.guiName'] = currentMap\n currentMap['tag'] = 'ANAP.ResidueProfile.guiName'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00052'\n currentMap['name'] = 'guiName'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-09-12-18:31:28_00003')\n\n # Attribute ResidueProfile.molType\n currentMap = {}\n contentMap['molType'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00050'] = currentMap\n loadMaps['ANAP.ResidueProfile.molType'] = currentMap\n currentMap['tag'] = 'ANAP.ResidueProfile.molType'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:52_00050'\n currentMap['name'] = 'molType'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00024')\n\n # Role ResidueProfile.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of ResidueProfile\n\n currentMap = abstractTypes.get('ResidueProfile')\n aList = ['ccpCode', 'guiName', 'molType']\n currentMap['headerAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Out-of-package link to AnalysisProfile\n currentMap = {}\n exolinks['AnalysisProfile'] = currentMap\n loadMaps['ANAP.exo-AnalysisProfile'] = currentMap\n currentMap['tag'] = 'ANAP.exo-AnalysisProfile'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00004'\n currentMap['name'] = 'AnalysisProfile'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.AnalysisProfile\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n\n # Out-of-package link to ColorScheme\n currentMap = {}\n exolinks['ColorScheme'] = currentMap\n loadMaps['ANAP.exo-ColorScheme'] = currentMap\n currentMap['tag'] = 'ANAP.exo-ColorScheme'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-08-11:06:22_00002'\n currentMap['name'] = 'ColorScheme'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.ColorScheme\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n\n # Out-of-package link to Macro\n currentMap = {}\n exolinks['Macro'] = currentMap\n loadMaps['ANAP.exo-Macro'] = currentMap\n currentMap['tag'] = 'ANAP.exo-Macro'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-08-11:06:22_00001'\n currentMap['name'] = 'Macro'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.Macro\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n 
aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032'))\n\n # Out-of-package link to RefExpProfile\n currentMap = {}\n exolinks['RefExpProfile'] = currentMap\n loadMaps['ANAP.exo-RefExpProfile'] = currentMap\n currentMap['tag'] = 'ANAP.exo-RefExpProfile'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00006'\n currentMap['name'] = 'RefExpProfile'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.RefExpProfile\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037'))\n\n # Out-of-package link to ResidueProfile\n currentMap = {}\n exolinks['ResidueProfile'] = currentMap\n loadMaps['ANAP.exo-ResidueProfile'] = currentMap\n currentMap['tag'] = 'ANAP.exo-ResidueProfile'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2008-05-05-15:12:50_00005'\n currentMap['name'] = 'ResidueProfile'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccpnmr.api.AnalysisProfile.ResidueProfile\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00024'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-09-12-18:31:28_00003'))", "def generate_antonym_pairs(config: SettingConfig) -> dict:\n print(f\"Generating initial antonym pairs from RoWordNet @ {datetime.now()}\")\n wn = rwn.RoWordNet()\n\n # Create the output dictionary that will be of type dict(str : set(pair(str, str)) where the key is\n # the PoS and the value is a set of pairs of words of PoS specified by the key\n pairs = dict()\n\n # Iterate over the selected parts of speech\n for part_of_speech in config.pos.values():\n\n pos_pairs = list()\n\n # Return all synsets corresponding to the PoS\n synset_ids = wn.synsets(pos=part_of_speech)\n\n # Iterate all the synsets for the current PoS\n for synset_id in synset_ids:\n\n # Get the synset object specified by synset_id\n synset = wn.synset(synset_id)\n\n # Get the outbound relations of type antonym from\n outbound_relations = filter(lambda x: x[1] == 'near_antonym', wn.outbound_relations(synset_id))\n\n # Iterate outbound relations\n for relation in outbound_relations:\n # Get the synset corresponding to the target of the outbound relation\n target_synset = wn.synset(relation[0])\n\n # Get all the pairs, sort them by first word to keep set entries unique\n current_iteration_pairs = get_cross_synset_pairs(synset, target_synset)\n\n # Add the current set of pairs\n pos_pairs.extend(current_iteration_pairs)\n\n # Get corresponding key in pos dictionary and add the pair to the resulting dictionary\n for key, value in config.pos.items():\n if value == part_of_speech:\n pairs[key] = unique(pos_pairs)\n\n # Return the whole dictionary\n print(f\"Successfully generated antonym paris @ {datetime.now()}\")\n return pairs", "def _makeimap(self):\n self.map_['source'] = 'GOES'\n self.map_['provider'] = 'NOAA'\n self.map_['instrument'] = 'SUVI'\n self.map_['physobs'] = 'flux'", "def _makeimap(self):\n self.map_['source'] = 'NAOJ'\n self.map_['provider'] = 'NRO'\n self.map_['instrument'] = 'NORH'\n self.map_['phyobs'] = ''", "def osp2():\n return dict(\n kloc= range(75,125),\n docu = [3,4], ltex = [2,5],\n sced = [2,3,4], Pmat = [4,5],\n Prec = [3,4, 5],\n Resl = [4], Team = [3],\n acap = [4], aexp 
= [4],\n cplx = [4], data = [4],\n Flex = [3], pcap = [3],\n pcon = [3], pexp = [4],\n pvol = [3], rely = [5],\n ruse = [4], site = [6],\n stor = [3], time = [3],\n tool = [5])", "def _output_dict(self):\n lang = self.ddnGuiLanguage.get()\n\n fileout = os.path.normpath('{}/{}-{}.xml'.\\\n format(self.MapCreator, self.Source, self.ddnCurProject.get()))\n linesout = ['<?xml version=\"1.0\" encoding=\"UTF-8\"?>', \\\n '<DictionarySet xmlns:mc=\"urn:fmosoft-map-creator\" xmlns=\"urn:fmosoft-map-creator\" Version=\"1\">', \\\n ' <Dictionary SourceLanguage=\"{}\" SourceLanguageIsPredefined=\"true\" TargetLanguage=\"{}\" TargetLanguageIsPredefined=\"false\">'.\\\n format(self.Source, self.ddnCurProject.get()), \\\n ]\n for child in self.tree.get_children('approved'):\n vv = self.tree.item(child)['values']\n linesout.append(' <Translation Source=\"{}\" Target=\"{}\"/>'.format(vv[0], vv[1]))\n linesout.append(' </Dictionary>')\n linesout.append('</DictionarySet>')\n linesout.append('')\n\n if os.path.exists(fileout):\n os.remove(fileout)\n\n if fileout:\n output = codecs.open(fileout, mode='w', encoding='utf-8')\n output.write('\\n'.join(linesout))\n output.close()\n pass", "def parse_pl(pl_file_name):\n with open(pl_file_name, 'r') as f:\n # read lines without blank lines\n lines = [l for l in (line.strip() for line in f) if l]\n\n # Skip the first line: UCLA nodes ...\n lines_iter = iter(lines[1:])\n \n pl_dict = dict()\n for l in lines_iter:\n if l.startswith('#'): continue\n\n tokens = l.split()\n assert len(tokens) >= 5\n\n name, x, y, orient = \\\n tokens[0], float(tokens[1]), float(tokens[2]), tokens[4]\n\n # for ICCAD\n orient = 'N'\n\n pl_dict[name] = (x, y, orient)\n\n return pl_dict", "def generate_synonym_pairs(config: SettingConfig) -> dict:\n wn = rwn.RoWordNet()\n\n # Create the output dictionary that will be of type dict(str : set(pair(str, str)) where the key is\n # the PoS and the value is a set of pairs of words of PoS specified by the key\n pairs = dict()\n\n # Iterate over the selected parts of speech\n for part_of_speech in config.pos.values():\n\n pos_pairs = list()\n\n # Return all synsets corresponding to the PoS\n synset_ids = wn.synsets(pos=part_of_speech)\n\n # Iterate all the synsets for the current PoS\n for synset_id in synset_ids:\n # Get the synset object specified by synset_id\n synset = wn.synset(synset_id)\n\n # Get all the pairs, sort them by first word to keep set entries unique\n current_iteration_pairs = get_synset_pairs(synset)\n\n # Append all pairs from the current PoS to the global set\n pos_pairs.extend(current_iteration_pairs)\n\n # Get corresponding key in pos dictionary and add the pair to the resulting dictionary\n for key, value in config.pos.items():\n if value == part_of_speech:\n pairs[key] = unique(pos_pairs)\n\n return pairs", "def _create_dnp3_object_map(self):\n\n feeders = self.file_dict.get(\"feeders\", [])\n measurements = list()\n capacitors = list()\n regulators = list()\n switches = list()\n solarpanels = list()\n batteries = list()\n fuses = list()\n breakers = list()\n reclosers = list()\n energyconsumers = list()\n for x in feeders:\n measurements = x.get(\"measurements\", [])\n capacitors = x.get(\"capacitors\", [])\n regulators = x.get(\"regulators\", [])\n switches = x.get(\"switches\", [])\n solarpanels = x.get(\"solarpanels\", [])\n batteries = x.get(\"batteries\", [])\n fuses = x.get(\"fuses\", [])\n breakers = x.get(\"breakers\", [])\n reclosers = x.get(\"reclosers\", [])\n energyconsumers = x.get(\"energyconsumers\", [])\n\n 
# Unique grouping of measurements - GroupBy Name, Type and Connectivity node\n groupByNameTypeConNode = defaultdict(list) \n for m in measurements:\n groupByNameTypeConNode[m['name']+m.get(\"measurementType\")+m.get(\"ConnectivityNode\")].append(m)\n\n # Create Net Phase DNP3 Points\n for grpM in groupByNameTypeConNode.values():\n\n if grpM[0]['MeasurementClass'] == \"Analog\" and grpM[0].get(\"measurementType\") == \"VA\":\n measurement_type = grpM[0].get(\"measurementType\")\n measurement_id = m.get(\"mRID\")\n \n\n name1 = grpM[0]['name'] + '-' + \"Phases:ABC\" + '-net-VAR-value'\n name2 = grpM[0]['name'] + '-' + \"Phases:ABC\" + '-net-Watts-value'\n name3 = grpM[0]['name'] + '-' + \"Phases:ABC\" + '-net-VA-value'\n\n description1 = \"Name:\" + grpM[0]['name'] + \",MeasurementType:\" + \"net-VAR\" + \",ConnectivityNode:\" + grpM[0].get(\"ConnectivityNode\") +\",SimObject:\" + grpM[0].get(\"SimObject\")\n description2 = \"Name:\" + grpM[0]['name'] + \",MeasurementType:\" + \"net-Watts\" + \",ConnectivityNode:\" + grpM[0].get(\"ConnectivityNode\") +\",SimObject:\" + grpM[0].get(\"SimObject\")\n description3 = \"Name:\" + grpM[0]['name'] + \",MeasurementType:\" + \"net-VA\" + \",ConnectivityNode:\" + grpM[0].get(\"ConnectivityNode\") +\",SimObject:\" + grpM[0].get(\"SimObject\")\n\n self.assign_val_a(\"AI\", 30, 1, self.c_ai, name1, description1, measurement_type, measurement_id)\n self.c_ai += 1\n self.assign_val_a(\"AI\", 30, 1, self.c_ai, name2, description2, measurement_type, measurement_id)\n self.c_ai += 1\n self.assign_val_a(\"AI\", 30, 1, self.c_ai, name3, description3, measurement_type, measurement_id)\n self.c_ai += 1\n\n # Create Each Phase DNP3 Points\n for m in measurements:\n attribute = attribute_map['regulators']['attribute']\n measurement_type = m.get(\"measurementType\")\n measurement_id = m.get(\"mRID\")\n name= m['name'] + '-' + m['phases']\n description = \"Name:\" + m['name'] + \",Phase:\" + m['phases'] + \",MeasurementType:\" + measurement_type + \",ConnectivityNode:\" + m.get(\"ConnectivityNode\") +\",SimObject:\" + m.get(\"SimObject\")\n if m['MeasurementClass'] == \"Analog\":\n self.assign_val_a(\"AI\", 30, 1, self.c_ai, name, description, measurement_type, measurement_id)\n self.c_ai += 1\n\n if m.get(\"measurementType\") == \"VA\":\n measurement_id = m.get(\"mRID\")\n name1 = m['name'] + '-' + m['phases'] + '-VAR-value'\n name2 = m['name'] + '-' + m['phases'] + '-Watts-value'\n name3 = m['name'] + '-' + m['phases'] + '-angle'\n\n description1 = \"Name:\" + m['name'] + \",Phase:\" + m['phases'] + \",MeasurementType:\" + \"VAR\" + \",ConnectivityNode:\" + m.get(\"ConnectivityNode\") +\",SimObject:\" + m.get(\"SimObject\")\n description2 = \"Name:\" + m['name'] + \",Phase:\" + m['phases'] + \",MeasurementType:\" + \"Watt\" + \",ConnectivityNode:\" + m.get(\"ConnectivityNode\") +\",SimObject:\" + m.get(\"SimObject\")\n description3 = \"Name:\" + m['name'] + \",Phase:\" + m['phases'] + \",MeasurementType:\" + \"angle\" + \",ConnectivityNode:\" + m.get(\"ConnectivityNode\") + \",SimObject:\" + m.get(\"SimObject\")\n if m['MeasurementClass'] == \"Analog\":\n self.assign_val_a(\"AI\", 30, 1, self.c_ai, name1, description1, measurement_type, measurement_id)\n self.c_ai += 1\n self.assign_val_a(\"AI\", 30, 1, self.c_ai, name2, description2, measurement_type, measurement_id)\n self.c_ai += 1\n self.assign_val_a(\"AI\", 30, 1, self.c_ai, name3, description3, measurement_type, measurement_id)\n self.c_ai += 1\n\n\n elif m['MeasurementClass'] == \"Discrete\" and 
measurement_type == \"Pos\":\n if \"RatioTapChanger\" in m['name'] or \"reg\" in m[\"SimObject\"]:\n # TODO: Do we need step?\n for r in range(5, 7): # [r==4]: Step, [r==5]: LineDropR, [r==6]:LineDropX \n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name, description, measurement_id, attribute[r])\n self.c_ao += 1\n else:\n self.assign_val_a(\"DI\", 1, 2, self.c_di, name, description, measurement_type, measurement_id)\n self.c_di += 1\n\n for m in capacitors:\n measurement_id = m.get(\"mRID\")\n cap_attribute = attribute_map['capacitors']['attribute'] # type: List[str]\n\n for l in range(0, 4):\n # publishing attribute value for capacitors as Bianry/Analog Input points based on phase attribute\n name = m['name']\n description = \"Name:\" + m['name'] + \"ConductingEquipment_type:LinearShuntCompensator\" + \",Attribute:\" + cap_attribute[l] + \",Phase:\" + m['phases']\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name, description, measurement_id, cap_attribute[l])\n self.c_ao += 1\n for p in range(0, len(m['phases'])):\n name = m['name'] + m['phases'][p]\n description = \"Name:\" + m['name'] + \",ConductingEquipment_type:LinearShuntCompensator\" + \",controlAttribute:\" + cap_attribute[p] + \",Phase:\" + m['phases'][p]\n # description = \"Capacitor, \" + m['name'] + \",\" + \"phase -\" + m['phases'][p] + \", and attribute is - \" + cap_attribute[4]\n self.assign_val_d(\"DO\", 12, 1, self.c_do, name, description, measurement_id, cap_attribute[4])\n self.c_do += 1\n\n for m in regulators:\n reg_attribute = attribute_map['regulators']['attribute']\n # bank_phase = list(m['bankPhases'])\n for n in range(0, 4):\n measurement_id = m.get(\"mRID\")\n name = m['bankName'] + '-' + m['bankPhases']\n description = \"Name:\" + m['bankName'] + \",ConductingEquipment_type:RatioTapChanger_Reg\" +\",Phase:\" + m['bankPhases'] + \",Attribute:\" + reg_attribute[n]\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name, description, measurement_id[0], reg_attribute[n])\n self.c_ao += 1\n self.assign_val_d(\"AI\", 30, 1, self.c_ai, name, description, measurement_id[0], reg_attribute[n])\n self.c_ai += 1\n for i in range(5, 7):\n for j in range(0, len(m['bankPhases'])):\n measurement_id = m.get(\"mRID\")[j]\n name = m['tankName'][j] + '-' + m['bankPhases'][j]\n description = \"Name:\" + m['tankName'][j] + \",ConductingEquipment_type:RatioTapChanger_Reg\"+ \",Phase:\" + m['bankPhases'][j] + \",controlAttribute:\" + reg_attribute[i]\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name, description, measurement_id,reg_attribute[i])\n self.c_ao += 1\n self.assign_val_d(\"AI\", 30, 1, self.c_ai, name, description, measurement_id,reg_attribute[i])\n self.c_ai += 1\n \n for m in solarpanels:\n for k in range(0, len(m['phases'])):\n measurement_id = m.get(\"mRID\")\n name = \"Solar\" + m['name'] + '-' + m['phases'][k] + '-Watts-value'\n description = \"Solarpanel:\" + m['name'] + \",Phase:\" + m['phases'] + \",measurementID:\" + measurement_id\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name, description, measurement_id, \"PowerElectronicsConnection.p\")\n self.c_ao += 1\n \n name1 = \"Solar\" + m['name'] + '-' + m['phases'][k] + '-VAR-value'\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name1, description, measurement_id, \"PowerElectronicsConnection.q\")\n self.c_ao += 1\n \n name2 = \"Solar\" + m['name'] + '-' + m['phases'][k] + '-VAR-Net-value'\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name2, description, measurement_id, \"PowerElectronicsConnection.q\")\n self.c_ao += 1\n \n name3 = \"Solar\"+ m['name'] + '-' + 
m['phases'][k] + '-Watts-Net-value'\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name3, description, measurement_id, \"PowerElectronicsConnection.p\")\n self.c_ao += 1\n\t\t\t\n for m in batteries:\n for l in range(0, len(m['phases'])):\n measurement_id = m.get(\"mRID\")\n name = m['name'] + '-' + m['phases'][l] + '-Watts-value'\n description = \"Battery, \" + m['name'][l] + \",Phase: \" + m['phases'] + \",ConductingEquipment_type:PowerElectronicConnections\"\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name, description,measurement_id, \"PowerElectronicsConnection.p\")\n self.c_ao += 1\n name1 = m['name'] + '-' + m['phases'][l] + '-VAR-value'\n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name1, description,measurement_id, \"PowerElectronicsConnection.q\")\n self.c_ao += 1\n \n for m in switches:\n measurement_id = m.get(\"mRID\")\n switch_attribute = attribute_map['switches']['attribute']\n for k in range(0, len(m['phases'])):\n phase_value = list(m['phases'])\n name = m['name'] + \"Phase:\" + m['phases'][k]\n description = \"Name:\" + m[\"name\"] + \",ConductingEquipment_type:LoadBreakSwitch\" + \"Phase:\" + phase_value[k] +\",controlAttribute:\"+switch_attribute\n self.assign_val_d(\"DO\", 12, 1, self.c_do, name, description, measurement_id, switch_attribute)\n self.c_do += 1\n\n for m in fuses:\n measurement_id = m.get(\"mRID\")\n switch_attribute = attribute_map['switches']['attribute']\n for l in range(0, len(m['phases'])):\n phase_value = list(m['phases'])\n name = m['name'] + \"Phase:\" + m['phases'][l]\n description = \"Name:\" + m[\"name\"] + \",Phase:\" + phase_value[l] + \",Attribute:\" + switch_attribute + \",mRID\" + measurement_id\n self.assign_val_d(\"DO\", 12, 1, self.c_do, name, description, measurement_id, switch_attribute)\n self.c_do += 1\n\n for m in breakers:\n measurement_id = m.get(\"mRID\")\n switch_attribute = attribute_map['switches']['attribute']\n for n in range(0, len(m['phases'])):\n phase_value = list(m['phases'])\n name = m['name'] + \"Phase:\" + m['phases'][n]\n description = \"Name: \" + m[\"name\"] + \",Phase:\" + phase_value[n] + \",ConductingEquipment_type:Breaker\" + \",controlAttribute:\" + switch_attribute\n self.assign_val_d(\"DO\", 12, 1, self.c_do, name, description, measurement_id, switch_attribute)\n self.c_do += 1\n \n for m in reclosers:\n measurement_id = m.get(\"mRID\")\n switch_attribute = attribute_map['switches']['attribute']\n for i in range(0, len(m['phases'])):\n phase_value = list(m['phases'])\n name = m['name'] + \"Phase:\" + m['phases'][i]\n description = \"Recloser, \" + m[\"name\"] + \"Phase: - \" + phase_value[i] + \",ConductingEquipment_type:Recloser\"+\"controlAttribute:\" + switch_attribute\n self.assign_val_d(\"DO\", 12, 1, self.c_do, name, description, measurement_id, switch_attribute)\n self.c_do += 1\n\n for m in energyconsumers:\n measurement_id = m.get(\"mRID\")\n for k in range(0, len(m['phases'])):\n phase_value = list(m['phases'])\n name = m['name']+\"phase:\" + m['phases'][k]\n description = \"EnergyConsumer, \" + m[\"name\"] + \"Phase: \" + phase_value[k] \n self.assign_val_d(\"AO\", 42, 3, self.c_ao, name, description, measurement_id, \"EnergyConsumer.p\")\n self.c_ao += 1\n \n name1 = m['name']+\"phase:\" + m['phases'][k] + \"control\"\n self.assign_val_d(\"DO\", 12, 1, self.c_do, name1, description, measurement_id, \"EnergyConsumer.p\")\n self.c_do += 1\n\n return self.out_json", "def make_sol_dict():\n file_names = [\"FORMAT3_Copy of KommuneMTPLforTriangle.xls\",\n \"C Triangulations analysis R2017 
GC20161109.xls\",\n \"EVOLUTION 2017 _ M+F - Triangles cat nat brut net.xls\",\n \"Bsp8 _ Dreiecke aus GCNA für CU1.4.1.xls\",\n \"Analysis MTPL MOD.xls\",\n \"Bsp6 _ Dreiecke aus GCNA für CU1.4.1.xls\",\n \"FORMAT6_sinistres.xls\",\n \"FORMAT1_LOSSES-MTPL-OVER-500-GROUP-2005_modified.xls\"]\n solutions_dict = dict()\n raw_dict = dict()\n for file_name in file_names:\n sr_list, file_name = ExcelLoader.load_excel(pdir.RESOURCES_DIR + \"/raw_test_files/\" + file_name)\n dh = DataHolder()\n for sr in sr_list:\n dh.add_sheet(sr.sheet_name, pd.DataFrame(columns=sr.headers, data=sr.row_vals),\n pd.DataFrame(columns=sr.headers, data=sr.xls_types), orig_sheet_name=sr.sheet_name)\n\n dh = SheetPreProcessor.separate_components(dh)\n raw_dict[file_name] = dh.encode()\n dh = HorizontalMerger.horizontal_merge(dh)\n #temp_path = pdir.RESOURCES_DIR + \"/temp/\"\n #dh.write_excel(temp_path + file_name)\n solutions_dict[file_name] = dh\n solutions_dict = MergePararametersOptimizer.make_ind_col_dict(solutions_dict)\n with open(pdir.RESOURCES_DIR + \"/test/merge_solutions.obj\", \"wb\") as temp_file:\n pickle.dump(solutions_dict, temp_file)\n with open(pdir.RESOURCES_DIR + \"/test/raw_test.obj\", \"wb\") as temp_file:\n pickle.dump(raw_dict, temp_file)", "def create_dicts(self):\n \n # remove this string from filename to make output file names more manageable\n pre_output1 = self.file1.replace(\"_Guys121919_CGH_1100_Jul11\", '')\n pre_output2 = self.file2.replace(\"_Guys121919_CGH_1100_Jul11\", '')\n \n # Build the output file name.\n # if prefix is present add it\n if self.out_file_prefix is not None:\n # concatenate prefix, filenames and dyes into output filename file1_file1_dye_file2_file2_dye.txt\n self.outputfilename = self.out_file_prefix+pre_output1.replace(\".txt\", '') + \"_\" + self.file1_dye + \"_\" + pre_output2.replace(\".txt\", '') + \"_\" + self.file2_dye + \".txt\"\n # if no prefix don't add it!\n else:\n # concatenate filenames and dyes into output filename file1_file1_dye_file2_file2_dye.txt\n self.outputfilename = pre_output1.replace(\".txt\", '') + \"_\" + self.file1_dye + \"_\" + pre_output2.replace(\".txt\", '') + \"_\" + self.file2_dye + \".txt\"\n\n # add temp to end of file name to create a temporary output filename\n self.tempoutputfilename = self.outputfilename.replace(\".txt\", '') + \"temp.txt\"\n\n # open temp output file\n self.tempoutputfile = open(self.outputfolder + self.tempoutputfilename, 'w')\n\n \n # open FE files\n file1_open = open(self.chosenfolder + self.file1, 'r')\n file2_open = open(self.chosenfolder + self.file2, 'r')\n\n # open file1 and create a dict of the features.\n for linenumber, line in enumerate(file1_open):\n if linenumber >= 10:\n splitline = line.split('\\t')\n self.file1_dict[int(splitline[1])] = line\n # get n of rows in file1 (take the linenumber of the last line)\n self.file1_len = linenumber\n\n # repeat for features in second file but first writing the feparam and stats to temp file - when pairing with control this ensures the \"header\" comes from the test (file2) not control (file1), NB NEITHER ARE ACCURATE!!!!\n for linenumber, line in enumerate(file2_open):\n if linenumber < 10:\n self.tempoutputfile.write(line)\n # then add all features to a dictionary, with the unique feature number as a key\n if linenumber >= 10:\n splitline = line.split('\\t')\n self.file2_dict[int(splitline[1])] = line\n # get n of rows in file2\n self.file2_len = linenumber\n\n # close files\n file1_open.close()\n file2_open.close()", "def 
read_pronunciation(pronunciation_file):\n # file = open('dictionary.txt', 'r')\n #\n # for line in file:\n # print line\n\n ################# https://m.reddit.com/r/CompSciPortfolio/comments/303fyo/assignment_3_poetry_reader/\n\n pronunciation_dictionary = {}\n line = pronunciation_file.readline()\n while line.startswith(';;;'):\n line = pronunciation_file.readline()\n while line != '':\n stripped_line = line.strip()\n separation = stripped_line.find(' ')\n pronunciation_dictionary[stripped_line[:separation]] = stripped_line[(separation + 2):].split()\n line = pronunciation_file.readline()\n return pronunciation_dictionary\n\n\n\n # my_list = {}\n # for line in pronunciation_file.readlines():\n # line = line.strip()\n # if line and \";;;\" not in line:\n # r = line.split()\n # word = r[0]\n # phonemes = r[1:]\n # my_list[word] = phonemes\n # return my_list", "def process_pathway_ontology(self) -> None:\n # Load pathway ontology from file\n pw = PathwayOntology(name=\"PW\",\n filename=self.pathway_ontology_file)\n pw.load_from_file()\n\n pw_dict = dict()\n\n for cl in pw.owl_classes:\n synonyms, annotations = pw.get_synonyms(cl)\n pw_dict[cl] = {\n 'name': pw.get_label(cl),\n 'aliases': pw.get_all_labels(cl) + synonyms,\n 'synonyms': annotations,\n 'definition': pw.get_definition(cl),\n 'subClassOf': pw.get_subClassOf(cl),\n 'part_of': pw.get_part_of(cl)\n }\n\n with open(self.pw_json_file, 'w') as outf:\n json.dump(pw_dict, outf, indent=4, sort_keys=True)", "def makeMapping(globalMap):\n \n from memops.xml.Implementation import bool2str, str2bool\n\n # Set up top level dictionaries\n loadMaps = globalMap.get('loadMaps')\n mapsByGuid = globalMap.get('mapsByGuid')\n\n abstractTypes = globalMap.get('CCLB').get('abstractTypes')\n exolinks = globalMap.get('CCLB').get('exolinks')\n\n # Class AtomLabel\n currentMap = {}\n abstractTypes['AtomLabel'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:25:09_00018'] = currentMap\n loadMaps['CCLB.AtomLabel'] = currentMap\n currentMap['tag'] = 'CCLB.AtomLabel'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:25:09_00018'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'atomLabels'\n currentMap['class'] = ccp.api.molecule.ChemCompLabel.AtomLabel\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute AtomLabel.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute AtomLabel.isotopeCode\n currentMap = {}\n contentMap['isotopeCode'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:37_00092'] = currentMap\n loadMaps['CCLB.AtomLabel.isotopeCode'] = currentMap\n currentMap['tag'] = 'CCLB.AtomLabel.isotopeCode'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:37_00092'\n currentMap['name'] = 'isotopeCode'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute AtomLabel.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00003'] = currentMap\n loadMaps['CCLB.AtomLabel.name'] = currentMap\n currentMap['tag'] = 'CCLB.AtomLabel.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00003'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = 
mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute AtomLabel.subType\n currentMap = {}\n contentMap['subType'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00004'] = currentMap\n loadMaps['CCLB.AtomLabel.subType'] = currentMap\n currentMap['tag'] = 'CCLB.AtomLabel.subType'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00004'\n currentMap['name'] = 'subType'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['proc'] = 'direct'\n currentMap['default'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute AtomLabel.weight\n currentMap = {}\n contentMap['weight'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:37_00093'] = currentMap\n loadMaps['CCLB.AtomLabel.weight'] = currentMap\n currentMap['tag'] = 'CCLB.AtomLabel.weight'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:37_00093'\n currentMap['name'] = 'weight'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = 1.0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00009')\n\n # Role AtomLabel.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of AtomLabel\n\n currentMap = abstractTypes.get('AtomLabel')\n aList = ['isotopeCode', 'name', 'subType', 'weight']\n currentMap['headerAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class ChemCompLabel\n currentMap = {}\n abstractTypes['ChemCompLabel'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:25:09_00014'] = currentMap\n loadMaps['CCLB.ChemCompLabel'] = currentMap\n currentMap['tag'] = 'CCLB.ChemCompLabel'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:25:09_00014'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'chemCompLabels'\n currentMap['class'] = ccp.api.molecule.ChemCompLabel.ChemCompLabel\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute ChemCompLabel.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute ChemCompLabel.ccpCode\n currentMap = {}\n contentMap['ccpCode'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:37_00073'] = currentMap\n loadMaps['CCLB.ChemCompLabel.ccpCode'] = currentMap\n currentMap['tag'] = 'CCLB.ChemCompLabel.ccpCode'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:37_00073'\n currentMap['name'] = 'ccpCode'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-09-12-18:31:28_00003')\n\n # Attribute ChemCompLabel.molType\n currentMap = {}\n contentMap['molType'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:37_00072'] = currentMap\n loadMaps['CCLB.ChemCompLabel.molType'] = currentMap\n currentMap['tag'] = 'CCLB.ChemCompLabel.molType'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:37_00072'\n currentMap['name'] = 'molType'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00024')\n\n # Role ChemCompLabel.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role ChemCompLabel.isotopomers\n currentMap = {}\n 
contentMap['isotopomers'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-10-24-11:30:13_00001'] = currentMap\n loadMaps['CCLB.ChemCompLabel.isotopomers'] = currentMap\n currentMap['tag'] = 'CCLB.ChemCompLabel.isotopomers'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-24-11:30:13_00001'\n currentMap['name'] = 'isotopomers'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['content'] = globalMap.get('CCLB').get('abstractTypes')\n # End of ChemCompLabel\n\n currentMap = abstractTypes.get('ChemCompLabel')\n aList = ['ccpCode', 'molType']\n currentMap['headerAttrs'] = aList\n aList = ['isotopomers', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['isotopomers']\n currentMap['children'] = aList\n\n # Class Isotopomer\n currentMap = {}\n abstractTypes['Isotopomer'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-10-24-11:28:54_00001'] = currentMap\n loadMaps['CCLB.Isotopomer'] = currentMap\n currentMap['tag'] = 'CCLB.Isotopomer'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-24-11:28:54_00001'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'isotopomers'\n currentMap['objkey'] = 'serial'\n currentMap['class'] = ccp.api.molecule.ChemCompLabel.Isotopomer\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute Isotopomer.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute Isotopomer.serial\n currentMap = {}\n contentMap['serial'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00005'] = currentMap\n loadMaps['CCLB.Isotopomer.serial'] = currentMap\n currentMap['tag'] = 'CCLB.Isotopomer.serial'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00005'\n currentMap['name'] = 'serial'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute Isotopomer.weight\n currentMap = {}\n contentMap['weight'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00006'] = currentMap\n loadMaps['CCLB.Isotopomer.weight'] = currentMap\n currentMap['tag'] = 'CCLB.Isotopomer.weight'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00006'\n currentMap['name'] = 'weight'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = 1.0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00009')\n\n # Role Isotopomer.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role Isotopomer.atomLabels\n currentMap = {}\n contentMap['atomLabels'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00001'] = currentMap\n loadMaps['CCLB.Isotopomer.atomLabels'] = currentMap\n currentMap['tag'] = 'CCLB.Isotopomer.atomLabels'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00001'\n currentMap['name'] = 'atomLabels'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['content'] = globalMap.get('CCLB').get('abstractTypes')\n # End of Isotopomer\n\n currentMap = abstractTypes.get('Isotopomer')\n aList = ['serial', 'weight']\n currentMap['headerAttrs'] = aList\n aList = ['atomLabels', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = 
['atomLabels']\n currentMap['children'] = aList\n\n # Class LabelingScheme\n currentMap = {}\n abstractTypes['LabelingScheme'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-01-24-12:23:14_00001'] = currentMap\n loadMaps['CCLB.LabelingScheme'] = currentMap\n currentMap['tag'] = 'CCLB.LabelingScheme'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-01-24-12:23:14_00001'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'labelingSchemes'\n currentMap['isTop'] = True\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.molecule.ChemCompLabel.LabelingScheme\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute LabelingScheme.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute LabelingScheme.createdBy\n contentMap['createdBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00002__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute LabelingScheme.details\n currentMap = {}\n contentMap['details'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-01-24-12:23:55_00005'] = currentMap\n loadMaps['CCLB.LabelingScheme.details'] = currentMap\n currentMap['tag'] = 'CCLB.LabelingScheme.details'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-01-24-12:23:55_00005'\n currentMap['name'] = 'details'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00036')\n\n # Attribute LabelingScheme.guid\n contentMap['guid'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:26_00002')\n\n # Attribute LabelingScheme.isModifiable\n contentMap['isModifiable'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-17-14:16:26_00010__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute LabelingScheme.lastUnlockedBy\n contentMap['lastUnlockedBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00003__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute LabelingScheme.longName\n currentMap = {}\n contentMap['longName'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-01-24-12:23:55_00004'] = currentMap\n loadMaps['CCLB.LabelingScheme.longName'] = currentMap\n currentMap['tag'] = 'CCLB.LabelingScheme.longName'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-01-24-12:23:55_00004'\n currentMap['name'] = 'longName'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute LabelingScheme.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-01-24-12:23:55_00003'] = currentMap\n loadMaps['CCLB.LabelingScheme.name'] = currentMap\n currentMap['tag'] = 'CCLB.LabelingScheme.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-01-24-12:23:55_00003'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Role LabelingScheme.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role LabelingScheme.chemCompLabels\n currentMap = {}\n contentMap['chemCompLabels'] 
= currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-01-24-12:23:55_00002'] = currentMap\n loadMaps['CCLB.LabelingScheme.chemCompLabels'] = currentMap\n currentMap['tag'] = 'CCLB.LabelingScheme.chemCompLabels'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-01-24-12:23:55_00002'\n currentMap['name'] = 'chemCompLabels'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('CCLB').get('abstractTypes')\n # End of LabelingScheme\n\n currentMap = abstractTypes.get('LabelingScheme')\n aList = ['createdBy', 'guid', 'isModifiable', 'lastUnlockedBy', 'name']\n currentMap['headerAttrs'] = aList\n aList = ['details', 'longName']\n currentMap['simpleAttrs'] = aList\n aList = ['chemCompLabels', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['chemCompLabels']\n currentMap['children'] = aList\n\n # Out-of-package link to AtomLabel\n currentMap = {}\n exolinks['AtomLabel'] = currentMap\n loadMaps['CCLB.exo-AtomLabel'] = currentMap\n currentMap['tag'] = 'CCLB.exo-AtomLabel'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:25:09_00018'\n currentMap['name'] = 'AtomLabel'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.molecule.ChemCompLabel.AtomLabel\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00024'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-09-12-18:31:28_00003'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037'))\n\n # Out-of-package link to ChemCompLabel\n currentMap = {}\n exolinks['ChemCompLabel'] = currentMap\n loadMaps['CCLB.exo-ChemCompLabel'] = currentMap\n currentMap['tag'] = 'CCLB.exo-ChemCompLabel'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:25:09_00014'\n currentMap['name'] = 'ChemCompLabel'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.molecule.ChemCompLabel.ChemCompLabel\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00024'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-09-12-18:31:28_00003'))\n\n # Out-of-package link to Isotopomer\n currentMap = {}\n exolinks['Isotopomer'] = currentMap\n loadMaps['CCLB.exo-Isotopomer'] = currentMap\n currentMap['tag'] = 'CCLB.exo-Isotopomer'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-24-11:28:54_00001'\n currentMap['name'] = 'Isotopomer'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.molecule.ChemCompLabel.Isotopomer\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00024'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-09-12-18:31:28_00003'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032'))\n\n # Out-of-package link to LabelingScheme\n currentMap = {}\n 
exolinks['LabelingScheme'] = currentMap\n loadMaps['CCLB.exo-LabelingScheme'] = currentMap\n currentMap['tag'] = 'CCLB.exo-LabelingScheme'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-01-24-12:23:14_00001'\n currentMap['name'] = 'LabelingScheme'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.molecule.ChemCompLabel.LabelingScheme\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))", "def create_dicts():\n load_data_for_dict('data/atis/train/seq.in', 'data/atis/voc/vocabulary.json')\n load_data_for_dict('data/atis/valid/seq.in', 'data/atis/voc/vocabulary.json')\n load_data_for_dict('data/atis/test/seq.in', 'data/atis/voc/vocabulary.json') \n load_data_for_dict('data/atis/train/seq.out', 'data/atis/voc/slot_vocabulary.json')", "def store_wn_lookup():\n syns = list( wn.all_synsets() )\n #syn_str = map(lambda s: str(s).replace(\"Synset\",'').strip('()').strip(\"'\"), syns)\n syn_str = map(lambda s: str(s).replace(\"Synset\",'').strip('()').strip(\"'\").strip('\"'), syns)\n #offsets_list = [(\"n%08d\" % s.offset, s) for s in syns]\n olist = map(lambda a, b: (\"n%08d\" % a.offset, b), syns, syn_str)\n offset_dict = dict(olist)\n pickle.dump(offset_dict, open('/Users/xlx/Documents/proj/imgnet-flickr/db3/wn_offset_dict.pickle', 'wb'))", "def makeMapping(globalMap):\n \n from memops.xml.Implementation import bool2str, str2bool\n\n # Set up top level dictionaries\n loadMaps = globalMap.get('loadMaps')\n mapsByGuid = globalMap.get('mapsByGuid')\n\n abstractTypes = globalMap.get('CLAS').get('abstractTypes')\n exolinks = globalMap.get('CLAS').get('exolinks')\n\n # Class AbstractCategory\n currentMap = {}\n abstractTypes['AbstractCategory'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:19:17_00001'] = currentMap\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:19:17_00001'\n currentMap['eType'] = 'cplx'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.lims.Classification.AbstractCategory\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute AbstractCategory.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute AbstractCategory.details\n currentMap = {}\n contentMap['details'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:21:35_00001'] = currentMap\n loadMaps['CLAS.AbstractCategory.details'] = currentMap\n currentMap['tag'] = 'CLAS.AbstractCategory.details'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:21:35_00001'\n currentMap['name'] = 'details'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00036')\n\n # Attribute AbstractCategory.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00014'] = currentMap\n loadMaps['CLAS.AbstractCategory.name'] = currentMap\n currentMap['tag'] = 'CLAS.AbstractCategory.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00014'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Role AbstractCategory.access\n contentMap['access'] = 
mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of AbstractCategory\n\n currentMap = abstractTypes.get('AbstractCategory')\n aList = ['details', 'name']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class Classification\n currentMap = {}\n abstractTypes['Classification'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:18:54_00002'] = currentMap\n loadMaps['CLAS.Classification'] = currentMap\n currentMap['tag'] = 'CLAS.Classification'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:18:54_00002'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'classifications'\n currentMap['isTop'] = True\n currentMap['objkey'] = 'namingSystem'\n currentMap['class'] = ccp.api.lims.Classification.Classification\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute Classification.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute Classification.createdBy\n contentMap['createdBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00002__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute Classification.guid\n contentMap['guid'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:26_00002')\n\n # Attribute Classification.isModifiable\n contentMap['isModifiable'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-17-14:16:26_00010__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute Classification.lastUnlockedBy\n contentMap['lastUnlockedBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00003__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute Classification.namingSystem\n currentMap = {}\n contentMap['namingSystem'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00013'] = currentMap\n loadMaps['CLAS.Classification.namingSystem'] = currentMap\n currentMap['tag'] = 'CLAS.Classification.namingSystem'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00013'\n currentMap['name'] = 'namingSystem'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Role Classification.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role Classification.experimentTypes\n currentMap = {}\n contentMap['experimentTypes'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00002'] = currentMap\n loadMaps['CLAS.Classification.experimentTypes'] = currentMap\n currentMap['tag'] = 'CLAS.Classification.experimentTypes'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00002'\n currentMap['name'] = 'experimentTypes'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('CLAS').get('abstractTypes')\n\n # Role Classification.hazardPhrases\n currentMap = {}\n contentMap['hazardPhrases'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00006'] = currentMap\n loadMaps['CLAS.Classification.hazardPhrases'] = currentMap\n currentMap['tag'] = 'CLAS.Classification.hazardPhrases'\n 
currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00006'\n currentMap['name'] = 'hazardPhrases'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('CLAS').get('abstractTypes')\n\n # Role Classification.holderCategorys\n currentMap = {}\n contentMap['holderCategorys'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:33:23_00002'] = currentMap\n loadMaps['CLAS.Classification.holderCategorys'] = currentMap\n currentMap['tag'] = 'CLAS.Classification.holderCategorys'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:33:23_00002'\n currentMap['name'] = 'holderCategorys'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('CLAS').get('abstractTypes')\n\n # Role Classification.instrumentTypes\n currentMap = {}\n contentMap['instrumentTypes'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:21:33_00001'] = currentMap\n loadMaps['CLAS.Classification.instrumentTypes'] = currentMap\n currentMap['tag'] = 'CLAS.Classification.instrumentTypes'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:21:33_00001'\n currentMap['name'] = 'instrumentTypes'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('CLAS').get('abstractTypes')\n\n # Role Classification.sampleCategories\n currentMap = {}\n contentMap['sampleCategories'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00008'] = currentMap\n loadMaps['CLAS.Classification.sampleCategories'] = currentMap\n currentMap['tag'] = 'CLAS.Classification.sampleCategories'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00008'\n currentMap['name'] = 'sampleCategories'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('CLAS').get('abstractTypes')\n\n # Role Classification.sampleComponentCategory\n currentMap = {}\n contentMap['sampleComponentCategory'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00004'] = currentMap\n loadMaps['CLAS.Classification.sampleComponentCategory'] = currentMap\n currentMap['tag'] = 'CLAS.Classification.sampleComponentCategory'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00004'\n currentMap['name'] = 'sampleComponentCategory'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('CLAS').get('abstractTypes')\n\n # Role Classification.targetScoreboards\n currentMap = {}\n contentMap['targetScoreboards'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00012'] = currentMap\n loadMaps['CLAS.Classification.targetScoreboards'] = currentMap\n currentMap['tag'] = 'CLAS.Classification.targetScoreboards'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00012'\n currentMap['name'] = 'targetScoreboards'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('CLAS').get('abstractTypes')\n\n # 
Role Classification.targetStatus\n currentMap = {}\n contentMap['targetStatus'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00010'] = currentMap\n loadMaps['CLAS.Classification.targetStatus'] = currentMap\n currentMap['tag'] = 'CLAS.Classification.targetStatus'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00010'\n currentMap['name'] = 'targetStatus'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('CLAS').get('abstractTypes')\n # End of Classification\n\n currentMap = abstractTypes.get('Classification')\n aList = ['createdBy', 'guid', 'isModifiable', 'lastUnlockedBy']\n currentMap['headerAttrs'] = aList\n aList = ['namingSystem']\n currentMap['simpleAttrs'] = aList\n aList = ['targetStatus', 'targetScoreboards', 'sampleComponentCategory', 'sampleCategories', 'instrumentTypes', 'holderCategorys', 'hazardPhrases', 'experimentTypes', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['experimentTypes', 'hazardPhrases', 'holderCategorys', 'instrumentTypes', 'sampleCategories', 'sampleComponentCategory', 'targetScoreboards', 'targetStatus']\n currentMap['children'] = aList\n\n # Class SampleComponentCategory\n currentMap = {}\n abstractTypes['SampleComponentCategory'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00014'] = currentMap\n loadMaps['CLAS.SampleComponentCategory'] = currentMap\n currentMap['tag'] = 'CLAS.SampleComponentCategory'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00014'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'sampleComponentCategory'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.lims.Classification.SampleComponentCategory\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute SampleComponentCategory.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute SampleComponentCategory.details\n contentMap['details'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:35_00001')\n\n # Attribute SampleComponentCategory.name\n contentMap['name'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00014')\n\n # Role SampleComponentCategory.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of SampleComponentCategory\n\n currentMap = abstractTypes.get('SampleComponentCategory')\n aList = ['details', 'name']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class ExperimentType\n currentMap = {}\n abstractTypes['ExperimentType'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:45_00014'] = currentMap\n loadMaps['CLAS.ExperimentType'] = currentMap\n currentMap['tag'] = 'CLAS.ExperimentType'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:45_00014'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'experimentTypes'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.lims.Classification.ExperimentType\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute ExperimentType.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute ExperimentType.details\n contentMap['details'] = 
mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:35_00001')\n\n # Attribute ExperimentType.name\n contentMap['name'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00014')\n\n # Role ExperimentType.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role ExperimentType.instrumentTypes\n currentMap = {}\n contentMap['instrumentTypes'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:57_00002'] = currentMap\n loadMaps['CLAS.ExperimentType.instrumentTypes'] = currentMap\n currentMap['tag'] = 'CLAS.ExperimentType.instrumentTypes'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:57_00002'\n currentMap['name'] = 'instrumentTypes'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = True\n\n # Role ExperimentType.sampleCategories\n currentMap = {}\n contentMap['sampleCategories'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:56_00031'] = currentMap\n loadMaps['CLAS.ExperimentType.sampleCategories'] = currentMap\n currentMap['tag'] = 'CLAS.ExperimentType.sampleCategories'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:56_00031'\n currentMap['name'] = 'sampleCategories'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = True\n # End of ExperimentType\n\n currentMap = abstractTypes.get('ExperimentType')\n aList = ['details', 'name', 'instrumentTypes', 'sampleCategories']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class TargetScoreboard\n currentMap = {}\n abstractTypes['TargetScoreboard'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:51_00040'] = currentMap\n loadMaps['CLAS.TargetScoreboard'] = currentMap\n currentMap['tag'] = 'CLAS.TargetScoreboard'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:51_00040'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'targetScoreboards'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.lims.Classification.TargetScoreboard\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute TargetScoreboard.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute TargetScoreboard.details\n contentMap['details'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:35_00001')\n\n # Attribute TargetScoreboard.name\n contentMap['name'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00014')\n\n # Role TargetScoreboard.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role TargetScoreboard.targetStatus\n currentMap = {}\n contentMap['targetStatus'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:18_00039'] = currentMap\n loadMaps['CLAS.TargetScoreboard.targetStatus'] = currentMap\n currentMap['tag'] = 'CLAS.TargetScoreboard.targetStatus'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:18_00039'\n currentMap['name'] = 'targetStatus'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = True\n # End of TargetScoreboard\n\n currentMap = abstractTypes.get('TargetScoreboard')\n aList = ['details', 'name', 'targetStatus']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class 
HolderCategory\n currentMap = {}\n abstractTypes['HolderCategory'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00049'] = currentMap\n loadMaps['CLAS.HolderCategory'] = currentMap\n currentMap['tag'] = 'CLAS.HolderCategory'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00049'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'holderCategorys'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.lims.Classification.HolderCategory\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute HolderCategory.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute HolderCategory.details\n contentMap['details'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:35_00001')\n\n # Attribute HolderCategory.name\n contentMap['name'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00014')\n\n # Role HolderCategory.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of HolderCategory\n\n currentMap = abstractTypes.get('HolderCategory')\n aList = ['details', 'name']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class HazardPhrase\n currentMap = {}\n abstractTypes['HazardPhrase'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00052'] = currentMap\n loadMaps['CLAS.HazardPhrase'] = currentMap\n currentMap['tag'] = 'CLAS.HazardPhrase'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00052'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'hazardPhrases'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.lims.Classification.HazardPhrase\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute HazardPhrase.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute HazardPhrase.details\n contentMap['details'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:35_00001')\n\n # Attribute HazardPhrase.name\n contentMap['name'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00014')\n\n # Attribute HazardPhrase.phrase\n currentMap = {}\n contentMap['phrase'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:28_00017'] = currentMap\n loadMaps['CLAS.HazardPhrase.phrase'] = currentMap\n currentMap['tag'] = 'CLAS.HazardPhrase.phrase'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:28_00017'\n currentMap['name'] = 'phrase'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00036')\n\n # Role HazardPhrase.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of HazardPhrase\n\n currentMap = abstractTypes.get('HazardPhrase')\n aList = ['details', 'name', 'phrase']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class InstrumentType\n currentMap = {}\n abstractTypes['InstrumentType'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:46_00005'] = currentMap\n loadMaps['CLAS.InstrumentType'] = currentMap\n currentMap['tag'] = 'CLAS.InstrumentType'\n currentMap['type'] = 'class'\n currentMap['guid'] = 
'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:46_00005'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'instrumentTypes'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.lims.Classification.InstrumentType\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute InstrumentType.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute InstrumentType.details\n contentMap['details'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:35_00001')\n\n # Attribute InstrumentType.name\n contentMap['name'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00014')\n\n # Role InstrumentType.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role InstrumentType.experimentTypes\n currentMap = {}\n contentMap['experimentTypes'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:57_00001'] = currentMap\n loadMaps['CLAS.InstrumentType.experimentTypes'] = currentMap\n currentMap['tag'] = 'CLAS.InstrumentType.experimentTypes'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:57_00001'\n currentMap['name'] = 'experimentTypes'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = True\n # End of InstrumentType\n\n currentMap = abstractTypes.get('InstrumentType')\n aList = ['details', 'name', 'experimentTypes']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class SampleCategory\n currentMap = {}\n abstractTypes['SampleCategory'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00009'] = currentMap\n loadMaps['CLAS.SampleCategory'] = currentMap\n currentMap['tag'] = 'CLAS.SampleCategory'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00009'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'sampleCategories'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.lims.Classification.SampleCategory\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute SampleCategory.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute SampleCategory.details\n contentMap['details'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:35_00001')\n\n # Attribute SampleCategory.name\n contentMap['name'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00014')\n\n # Role SampleCategory.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role SampleCategory.experimentTypes\n currentMap = {}\n contentMap['experimentTypes'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:22:56_00030'] = currentMap\n loadMaps['CLAS.SampleCategory.experimentTypes'] = currentMap\n currentMap['tag'] = 'CLAS.SampleCategory.experimentTypes'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:22:56_00030'\n currentMap['name'] = 'experimentTypes'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = True\n # End of SampleCategory\n\n currentMap = abstractTypes.get('SampleCategory')\n aList = ['details', 'name', 'experimentTypes']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class TargetStatus\n currentMap = {}\n abstractTypes['TargetStatus'] = currentMap\n 
mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:51_00039'] = currentMap\n loadMaps['CLAS.TargetStatus'] = currentMap\n currentMap['tag'] = 'CLAS.TargetStatus'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:51_00039'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'targetStatus'\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.lims.Classification.TargetStatus\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute TargetStatus.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute TargetStatus.details\n contentMap['details'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:35_00001')\n\n # Attribute TargetStatus.name\n contentMap['name'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-04-17:21:34_00014')\n\n # Role TargetStatus.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role TargetStatus.targetScoreboards\n currentMap = {}\n contentMap['targetScoreboards'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:18_00040'] = currentMap\n loadMaps['CLAS.TargetStatus.targetScoreboards'] = currentMap\n currentMap['tag'] = 'CLAS.TargetStatus.targetScoreboards'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:18_00040'\n currentMap['name'] = 'targetScoreboards'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = True\n # End of TargetStatus\n\n currentMap = abstractTypes.get('TargetStatus')\n aList = ['details', 'name', 'targetScoreboards']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Out-of-package link to Classification\n currentMap = {}\n exolinks['Classification'] = currentMap\n loadMaps['CLAS.exo-Classification'] = currentMap\n currentMap['tag'] = 'CLAS.exo-Classification'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:18:54_00002'\n currentMap['name'] = 'Classification'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Classification.Classification\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n\n # Out-of-package link to SampleComponentCategory\n currentMap = {}\n exolinks['SampleComponentCategory'] = currentMap\n loadMaps['CLAS.exo-SampleComponentCategory'] = currentMap\n currentMap['tag'] = 'CLAS.exo-SampleComponentCategory'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00014'\n currentMap['name'] = 'SampleComponentCategory'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Classification.SampleComponentCategory\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n\n # Out-of-package link to ExperimentType\n currentMap = {}\n exolinks['ExperimentType'] = currentMap\n loadMaps['CLAS.exo-ExperimentType'] = currentMap\n currentMap['tag'] = 'CLAS.exo-ExperimentType'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:45_00014'\n currentMap['name'] = 'ExperimentType'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Classification.ExperimentType\n aList = list()\n currentMap['keyMaps'] = aList\n 
aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n\n # Out-of-package link to TargetScoreboard\n currentMap = {}\n exolinks['TargetScoreboard'] = currentMap\n loadMaps['CLAS.exo-TargetScoreboard'] = currentMap\n currentMap['tag'] = 'CLAS.exo-TargetScoreboard'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:51_00040'\n currentMap['name'] = 'TargetScoreboard'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Classification.TargetScoreboard\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n\n # Out-of-package link to HolderCategory\n currentMap = {}\n exolinks['HolderCategory'] = currentMap\n loadMaps['CLAS.exo-HolderCategory'] = currentMap\n currentMap['tag'] = 'CLAS.exo-HolderCategory'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00049'\n currentMap['name'] = 'HolderCategory'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Classification.HolderCategory\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n\n # Out-of-package link to HazardPhrase\n currentMap = {}\n exolinks['HazardPhrase'] = currentMap\n loadMaps['CLAS.exo-HazardPhrase'] = currentMap\n currentMap['tag'] = 'CLAS.exo-HazardPhrase'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00052'\n currentMap['name'] = 'HazardPhrase'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Classification.HazardPhrase\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n\n # Out-of-package link to InstrumentType\n currentMap = {}\n exolinks['InstrumentType'] = currentMap\n loadMaps['CLAS.exo-InstrumentType'] = currentMap\n currentMap['tag'] = 'CLAS.exo-InstrumentType'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:46_00005'\n currentMap['name'] = 'InstrumentType'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Classification.InstrumentType\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n\n # Out-of-package link to SampleCategory\n currentMap = {}\n exolinks['SampleCategory'] = currentMap\n loadMaps['CLAS.exo-SampleCategory'] = currentMap\n currentMap['tag'] = 'CLAS.exo-SampleCategory'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:44_00009'\n currentMap['name'] = 'SampleCategory'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Classification.SampleCategory\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n\n # Out-of-package link to TargetStatus\n currentMap = {}\n exolinks['TargetStatus'] = currentMap\n loadMaps['CLAS.exo-TargetStatus'] = 
currentMap\n currentMap['tag'] = 'CLAS.exo-TargetStatus'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:51_00039'\n currentMap['name'] = 'TargetStatus'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.lims.Classification.TargetStatus\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))", "def construct_dict(self):\n i = 0\n self.word2idx = dict()\n fi = open(self.config.word_vec_fi_glove, 'r')\n\n for line in fi:\n self.word2idx[line.split(\" \")[0]] = i\n i += 1\n\n self.vocab_size = i\n self.write_dict()\n fi.close()", "def build_article_map(f='./wikispeedia_paths-and-graph/articles.tsv'):\n out_dict = {}\n count = 0\n with open(f, 'r') as r:\n for _ in xrange(12):\n next(r)\n for line in r:\n out_dict[line.strip('\\n')] = count\n count += 1\n return out_dict", "def get_dictionary(filename):\n asop_dict = {}\n # Defaults for standard observational data\n if 'CMORPH_V1.0.mjodiab_period_3hrmeans.precip.nc' in filename or \\\n 'TRMM_3B42V7A.mjodiab_period_3hrmeans.precip.nc' in filename:\n asop_dict['infile'] = filename\n asop_dict['name'] = ''\n asop_dict['dt'] = 10800\n asop_dict['dx'] = 27\n asop_dict['dy'] = 27\n asop_dict['constraint'] = 'precipitation'\n asop_dict['scale_factor'] = 8.0\n asop_dict['legend_name'] = ''\n asop_dict['region'] = [-10,10,60,90]\n asop_dict['box_size'] = 1680\n asop_dict['color'] = 'red'\n asop_dict['region_size'] = 7\n asop_dict['lag_length'] = 6\n asop_dict['grid_type'] = 'native'\n asop_dict['time_type'] = '3hr'\n asop_dict['grid_desc'] = 'native'\n asop_dict['time_desc'] = '3-hourly'\n asop_dict['autocorr_length'] = 60*60*24\n else:\n asop_dict=build_asop_dict(filename)\n return(asop_dict)", "def makeMapping(globalMap):\n \n from memops.xml.Implementation import bool2str, str2bool\n\n # Set up top level dictionaries\n loadMaps = globalMap.get('loadMaps')\n mapsByGuid = globalMap.get('mapsByGuid')\n\n abstractTypes = globalMap.get('CHEL').get('abstractTypes')\n exolinks = globalMap.get('CHEL').get('exolinks')\n\n # DataType HalfLifeType\n currentMap = {}\n abstractTypes['HalfLifeType'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-07-18:18:10_00002'] = currentMap\n loadMaps['CHEL.HalfLifeType'] = currentMap\n currentMap['tag'] = 'CHEL.HalfLifeType'\n currentMap['type'] = 'simple'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-07-18:18:10_00002'\n currentMap['toStr'] = 'text'\n currentMap['cnvrt'] = 'text'\n\n # Class ChemElement\n currentMap = {}\n abstractTypes['ChemElement'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:19:49_00004'] = currentMap\n loadMaps['CHEL.ChemElement'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElement'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:19:49_00004'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'chemElements'\n currentMap['objkey'] = 'symbol'\n currentMap['class'] = ccp.api.molecule.ChemElement.ChemElement\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute ChemElement.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute ChemElement.atomNumber\n currentMap = {}\n contentMap['atomNumber'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00017'] = currentMap\n loadMaps['CHEL.ChemElement.atomNumber'] = 
currentMap\n currentMap['tag'] = 'CHEL.ChemElement.atomNumber'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00017'\n currentMap['name'] = 'atomNumber'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['proc'] = 'direct'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute ChemElement.atomicRadius\n currentMap = {}\n contentMap['atomicRadius'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00018'] = currentMap\n loadMaps['CHEL.ChemElement.atomicRadius'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElement.atomicRadius'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00018'\n currentMap['name'] = 'atomicRadius'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute ChemElement.covalentRadius\n currentMap = {}\n contentMap['covalentRadius'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00019'] = currentMap\n loadMaps['CHEL.ChemElement.covalentRadius'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElement.covalentRadius'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00019'\n currentMap['name'] = 'covalentRadius'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute ChemElement.mass\n currentMap = {}\n contentMap['mass'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00020'] = currentMap\n loadMaps['CHEL.ChemElement.mass'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElement.mass'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00020'\n currentMap['name'] = 'mass'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute ChemElement.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00021'] = currentMap\n loadMaps['CHEL.ChemElement.name'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElement.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00021'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00055')\n\n # Attribute ChemElement.symbol\n currentMap = {}\n contentMap['symbol'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00015'] = currentMap\n loadMaps['CHEL.ChemElement.symbol'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElement.symbol'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00015'\n currentMap['name'] = 'symbol'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00055')\n\n # Role ChemElement.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role ChemElement.isotopes\n currentMap = {}\n contentMap['isotopes'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00004'] = currentMap\n loadMaps['CHEL.ChemElement.isotopes'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElement.isotopes'\n currentMap['type'] = 'child'\n currentMap['guid'] = 
'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00004'\n currentMap['name'] = 'isotopes'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['content'] = globalMap.get('CHEL').get('abstractTypes')\n # End of ChemElement\n\n currentMap = abstractTypes.get('ChemElement')\n aList = ['atomNumber', 'atomicRadius', 'covalentRadius', 'mass', 'name', 'symbol']\n currentMap['headerAttrs'] = aList\n aList = ['isotopes', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['isotopes']\n currentMap['children'] = aList\n\n # Class ChemElementStore\n currentMap = {}\n abstractTypes['ChemElementStore'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:19:49_00005'] = currentMap\n loadMaps['CHEL.ChemElementStore'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElementStore'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:19:49_00005'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'chemElementStores'\n currentMap['isTop'] = True\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.molecule.ChemElement.ChemElementStore\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute ChemElementStore.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute ChemElementStore.createdBy\n contentMap['createdBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00002__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute ChemElementStore.guid\n contentMap['guid'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:26_00002')\n\n # Attribute ChemElementStore.isModifiable\n contentMap['isModifiable'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-17-14:16:26_00010__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute ChemElementStore.lastUnlockedBy\n contentMap['lastUnlockedBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00003__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute ChemElementStore.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00024'] = currentMap\n loadMaps['CHEL.ChemElementStore.name'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElementStore.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00024'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Role ChemElementStore.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role ChemElementStore.chemElements\n currentMap = {}\n contentMap['chemElements'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00023'] = currentMap\n loadMaps['CHEL.ChemElementStore.chemElements'] = currentMap\n currentMap['tag'] = 'CHEL.ChemElementStore.chemElements'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00023'\n currentMap['name'] = 'chemElements'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('CHEL').get('abstractTypes')\n # End of ChemElementStore\n\n currentMap = 
abstractTypes.get('ChemElementStore')\n aList = ['createdBy', 'guid', 'isModifiable', 'lastUnlockedBy']\n currentMap['headerAttrs'] = aList\n aList = ['name']\n currentMap['simpleAttrs'] = aList\n aList = ['chemElements', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['chemElements']\n currentMap['children'] = aList\n\n # Class Isotope\n currentMap = {}\n abstractTypes['Isotope'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:19:49_00003'] = currentMap\n loadMaps['CHEL.Isotope'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:19:49_00003'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'isotopes'\n currentMap['objkey'] = 'massNumber'\n currentMap['class'] = ccp.api.molecule.ChemElement.Isotope\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute Isotope.abundance\n currentMap = {}\n contentMap['abundance'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00011'] = currentMap\n loadMaps['CHEL.Isotope.abundance'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.abundance'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00011'\n currentMap['name'] = 'abundance'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00058')\n\n # Attribute Isotope.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute Isotope.gyroMagneticRatio\n currentMap = {}\n contentMap['gyroMagneticRatio'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00008'] = currentMap\n loadMaps['CHEL.Isotope.gyroMagneticRatio'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.gyroMagneticRatio'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00008'\n currentMap['name'] = 'gyroMagneticRatio'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute Isotope.halfLife\n currentMap = {}\n contentMap['halfLife'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-07-18:18:13_00001'] = currentMap\n loadMaps['CHEL.Isotope.halfLife'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.halfLife'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-07-18:18:13_00001'\n currentMap['name'] = 'halfLife'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00007')\n\n # Attribute Isotope.halfLifeError\n currentMap = {}\n contentMap['halfLifeError'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-07-18:18:13_00002'] = currentMap\n loadMaps['CHEL.Isotope.halfLifeError'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.halfLifeError'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-07-18:18:13_00002'\n currentMap['name'] = 'halfLifeError'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00007')\n\n # Attribute Isotope.halfLifeType\n currentMap = {}\n contentMap['halfLifeType'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-06-07-18:18:13_00003'] = currentMap\n loadMaps['CHEL.Isotope.halfLifeType'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.halfLifeType'\n currentMap['type'] = 
'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-06-07-18:18:13_00003'\n currentMap['name'] = 'halfLifeType'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = 'unknown'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-06-07-18:18:10_00002')\n\n # Attribute Isotope.magneticMoment\n currentMap = {}\n contentMap['magneticMoment'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00013'] = currentMap\n loadMaps['CHEL.Isotope.magneticMoment'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.magneticMoment'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00013'\n currentMap['name'] = 'magneticMoment'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute Isotope.mass\n currentMap = {}\n contentMap['mass'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00010'] = currentMap\n loadMaps['CHEL.Isotope.mass'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.mass'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00010'\n currentMap['name'] = 'mass'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute Isotope.massNumber\n currentMap = {}\n contentMap['massNumber'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00007'] = currentMap\n loadMaps['CHEL.Isotope.massNumber'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.massNumber'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00007'\n currentMap['name'] = 'massNumber'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['proc'] = 'direct'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute Isotope.quadrupoleMoment\n currentMap = {}\n contentMap['quadrupoleMoment'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00014'] = currentMap\n loadMaps['CHEL.Isotope.quadrupoleMoment'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.quadrupoleMoment'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00014'\n currentMap['name'] = 'quadrupoleMoment'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute Isotope.receptivity\n currentMap = {}\n contentMap['receptivity'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00012'] = currentMap\n loadMaps['CHEL.Isotope.receptivity'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.receptivity'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00012'\n currentMap['name'] = 'receptivity'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00031')\n\n # Attribute Isotope.spin\n currentMap = {}\n contentMap['spin'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00009'] = currentMap\n loadMaps['CHEL.Isotope.spin'] = currentMap\n currentMap['tag'] = 'CHEL.Isotope.spin'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:12_00009'\n currentMap['name'] = 'spin'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['data'] = 
mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Role Isotope.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of Isotope\n\n currentMap = abstractTypes.get('Isotope')\n aList = ['abundance', 'gyroMagneticRatio', 'halfLife', 'halfLifeError', 'halfLifeType', 'magneticMoment', 'mass', 'massNumber', 'quadrupoleMoment', 'receptivity', 'spin']\n currentMap['headerAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Out-of-package link to ChemElement\n currentMap = {}\n exolinks['ChemElement'] = currentMap\n loadMaps['CHEL.exo-ChemElement'] = currentMap\n currentMap['tag'] = 'CHEL.exo-ChemElement'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:19:49_00004'\n currentMap['name'] = 'ChemElement'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.molecule.ChemElement.ChemElement\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00055'))\n\n # Out-of-package link to ChemElementStore\n currentMap = {}\n exolinks['ChemElementStore'] = currentMap\n loadMaps['CHEL.exo-ChemElementStore'] = currentMap\n currentMap['tag'] = 'CHEL.exo-ChemElementStore'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:19:49_00005'\n currentMap['name'] = 'ChemElementStore'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.molecule.ChemElement.ChemElementStore\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n\n # Out-of-package link to Isotope\n currentMap = {}\n exolinks['Isotope'] = currentMap\n loadMaps['CHEL.exo-Isotope'] = currentMap\n currentMap['tag'] = 'CHEL.exo-Isotope'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:19:49_00003'\n currentMap['name'] = 'Isotope'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.molecule.ChemElement.Isotope\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00055'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032'))", "def file_to_dictionary():\n\n return;", "def __init__(self, filename):\n\n self.term_dict = {}\n for line in open(filename):\n if line.startswith(\"#\"):\n continue\n\n #print line\n word, w_type = line.strip().split(\"\\t\")\n self.term_dict[word.strip().lower()] = \"CHESS_\" + w_type.strip().lower()", "def get_wiki_synonyms_mapping() -> Dict[str, str]:\n with open(Config.Path.synonym_mapping, \"r\") as f:\n return json.load(f)", "def label_maps_from_file(path_to_summary, label_mapping_code, separate_apostrophe_embedding, saved_dict=False):\n with open(path_to_summary, 'r') as mapping_file:\n mapping_list = mapping_file.readlines()\n\n apostrophe_options = find_apostrophe_options(mapping_list)\n\n label_map = {}\n for line in mapping_list:\n split_line = line.split()\n # Design the mapping such that the mapping code can be used to index the list\n # code: [code, phonetic English, native characters]\n code = split_line[1]\n code_options = [code, split_line[-1], split_line[0]]\n label_map[code] = code_options[label_mapping_code]\n if not separate_apostrophe_embedding:\n label_map[code + 
APOSTROPHE_TOKEN] = code_options[label_mapping_code] + apostrophe_options[label_mapping_code]\n # Add special characters sp and sil\n label_map['sp'] = 'sp'\n label_map['sil'] = 'sil'\n # Add special characters for the two hesitation characters\n label_map['G00'] = 'G00'\n label_map['G01'] = 'G01'\n return label_map", "def create_mapping_file(options):\n\n mapping_file = open(os.path.splitext(options.bco)[0] + \"mapping.txt\", 'w')\n mapping_file.writelines(\n\"\"\"# Use this file to provide mapping values for a bco.\n# MISSING PROPERTIES/FIELDS lists properties/fields that are missing from bco\n# NONALLOWED PROPERTIES/FIELDS shows properties that are not allowed\n# Syntax for specifying values\n# To delete a value\n# PATH --> FIELD: DELETE\n# To add a value\n# PATH --> FIELD: ADD-value_to_add\n# To rename a field name\n# PATH --> FIELD: RENAME-new_field_name\n# To swap a field name with another current field name\n# PATH --> FIELD: SWAP-other_field_name\n# Blank values will be skipped. Data does not need to be double represented\n# For example, \n# if <bco_id> needs renamed to <object_id>, either\n# ['object_id'] --> object_id: \n# SWAP-bco_id\n# OR \n# ['bco_id'] --> bco_id: RENAME:object_id \n# will work. No need to fill out both values.\n\"\"\"\n)\n validate_bco(options)\n\n missing_reg = r'(.*?) is a required property' # missing required property\n additional_reg = r'Additional properties are not allowed (.*?)' # unalloewd extra property\n\n attribute_reg = r\"'(.*?)'\" # getting an attribute (field surronded by single quotes)\n index_reg = r\"On instance(.*?)\" # getting key path\n\n failed_validation_reg = r'Failed validating (.*?)' # invalid type\n\n missing = []\n additional = []\n invalid = []\n\n path = {}\n\n with open('error.log') as errors:\n for line in errors:\n if re.match(missing_reg, line): # if there is a missing property\n to_add = re.findall(attribute_reg, line)\n for match in to_add:\n missing.append(match)\n elif re.match(additional_reg, line): # if there is an additional property\n to_add = re.findall(attribute_reg, line)\n for match in to_add:\n additional.append(match)\n elif re.match(failed_validation_reg, line): # if a property is invalid\n # additional and required properties are already represnted by the above regexes,\n # so skip\n if line.__contains__(\"'additionalProperties'\") is False \\\n and line.__contains__(\"'required'\") is False:\n to_add = [line.split(\"schema\")[1].split(\"['\")[-1].strip(\"']:\\n\")]\n invalid.append(to_add[0])\n\n # field contains an index for some attribute\n # this attribute will be the last attribute found the above regexes, and is stored in\n # to_add\n if re.match(index_reg, line):\n keys = \"\"\n index_path = line.removeprefix(\"On instance\").removesuffix(\":\\n\")\n if index_path is not None:\n keys = str(index_path)\n if len(to_add) > 0: # if there are any attributes to add\n for item in to_add:\n add_or_update_list_HELPER(path, str(item), keys + \"['\" + str(item) +\n \"']\")\n to_add = [] # reset to_add\n mapping_file.write(\"====MISSING PROPERTIES/FIELDS====\\n\")\n for attribute in missing:\n mapping_file.write(str(path[attribute][0]) + \"-->\" + str(attribute) + \":\\n\")\n path[attribute].pop(0)\n\n mapping_file.write(\"====NONALLOWED PROPERTIES/FIELDS====\\n\")\n for attribute in additional:\n mapping_file.write(str(path[attribute][0]) + \"-->\" + str(attribute) + \":\\n\")\n path[attribute].pop(0)\n for attribute in invalid:\n mapping_file.write(str(path[attribute][0]).split(\"]\")[0]\n + \"]-->\" + 
str(attribute) + \":\\n\")\n path[attribute].pop(0)\n\n return mapping_file.name", "def make_lex_dict(self):\n lex_dict = {}\n for line in self.lexicon_full_filepath.split('\\n'):\n sp = line.strip().split('\\t')\n if(len(sp) > 1):\n (word, measure) = line.strip().split('\\t')[0:2]\n lex_dict[word] = float(measure)\n return lex_dict", "def _load_map_creator_dict(self, source_fallback_dict, _textin=''):\n global dictionary, attributes\n if not _textin:\n fin = codecs.open(source_fallback_dict, mode='r', encoding='utf-8-sig')\n lines = fin.readlines()\n else:\n lines = _textin\n line = ' '.join([aline.strip() for aline in lines[1:]])\n root = etree.fromstring(line)\n# messagebox.showerror('_load_map_creator_dict','{}'.format(etree.tostring(root[0])))\n dictionary = root[0]\n self.Source = dictionary.get(\"SourceLanguage\")\n self.Regional = dictionary.get(\"TargetLanguage\")\n# messagebox.showerror('_load_map_creator_dict','{}'.format(etree.tostring(dictionary)))\n# translation = dictionary[0]\n fallback = dict()\n for translation in dictionary:\n fallback[str(translation.get(\"Source\"))] = str(translation.get(\"Target\"))\n #messagebox.showerror('_load_map_creator_dict','{} => {}'.format(fallback[str(translation.get(\"Source\"))], str(translation.get(\"Target\"))))\n# aline = lines[2]\n# start = aline.find('SourceLanguage=\"')+len('SourceLanguage=\"') + 1\n# end = aline.find('\"',start)\n# source = aline[start:end]\n# start = aline.find('TargetLanguage=\"')+len('TargetLanguage=\"') + 1\n# end = aline.find('\"',start)\n# regional = aline[start:end]\n# for translation in lines[3:-2]:\n# start = translation.find('<Translation Source=\"') + len('<Translation Source=\"')\n# end = translation.find('\"', start)\n# key = translation[start:end]\n# if not key:\n# messagebox.showerror('_load_map_creator_dict',' null key')\n# return\n# start = translation.find(' Target=\"', end) + len('<Translation Source=\"')\n# end = translation.find('\"', start)\n# value = translation[start:end]\n# fallback[key] = value\n## else:\n## print('fallback empty')\n return(self.Source, self.Regional, fallback)", "def load_lemma_pos_offset_map():\n lemma_pos_offset_map = defaultdict(dict)\n ##pos_lemma_offset_map = defaultdict(dict)\n for suffix in _FILEMAP.values():\n # parse each line of the file (ignoring comment lines)\n with open(wordnet_dir+'index.%s' % suffix) as fin:\n for i, line in enumerate(fin):\n if line.startswith(' '):\n continue\n _iter = iter(line.split())\n def _next_token():\n return next(_iter)\n try:\n # get the lemma and part-of-speech\n lemma = _next_token()\n pos = _next_token()\n # get the number of synsets for this lemma\n n_synsets = int(_next_token())\n assert n_synsets > 0\n # get and ignore the pointer symbols for all synsets of\n # this lemma\n n_pointers = int(_next_token())\n [_next_token() for _ in range(n_pointers)]\n # same as number of synsets\n n_senses = int(_next_token())\n assert n_synsets == n_senses\n # get and ignore number of senses ranked according to\n # frequency\n _next_token()\n # get synset offsets\n synset_offsets = [int(_next_token()) for _ in range(n_synsets)]\n\n # raise more informative error with file name and line number\n except (AssertionError, ValueError) as e:\n tup = ('index.%s' % suffix), (i + 1), e\n raise WordNetError('file %s, line %i: %s' % tup)\n\n # map lemmas and parts of speech to synsets\n lemma_pos_offset_map[lemma][pos] = synset_offsets\n ##pos_lemma_offset_map[pos][lemma] = synset_offsets\n if pos == ADJ:\n lemma_pos_offset_map[lemma][ADJ_SAT] = 
synset_offsets\n ##pos_lemma_offset_map[ADJ_SAT][lemma] = synset_offsets\n return lemma_pos_offset_map##, pos_lemma_offset_map", "def audit_process():\n st_types, pc_types = audit(OSMFILE)\n #pprint.pprint(dict(st_types))\n #pprint.pprint(dict(pc_types))\n\n correct_name = {}\n for st_type, ways in st_types.iteritems():\n for name in ways:\n better_name = update_name(name, mapping)\n correct_name[name] = better_name\n #print name, \"=>\", better_name\n \n correct_code = {}\n for _, pc_type in pc_types.iteritems():\n for code in pc_type:\n better_code = update_postalcode(code)\n correct_code[code] = better_code\n #print code, \"=>\", better_code\n \n return correct_name, correct_code", "def Parser(wa1, wa2):\r\n #Note that in the documentation, they start counting at position 1\r\n output = { \r\n 'First Borough Name': wa1[360 :369].strip(),\r\n 'House Number Display Format': wa1[369: 385].strip(),\r\n 'House Number Sort Format': wa1[385: 396].strip(),\r\n 'B10SC First Borough and Street Code': wa1[396: 407].strip(),\r\n 'Second Street Name Normalized': wa1[407:439].strip(),\r\n 'Community District': wa2[149:152].strip(),\r\n 'Zip Code': wa2[152:157].strip(),\r\n 'Election District': wa2[157:160].strip(),\r\n 'Assembly District': wa2[160:162].strip(),\r\n 'Congressional District': wa2[163:165].strip(),\r\n 'State Senatorial District': wa2[165:167].strip(),\r\n 'City Council District': wa2[169:171].strip(),\r\n 'Police Precinct': wa2[191:194].strip(),\r\n 'Community School District': wa2[203:205].strip(),\r\n 'Atomic Polygon': wa2[205: 208].strip(),\r\n '2010 Census Tract': wa2[223: 229].strip(),\r\n '2010 Census Block': wa2[229:233].strip(),\r\n '2010 Census Block Suffix': wa2[233].strip(),\r\n 'Neighborhood Tabulation Area (NTA)': wa2[245:249].strip(),\r\n 'DSNY Snow Priority Code': wa2[249].strip(),\r\n 'Hurricane Evacuation Zone (HEZ)': wa2[260:262].strip(),\r\n 'Spatial Coordinates of Segment': {'X Coordinate, Low Address End': wa2[313:320].strip(),\r\n 'Y Coordinate, Low Address End': wa2[320:327].strip(),\r\n 'Z Coordinate, Low Address End': wa2[327:334].strip(),\r\n 'X Coordinate, High Address End': wa2[334:341].strip(),\r\n 'Y Coordinate, High Address End': wa2[341:348].strip(),\r\n 'Z Coordinate, High Address End': wa2[348:355].strip(),\r\n },\r\n 'Roadway Type': wa2[444:446].strip(),\r\n 'Bike Lane': wa2[486].strip(),\r\n 'NTA Name': wa2[553: 628].strip(),\r\n 'USPS Preferred City Name': wa2[628:653].strip(),\r\n 'Latitude': wa2[653:662].strip(),\r\n 'Longitude': wa2[662: 673].strip(),\r\n 'Borough Block Lot (BBL)': {'Borough code': wa2[1533].strip(),\r\n 'Tax Block': wa2[1534:1539].strip(),\r\n 'Tax Lot': wa2[1539:1543].strip(),\r\n },\r\n 'Building Identification Number (BIN) of Input Address or NAP': wa2[1581:1588].strip(),\r\n 'X-Y Coordinates of Lot Centroid': wa2[1699:1713].strip(),\r\n 'Spatial X': wa2[125:132].strip(),\r\n 'Spatial Y': wa2[132:139].strip(),\r\n 'Message': wa1[579:659].strip(),\r\n }\r\n return output", "def standard_map_peninsula():\n geogr = \"\"\"\\\n OOOOOOOOOOOOOOOOOOOOO\n OOOOOOOOSMMMMJJJJJJJO\n OSSSSSJJJJMMJJJJJJJOO\n OSSSSSSSSSMMJJJJJJOOO\n OSSSSSJJJJJJJJJJJJOOO\n OSSSSSJJJDDJJJSJJJOOO\n OSSJJJJJDDDJJJSSSSOOO\n OOSSSSJJJDDJJJSOOOOOO\n OSSSJJJJJDDJJJJJJJOOO\n OSSSSJJJJDDJJJJOOOOOO\n OOSSSSJJJJJJJJOOOOOOO\n OOOSSSSJJJJJJJOOOOOOO\n OOOOOOOOOOOOOOOOOOOOO\"\"\"\n island = isle.Island(geogr)\n occupants = [{'loc': (1, 19),\n 'pop': [{'species': 'Herbivore', 'age': 9, 'weight': 10},\n {'species': 'Carnivore', 'age': 9, 'weight': 10}]}]\n 
island.populate_island(occupants)\n return island", "def create_dict(fd):\n # initialize an empty dictionary\n full_dict = {}\n # loop through file\n for line in fd:\n # lowercase everything in line, then split line into a list\n line = line.lower().split()\n # loop through elements in the list of words in the splitted line\n for word in line:\n # strip words from puncuation using string module\n word = word.strip(string.punctuation)\n # if words contains only alphabatic characters and of length > 1\n if word.isalpha() and len(word)!= 1:\n if len(word) in full_dict:\n full_dict[len(word)].add(word)\n else:\n full_dict[len(word)] = set()\n full_dict[len(word)].add(word)\n return full_dict", "def test_build_map_dict_by_name():\n gdpinfo = {\n \"gdpfile\": \"isp_gdp.csv\",\n \"separator\": \",\",\n \"quote\": '\"',\n \"min_year\": 1960,\n \"max_year\": 2015,\n \"country_name\": \"Country Name\",\n \"country_code\": \"Country Code\"\n }\n\n # Get pygal country code map\n pygal_countries = {'KEN':'Kenya', 'IDN':'Indonesia'}\n\n # 1960\n res = build_map_dict_by_name(gdpinfo, pygal_countries, \"1960\")\n print(res)", "def createAllDictionaries(self):\r\n self.makeSentenceLengths()\r\n self.makeWords()\r\n self.makeStems()\r\n self.makeGerund()\r\n self.makeWordLengths()", "def dictionary_creation(filename):\n\tfp = open(filename)\n\td = dict()\n\tfor line in fp:\n\t\t# print line\n\t\tfor word in line.split():\n\t\t\tword = word.strip(string.punctuation + string.whitespace)\n\t\t\t# print word\n\t\t\tif len(word) >5:\n\t\t\t\tif word not in d:\n\t\t\t\t\t# print 'in'\n\t\t\t\t\td[word] = 1\n\t\t\t\telse:\n\t\t\t\t\t# print 'not in'\n\t\t\t\t\td[word] += 1\n\treturn d\n\n\tfp.close()", "def makeMapping(globalMap):\n \n from memops.xml.Implementation import bool2str, str2bool\n\n # Set up top level dictionaries\n loadMaps = globalMap.get('loadMaps')\n mapsByGuid = globalMap.get('mapsByGuid')\n\n abstractTypes = globalMap.get('ACCO').get('abstractTypes')\n exolinks = globalMap.get('ACCO').get('exolinks')\n\n # Class AccessControlStore\n currentMap = {}\n abstractTypes['AccessControlStore'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:18:10_00001'] = currentMap\n loadMaps['ACCO.AccessControlStore'] = currentMap\n currentMap['tag'] = 'ACCO.AccessControlStore'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:18:10_00001'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'accessControlStores'\n currentMap['isTop'] = True\n currentMap['objkey'] = 'name'\n currentMap['class'] = memops.api.AccessControl.AccessControlStore\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute AccessControlStore.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute AccessControlStore.createdBy\n contentMap['createdBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00002__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute AccessControlStore.guid\n contentMap['guid'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:26_00002')\n\n # Attribute AccessControlStore.isModifiable\n contentMap['isModifiable'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-17-14:16:26_00010__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute AccessControlStore.lastUnlockedBy\n contentMap['lastUnlockedBy'] = 
mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00003__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute AccessControlStore.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:21:38_00006'] = currentMap\n loadMaps['ACCO.AccessControlStore.name'] = currentMap\n currentMap['tag'] = 'ACCO.AccessControlStore.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:21:38_00006'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Role AccessControlStore.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role AccessControlStore.accessObjects\n currentMap = {}\n contentMap['accessObjects'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00013'] = currentMap\n loadMaps['ACCO.AccessControlStore.accessObjects'] = currentMap\n currentMap['tag'] = 'ACCO.AccessControlStore.accessObjects'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00013'\n currentMap['name'] = 'accessObjects'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('ACCO').get('abstractTypes')\n\n # Role AccessControlStore.userGroups\n currentMap = {}\n contentMap['userGroups'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:21:38_00003'] = currentMap\n loadMaps['ACCO.AccessControlStore.userGroups'] = currentMap\n currentMap['tag'] = 'ACCO.AccessControlStore.userGroups'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:21:38_00003'\n currentMap['name'] = 'userGroups'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('ACCO').get('abstractTypes')\n\n # Role AccessControlStore.users\n currentMap = {}\n contentMap['users'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-09-04-17:21:38_00001'] = currentMap\n loadMaps['ACCO.AccessControlStore.users'] = currentMap\n currentMap['tag'] = 'ACCO.AccessControlStore.users'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:21:38_00001'\n currentMap['name'] = 'users'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('ACCO').get('abstractTypes')\n # End of AccessControlStore\n\n currentMap = abstractTypes.get('AccessControlStore')\n aList = ['createdBy', 'guid', 'isModifiable', 'lastUnlockedBy']\n currentMap['headerAttrs'] = aList\n aList = ['name']\n currentMap['simpleAttrs'] = aList\n aList = ['users', 'userGroups', 'accessObjects', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['accessObjects', 'userGroups', 'users']\n currentMap['children'] = aList\n\n # Class AccessObject\n currentMap = {}\n abstractTypes['AccessObject'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00014'] = currentMap\n loadMaps['ACCO.AccessObject'] = currentMap\n currentMap['tag'] = 'ACCO.AccessObject'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00014'\n currentMap['eType'] = 'cplx'\n 
currentMap['fromParent'] = 'accessObjects'\n currentMap['objkey'] = 'name'\n currentMap['class'] = memops.api.AccessControl.AccessObject\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute AccessObject.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute AccessObject.description\n currentMap = {}\n contentMap['description'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-14:16:27_00005'] = currentMap\n loadMaps['ACCO.AccessObject.description'] = currentMap\n currentMap['tag'] = 'ACCO.AccessObject.description'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-14:16:27_00005'\n currentMap['name'] = 'description'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['proc'] = 'direct'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00035')\n\n # Attribute AccessObject.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-14:16:27_00004'] = currentMap\n loadMaps['ACCO.AccessObject.name'] = currentMap\n currentMap['tag'] = 'ACCO.AccessObject.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-14:16:27_00004'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Role AccessObject.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role AccessObject.permissions\n currentMap = {}\n contentMap['permissions'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-14:16:27_00001'] = currentMap\n loadMaps['ACCO.AccessObject.permissions'] = currentMap\n currentMap['tag'] = 'ACCO.AccessObject.permissions'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-14:16:27_00001'\n currentMap['name'] = 'permissions'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['content'] = globalMap.get('ACCO').get('abstractTypes')\n # End of AccessObject\n\n currentMap = abstractTypes.get('AccessObject')\n aList = ['description', 'name']\n currentMap['simpleAttrs'] = aList\n aList = ['permissions', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['permissions']\n currentMap['children'] = aList\n\n # Class Permission\n currentMap = {}\n abstractTypes['Permission'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00018'] = currentMap\n loadMaps['ACCO.Permission'] = currentMap\n currentMap['tag'] = 'ACCO.Permission'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00018'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'permissions'\n currentMap['class'] = memops.api.AccessControl.Permission\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute Permission.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute Permission.opType\n currentMap = {}\n contentMap['opType'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-14:16:23_00021'] = currentMap\n loadMaps['ACCO.Permission.opType'] = currentMap\n currentMap['tag'] = 'ACCO.Permission.opType'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-14:16:23_00021'\n 
currentMap['name'] = 'opType'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = 'any'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute Permission.permission\n currentMap = {}\n contentMap['permission'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-14:16:23_00023'] = currentMap\n loadMaps['ACCO.Permission.permission'] = currentMap\n currentMap['tag'] = 'ACCO.Permission.permission'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-14:16:23_00023'\n currentMap['name'] = 'permission'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = True\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute Permission.permissionClass\n currentMap = {}\n contentMap['permissionClass'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-14:16:23_00020'] = currentMap\n loadMaps['ACCO.Permission.permissionClass'] = currentMap\n currentMap['tag'] = 'ACCO.Permission.permissionClass'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-14:16:23_00020'\n currentMap['name'] = 'permissionClass'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = 'any'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute Permission.roleName\n currentMap = {}\n contentMap['roleName'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-14:16:23_00022'] = currentMap\n loadMaps['ACCO.Permission.roleName'] = currentMap\n currentMap['tag'] = 'ACCO.Permission.roleName'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-14:16:23_00022'\n currentMap['name'] = 'roleName'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = 'any'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Role Permission.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role Permission.userGroup\n currentMap = {}\n contentMap['userGroup'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-14:16:23_00016'] = currentMap\n loadMaps['ACCO.Permission.userGroup'] = currentMap\n currentMap['tag'] = 'ACCO.Permission.userGroup'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-14:16:23_00016'\n currentMap['name'] = 'userGroup'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['copyOverride'] = True\n # End of Permission\n\n currentMap = abstractTypes.get('Permission')\n aList = ['opType', 'permission', 'permissionClass', 'roleName']\n currentMap['headerAttrs'] = aList\n aList = ['userGroup']\n currentMap['optLinks'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class User\n currentMap = {}\n abstractTypes['User'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00017'] = currentMap\n loadMaps['ACCO.User'] = currentMap\n currentMap['tag'] = 'ACCO.User'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00017'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'users'\n currentMap['objkey'] = 'name'\n currentMap['class'] = memops.api.AccessControl.User\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute User.applicationData\n contentMap['applicationData'] = 
mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute User.isSuperuser\n currentMap = {}\n contentMap['isSuperuser'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2010-05-06-13:30:17_00060'] = currentMap\n loadMaps['ACCO.User.isSuperuser'] = currentMap\n currentMap['tag'] = 'ACCO.User.isSuperuser'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2010-05-06-13:30:17_00060'\n currentMap['name'] = 'isSuperuser'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = False\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00028')\n\n # Attribute User.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-14:16:23_00019'] = currentMap\n loadMaps['ACCO.User.name'] = currentMap\n currentMap['tag'] = 'ACCO.User.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-14:16:23_00019'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute User.passwordHashed\n currentMap = {}\n contentMap['passwordHashed'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2009-08-19-17:31:11_00005'] = currentMap\n loadMaps['ACCO.User.passwordHashed'] = currentMap\n currentMap['tag'] = 'ACCO.User.passwordHashed'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2009-08-19-17:31:11_00005'\n currentMap['name'] = 'passwordHashed'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['proc'] = 'direct'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00035')\n\n # Role User.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role User.ledGroups\n currentMap = {}\n contentMap['ledGroups'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-14:16:23_00014'] = currentMap\n loadMaps['ACCO.User.ledGroups'] = currentMap\n currentMap['tag'] = 'ACCO.User.ledGroups'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-14:16:23_00014'\n currentMap['name'] = 'ledGroups'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = True\n\n # Role User.userGroups\n currentMap = {}\n contentMap['userGroups'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-14:16:23_00012'] = currentMap\n loadMaps['ACCO.User.userGroups'] = currentMap\n currentMap['tag'] = 'ACCO.User.userGroups'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-14:16:23_00012'\n currentMap['name'] = 'userGroups'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = True\n # End of User\n\n currentMap = abstractTypes.get('User')\n aList = ['isSuperuser']\n currentMap['headerAttrs'] = aList\n aList = ['name', 'passwordHashed', 'ledGroups', 'userGroups']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class UserGroup\n currentMap = {}\n abstractTypes['UserGroup'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00016'] = currentMap\n loadMaps['ACCO.UserGroup'] = currentMap\n currentMap['tag'] = 'ACCO.UserGroup'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00016'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] 
= 'userGroups'\n currentMap['objkey'] = 'name'\n currentMap['class'] = memops.api.AccessControl.UserGroup\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute UserGroup.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute UserGroup.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-14:16:23_00018'] = currentMap\n loadMaps['ACCO.UserGroup.name'] = currentMap\n currentMap['tag'] = 'ACCO.UserGroup.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-14:16:23_00018'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Role UserGroup.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role UserGroup.leaders\n currentMap = {}\n contentMap['leaders'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-14:16:23_00015'] = currentMap\n loadMaps['ACCO.UserGroup.leaders'] = currentMap\n currentMap['tag'] = 'ACCO.UserGroup.leaders'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-14:16:23_00015'\n currentMap['name'] = 'leaders'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = True\n\n # Role UserGroup.members\n currentMap = {}\n contentMap['members'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-14:16:23_00013'] = currentMap\n loadMaps['ACCO.UserGroup.members'] = currentMap\n currentMap['tag'] = 'ACCO.UserGroup.members'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-14:16:23_00013'\n currentMap['name'] = 'members'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = False\n\n # Role UserGroup.permissions\n currentMap = {}\n contentMap['permissions'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-17-14:16:23_00017'] = currentMap\n loadMaps['ACCO.UserGroup.permissions'] = currentMap\n currentMap['tag'] = 'ACCO.UserGroup.permissions'\n currentMap['type'] = 'link'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-17-14:16:23_00017'\n currentMap['name'] = 'permissions'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['copyOverride'] = False\n # End of UserGroup\n\n currentMap = abstractTypes.get('UserGroup')\n aList = ['name', 'leaders', 'members', 'permissions']\n currentMap['simpleAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Out-of-package link to AccessControlStore\n currentMap = {}\n exolinks['AccessControlStore'] = currentMap\n loadMaps['ACCO.exo-AccessControlStore'] = currentMap\n currentMap['tag'] = 'ACCO.exo-AccessControlStore'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-09-04-17:18:10_00001'\n currentMap['name'] = 'AccessControlStore'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = memops.api.AccessControl.AccessControlStore\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n\n # Out-of-package link to AccessObject\n currentMap = {}\n exolinks['AccessObject'] = currentMap\n loadMaps['ACCO.exo-AccessObject'] = currentMap\n currentMap['tag'] = 'ACCO.exo-AccessObject'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 
'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00014'\n currentMap['name'] = 'AccessObject'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = memops.api.AccessControl.AccessObject\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n\n # Out-of-package link to Permission\n currentMap = {}\n exolinks['Permission'] = currentMap\n loadMaps['ACCO.exo-Permission'] = currentMap\n currentMap['tag'] = 'ACCO.exo-Permission'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00018'\n currentMap['name'] = 'Permission'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = memops.api.AccessControl.Permission\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n aList.append(globalMap.get('ACCO').get('exolinks'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037'))\n\n # Out-of-package link to User\n currentMap = {}\n exolinks['User'] = currentMap\n loadMaps['ACCO.exo-User'] = currentMap\n currentMap['tag'] = 'ACCO.exo-User'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00017'\n currentMap['name'] = 'User'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = memops.api.AccessControl.User\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))\n\n # Out-of-package link to UserGroup\n currentMap = {}\n exolinks['UserGroup'] = currentMap\n loadMaps['ACCO.exo-UserGroup'] = currentMap\n currentMap['tag'] = 'ACCO.exo-UserGroup'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00016'\n currentMap['name'] = 'UserGroup'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = memops.api.AccessControl.UserGroup\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033'))", "def build_reverse_article_map(f='./wikispeedia_paths-and-graph/articles.tsv'):\n out_dict = {}\n count = 0\n with open(f, 'r') as r:\n for _ in xrange(12):\n next(r)\n for line in r:\n out_dict[count] = line.strip('\\n')\n count += 1\n return out_dict", "def read_denoiser_mapping(mapping_fh):\r\n denoiser_mapping = {}\r\n for i, line in enumerate(mapping_fh):\r\n if line == \"\":\r\n continue\r\n centroid, members = line.split(':')\r\n denoiser_mapping[centroid] = members.split()\r\n return denoiser_mapping", "def parse(filepath):\n wos_list = []\n\n paper_start_key = 'PT'\n paper_end_key = 'ER'\n\n\n #\n line_list = []\n try:\n with open(filepath, 'r') as f:\n line_list = f.read().splitlines()\n except IOError: # File does not exist, or couldn't be read.\n raise IOError(\"File does not exist, or cannot be read.\")\n\n if len(line_list) is 0:\n raise IOError(\"Unable to read filepath or filepath is empty.\")\n # Convert the data in the file to a usable list of dictionaries.\n # Note: first two lines of file are not related to 
any paper therein.\n last_field_tag = paper_start_key # initialize to something.\n for line in line_list[2:]:\n\n field_tag = line[:2]\n\n if field_tag == ' ':\n pass\n\n if field_tag == paper_start_key:\n # Then prepare for next paper.\n wos_dict = _new_wos_dict()\n\n if field_tag == paper_end_key:\n # Then add paper to our list.\n wos_list.append(wos_dict)\n\n # Handle keys like AU,AF,CR that continue over many lines.\n if field_tag == ' ':\n field_tag = last_field_tag\n\n # Add value for the key to the wos_dict: only for the five tags.\n try:\n if field_tag in ['DE', 'DI', 'TI', 'SO', 'UT','PY']:\n wos_dict[field_tag] += ' ' + str(line[3:])\n # Rest all will just get passed\n else:\n pass\n\n except (KeyError, TypeError, UnboundLocalError):\n wos_dict[field_tag] = str(line[3:])\n\n last_field_tag = field_tag\n # End line loop.\n\n # Define keys that should be lists instead of default string.\n list_keys = ['DE']\n delims = {'DE': ';'}\n\n # And convert the data at those keys into lists.\n for wos_dict in wos_list:\n for key in list_keys:\n delim = delims[key]\n try:\n key_contents = wos_dict[key]\n if delim != '\\n':\n wos_dict[key] = key_contents.split(delim)\n else:\n wos_dict[key] = key_contents.splitlines()\n except KeyError:\n # One of the keys to be converted to a list didn't exist.\n pass\n except AttributeError:\n # Again a key didn't exist but it belonged to the wos\n # data_struct set of keys; can't split a None.\n pass\n\n return wos_list", "def PSPLdict():\n pspl_dict = {}\n # individual files\n PSPLs = glob.glob(\"./msresist/data/PSPL/*.csv\")\n for sp in PSPLs:\n if sp == \"./msresist/data/PSPL/pssm_data.csv\":\n continue\n sp_mat = pd.read_csv(sp).sort_values(by=\"Unnamed: 0\")\n\n if sp_mat.shape[0] > 20: # Remove profiling of fixed pY and pT, include only natural AA\n assert np.all(sp_mat.iloc[:-2, 0] == AAlist), \"aa don't match\"\n sp_mat = sp_mat.iloc[:-2, 1:].values\n else:\n assert np.all(sp_mat.iloc[:, 0] == AAlist), \"aa don't match\"\n sp_mat = sp_mat.iloc[:, 1:].values\n\n if np.all(sp_mat >= 0):\n sp_mat = np.log2(sp_mat)\n\n pspl_dict[sp.split(\"PSPL/\")[1].split(\".csv\")[0]] = sp_mat\n\n # NetPhores PSPL results\n f = pd.read_csv(\"msresist/data/PSPL/pssm_data.csv\", header=None)\n matIDX = [np.arange(16) + i for i in range(0, f.shape[0], 16)]\n for ii in matIDX:\n kin = f.iloc[ii[0], 0]\n mat = f.iloc[ii[1:], :].T\n mat.columns = np.arange(mat.shape[1])\n mat = mat.iloc[:-1, 2:12].drop(8, axis=1).astype(\"float64\").values\n mat = np.ma.log2(mat)\n mat = mat.filled(0)\n mat = np.clip(mat, a_min=0, a_max=3)\n pspl_dict[kin] = mat\n\n return pspl_dict", "def create_meta_dict_L1(adcp_meta):\n meta_dict = {}\n with open(adcp_meta) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n next(csv_reader, None) # Skip header row\n for row in csv_reader:\n # extract all metadata from csv file into dictionary -- some items not passed to netCDF file but are extracted anyway\n if row[0] == '' and row[1] == '':\n print('Metadata file contains a blank row; skipping this row !')\n elif row[0] != '' and row[1] == '':\n print('Metadata item in csv file has blank value; skipping this row '\n 'in metadata file !')\n else:\n meta_dict[row[0]] = row[1]\n\n # Add conventions metadata to meta_dict\n meta_dict['deployment_type'] = 'Sub Surface'\n meta_dict['flag_meaning'] = 'no_quality_control, good_value, probably_good_value, probably_bad_value, ' \\\n 'bad_value, changed_value, value_below_detection, value_in_excess, ' \\\n 'interpolated_value, 
missing_value'\n meta_dict['flag_references'] = 'BODC SeaDataNet'\n meta_dict['flag_values'] = '0, 1, 2, 3, 4, 5, 6, 7, 8, 9'\n meta_dict['keywords'] = 'Oceans > Ocean Circulation > Ocean Currents'\n meta_dict['keywords_vocabulary'] = 'GCMD Science Keywords'\n meta_dict['naming_authority'] = 'BODC, MEDS, CF v72'\n meta_dict['variable_code_reference'] = 'BODC P01'\n meta_dict['Conventions'] = \"CF-1.8\"\n\n return meta_dict", "def create_rel_doctitle_dict():\n claim_rel_docno_dict = {} #key is claim text, value is a set of doc_title that are relevant\n clm_sen_doc_title_dict = read_pickle(\"sen_doc_title_dict\")\n claim_sen_true_relevance_dict = read_pickle(\"claim_sen_relevance_dict_\"+curr_source)\n exclude = set(string.punctuation)\n docID_title_mapping_wiki_pickle = read_pickle(\"dicID_title_mapping_wiki_pickle\")\n \n title_docID_mapping_wiki_pickle = {}\n for (docID,doc_title) in docID_title_mapping_wiki_pickle.iteritems():\n non_asci_char = [c for c in doc_title if not 0 < ord(c) < 127]\n new_doc_title = doc_title\n for c in non_asci_char:\n new_doc_title = new_doc_title.replace(c,\"\")\n doc_title_no_punc = ''.join(ch for ch in new_doc_title if ch not in exclude)\n doc_title_no_space = doc_title_no_punc.replace(\" \",\"\")\n title_docID_mapping_wiki_pickle[doc_title_no_space] = docID\n# title_docID_mapping_wiki_pickle = dict((y,x) for x,y in docID_title_mapping_wiki_pickle.iteritems()) \n for (clm) in claim_sen_true_relevance_dict.keys():\n rel_docno_set = set()\n for (sen,rel_score) in claim_sen_true_relevance_dict[clm]:\n try: \n if rel_score == 1:\n sen_no_punc = ''.join(ch for ch in sen if ch not in exclude)\n sen_no_space = sen_no_punc.replace(\" \",\"\")\n curr_rel_doc_title = clm_sen_doc_title_dict[sen_no_space]\n non_asci_char = [c for c in curr_rel_doc_title if not 0 < ord(c) < 127]\n new_curr_doc_title = curr_rel_doc_title\n for c in non_asci_char:\n new_curr_doc_title = new_curr_doc_title.replace(c,\"\")\n curr_doc_title_no_punc = ''.join(ch for ch in new_curr_doc_title if ch not in exclude)\n curr_doc_title_no_space = curr_doc_title_no_punc.replace(\" \",\"\")\n rel_docno_set.add((title_docID_mapping_wiki_pickle[curr_doc_title_no_space],1))\n \n except Exception as err: \n sys.stderr.write('problem in sen:'+sen) \n print err.args\n \n rel_docno_list = [(docid,rel_score) for (docid,rel_score) in rel_docno_set]\n claim_rel_docno_dict[clm] = rel_docno_list\n save_pickle(\"claim_rel_docno_dict\", claim_rel_docno_dict)", "def ps_lensed_theory_to_dict(filename,output_type,lmax=None,startAtZero=False):\n\n fields=['TT','TE','TB','ET','BT','EE','EB','BE','BB']\n ps={}\n l,ps['TT'],ps['EE'],ps['BB'],ps['TE']=np.loadtxt(filename,unpack=True)\n ps['ET']=ps['TE'].copy()\n ps['TB'],ps['BT'],ps['EB'],ps['BE']=np.zeros((4,len(l)))\n\n if lmax is not None:\n l=l[:lmax]\n scale=l*(l+1)/(2*np.pi)\n for f in fields:\n if lmax is not None:\n ps[f]=ps[f][:lmax]\n if output_type=='Cl':\n ps[f]/=scale\n if startAtZero:\n ps[f]=np.append( np.array([0,0]),ps[f])\n if startAtZero:\n l=np.append( np.array([0,1]),l)\n return l,ps", "def parse_nasari_dictionary():\n nasari_dict = {}\n reader = csv.reader(open('./part2/exercise3/input/dd-small-nasari-15.txt', \"r\", encoding=\"utf-8\"), delimiter=';') \n for line in reader:\n vector_dict = {}\n for term_value in line[2:]:\n term, *value = term_value.split(\"_\")\n vector_dict[term] = value[0] if value else None\n nasari_dict[line[1].lower()] = vector_dict\n return nasari_dict", "def create_lemma_dict(lemma_file):\n\tacademic_words= 
open(lemma_file)\n\tLEMMA_DICT = {}\n\tfor line in academic_words:\n\t\tline =line.rstrip().rstrip(',')\n\t\twords = line.split(',')\n\t\tstem = words[0]\n\t\tfor word in words:\n\t\t\tLEMMA_DICT[word] = stem\n\n\treturn LEMMA_DICT", "def _create_ligand_smiles_dict(self) -> None:\n import json\n\n import pandas as pd\n\n from ..databases.pdb import smiles_from_pdb\n from ..utils import LocalFileStorage\n\n logging.debug(\"Reading available KLIFS structures from cache ...\")\n klifs_structures = pd.read_csv(LocalFileStorage.klifs_structure_db(self.cache_dir))\n\n logging.debug(\"Retrieving SMILES for orthosteric ligands ...\")\n pdb_to_smiles = smiles_from_pdb(set(klifs_structures[\"ligand.expo_id\"]))\n\n logging.debug(\"Saving local PDB SMILES dictionary ...\")\n with open(LocalFileStorage.pdb_smiles_json(self.cache_dir), \"w\") as wf:\n json.dump(pdb_to_smiles, wf)\n\n return", "def get_text_mining_mir_dictionary():\n if logger.getEffectiveLevel() == logging.DEBUG or not os.path.exists(OUT_MIR_ALIAS_FILE):\n __create_mir_alias_dictionary__()\n\n mir_alias_to_identifier = {}\n with gzip.open(OUT_MIR_ALIAS_FILE, 'rb') as mir_alias_file:\n for line in mir_alias_file:\n tax_id, mir_id, mir_alias = line.rstrip('\\r\\n').split('\\t')\n mir_alias_to_identifier[(tax_id, mir_alias)] = mir_id\n return mir_alias_to_identifier", "def map_protein_to_go(filename):\n\n try:\n with open(filename) as go_association_file:\n go_association = go_association_file.read()\n split_go_association = re.split(r\"\\n\", go_association)\n\n # Ignore the general file information, which is the line starting\n # with \"!\"\".\n go_association_info = []\n for line in split_go_association:\n if line and not line.startswith(\"!\"):\n go_association_info.append(line)\n\n # Declare the tuple to parse the protein and go term as a pair and\n # store it in the set to avoid duplicate situation\n go_protein_dict = {}\n for column in go_association_info:\n column_info = re.split(r\"\\t\", column)\n protein_id = column_info[1]\n go_term = column_info[4]\n\n if protein_id in go_protein_dict:\n go_protein_dict[protein_id].add(go_term)\n else:\n go_protein_dict[protein_id] = {go_term}\n return go_protein_dict\n\n except FileNotFoundError:\n return {}", "def preprocess_corpus(train_sents):\n global lookupLexiconDict\n lookupLexiconDict = {}\n \n lexiconDir = getcwd()+'\\\\data\\\\lexicon'\n filesList = [hfile for hfile in listdir(lexiconDir) if path.isfile(lexiconDir+'\\\\'+hfile) ]\n \n decision_tags = ['facility','product','musicartist']\n fileMappingDict = \\\n {\n 'architecture.museum':'facility',\n 'automotive.make':'product',\n 'automotive.model':'product',\n 'award.award':'musicartist',\n 'base.events.festival_series':'geo-loc',\n #'bigdict':'@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@',\n 'book.newspaper':'company',\n 'broadcast.tv_channel':'tvshow',\n 'business.brand':'company',\n 'business.consumer_company':'company',\n 'business.consumer_product':'product',\n 'business.sponsor':'company',\n 'cap.1000':'geo-loc',\n 'cvg.computer_videogame':'product',\n 'cvg.cvg_developer':'company',\n 'cvg.cvg_platform':'product',\n 'education.university':'facility',\n 'english.stop':'O',\n 'firstname.5k':'person',\n 'government.government_agency':'company',\n 'internet.website':'company',\n 'lastname.5000':'person',\n 'location':'geo-loc',\n 'location.country':'geo-loc',\n 'lower.5000':'O',\n 'people.family_name':'person',\n 'people.person':'person',\n 'people.person.lastnames':'person', # <-----------------------------\n 
'product':'product',\n 'sports.sports_league':'sportsteam',\n 'sports.sports_team':'sportsteam',\n 'time.holiday':'O',\n 'time.recurring_event':'O',\n 'transportation.road':'geo-loc',\n 'tv.tv_network':'tvshow',\n 'tv.tv_program':'tvshow',\n 'venture_capital.venture_funded_company':'company',\n 'venues':'geo-loc'\n }\n\n for lexFile in filesList:\n if lexFile not in fileMappingDict: continue\n print 'Processing ', lexFile\n \n with open(lexiconDir+'\\\\'+lexFile) as f:\n for line in f:\n line = line.lower().split()\n if len(line) == 1: low=0\n else:low=1\n for i in range(low,len(line)):\n key = tuple(line[:i+1])\n if key not in lookupLexiconDict:\n lookupLexiconDict[key] = [fileMappingDict[lexFile]]\n else:\n lookupLexiconDict[key].append(fileMappingDict[lexFile]) \n\n \n #pass ", "def load_nwsli(txn):\n sql = \"\"\"SELECT nwsli, river_name as r, \n proximity || ' ' || name || ' ['||state||']' as rname \n from hvtec_nwsli\"\"\"\n txn.execute(sql)\n for row in txn:\n nwsli_dict[ row['nwsli'] ] = {\n 'rname': (row['r']).replace(\"&\",\" and \"), \n 'river': (row['rname']).replace(\"&\",\" and \") }\n\n log.msg(\"nwsli_dict is loaded...\")", "def build_dict(fname):\n\t\n\twith open(fname) as file:\n\n\t\tword_count_dict = {}\n\n\t\tfor line in file:\n\t\t\tline = line.rstrip()\n\t\t\tline =line.split(' ')\n\t\t\tfor word in line:\n\t\t\t\tword = word.strip('\"!.,?_;():')\n\t\t\t\tword = word.lower()\n\t\t\t\tword_count_dict[word] = word_count_dict.get(word, 0) + 1\n\t\t#return word_count_dict\n\n\t\tfor each in word_count_dict:\n\t\t\tcount = word_count_dict[each]\n\t\t\tprint(each, count)\n\n\t\treturn", "def load_label_map(location=\"configs/label_map.txt\"):\n ret = dict()\n num_class = 0\n with open(location) as f:\n for line in f:\n line = line.strip('\\n')\n index, relation = line.split(' ')\n ret[relation] = int(index)\n ret[int(index)] = relation\n num_class += 1\n return ret", "def NOAD_to_wordnet(data):\r\n NOAD_to_wordnet = {}\r\n with open(algorithmic_map, 'r') as f:\r\n lines = f.readlines()\r\n for line in lines:\r\n noad, wordnet = line.split()\r\n NOAD_to_wordnet[noad] = wordnet\r\n with open(manual_map, 'r') as f:\r\n lines = f.readlines()\r\n for line in lines:\r\n noad, wordnet = line.split()\r\n NOAD_to_wordnet[noad] = wordnet\r\n \r\n count = 0\r\n for elem in data: \r\n if elem[\"is_target\"]:\r\n if elem[\"sense\"] not in NOAD_to_wordnet:\r\n count += 1\r\n continue\r\n noad_sense = elem[\"sense\"]\r\n elem[\"sense\"] = NOAD_to_wordnet[noad_sense]\r\n print(\"NOAD sense not in mapping text: %d\" %count)\r\n return data", "def mkwrddct(inputfile):\n fin = open(inputfile)\n words = dict()\n for line in fin:\n w = line.strip()\n words[w] = w\n return words", "def _new_wos_dict():\n wos_dict = {\n 'DI': None,\n 'TI': None,\n 'PY': None,\n 'SO': None,\n 'UT': None,\n 'DE': None,\n }\n\n return wos_dict", "def produce_uniprotID_dict(inPath, spList, outPath):\n with open(spList, 'r') as f:\n swissProtIDs = set(f.read().split())\n with open(inPath, 'r') as fIn:\n idMap = {}\n for line in fIn:\n uniprotID, otherIDtype, otherID = line.strip().split('\\t')\n if uniprotID in swissProtIDs:\n if otherIDtype == 'Gene_Name':\n otherID = otherID.upper()\n idMap[otherID] = uniprotID\n with open(outPath, 'wb') as fOut:\n pickle.dump(idMap, fOut)", "def new_counts_dict():\n\n\tIN_FILES = [\"../_semtag_dataset_webanno_tfidf_inimigo.txt\",\"../_semtag_dataset_webanno_tfidf_publico.txt\" ]\n\n\ttxt = []\n\tfor in_file in IN_FILES:\n\t with codecs.open(in_file,\"r\",\"utf-8\") as fid:\n\t txt 
+= fid.readlines()\n\t#words\n\twords = [w for m in txt for w in m.split()]\n\t#unique words\n\twords = list(set(words))\n\t#word index\n\twrd2idx = {w:-1 for w in words}\n\n\tset_trace()\n\t\n\twith open(COUNTS_DIC,\"w\") as fod:\n\t\tcPickle.dump(wrd2idx, fod, cPickle.HIGHEST_PROTOCOL)", "def syllable_dict():\n counts = dict()\n \n with open('data/Syllable_dictionary.txt') as file:\n for line in file:\n arr = line.split(' ', 1)\n if 'E' in arr[1]:\n cts = arr[1].split(' ', 1)\n counts[arr[0].strip('\\'')] = int(cts[1][0])\n counts[(arr[0].strip('\\'') + \"_\")] = int(cts[0][1])\n else:\n counts[arr[0].strip('\\'')] = int(arr[1][0])\n return counts", "def test_mapping_file_to_dict(self):\r\n s1 = ['#sample\\ta\\tb', '#comment line to skip',\r\n 'x \\t y \\t z ', ' ', '#more skip', 'i\\tj\\tk']\r\n exp = ([['x', 'y', 'z'], ['i', 'j', 'k']],\r\n ['sample', 'a', 'b'],\r\n ['comment line to skip', 'more skip'])\r\n mapres = parse_mapping_file(s1) # map_data, header, comments\r\n mapdict = mapping_file_to_dict(*mapres[:2])\r\n expdict = {'x': {'a': 'y', 'b': 'z'}, 'i': {'a': 'j', 'b': 'k'}}\r\n self.assertEqual(mapdict, expdict)", "def process_n_hmmer_output(file_name):\n dict = {}\n with file_open(file_name, \"r\") as f:\n for l in f:\n if not l.startswith(\"#\"):\n field = l.split()\n target_name = field[0]\n ali_from = int(field[6])\n ali_to = int(field[7])\n sq_len = field[10]\n # flip the co-ordinates start position is bigger then the stop position\n if ali_from > ali_to:\n ali_from = int(field[7])\n ali_to = int(field[6])\n\n if not target_name in dict:\n dict[target_name] = [sq_len, ali_from, ali_to]\n else:\n if dict[target_name][1] > ali_from:\n dict[target_name][1] = ali_from\n if dict[target_name][2] < ali_to:\n dict[target_name][2] = ali_to\n return dict", "def build(filename=\"JMdict_e.gz\", output_filename=DATABASE_FILENAME):\n # NOTE: The JMdict XML file contains XML entities, that are expanded when\n # parsed using Python's stdlib xml.etree.ElementTree like so:\n # ElementTree.parse(f). That is undesired behavior for our use-case. Oshi\n # needs to parse the short entity string, for example &adj-i; should be\n # \"adj-i\" instead of \"adjective (keiyoushi)\". 
That's why it uses an external\n # xml parser: lxml that allows you to specify whether to expand entites.\n extension = path.splitext(filename)[1].lower()\n parser = etree.XMLParser(resolve_entities=False)\n if extension == \".gz\":\n with gzip.open(filename) as f:\n tree = etree.parse(f, parser)\n elif extension == \".xml\":\n tree = etree.parse(filename, parser)\n else:\n raise ValueError(\"File extension not supported: \" + extension)\n\n entries = []\n # variables starting with x contain xml element(s)\n for xentry in tree.getroot():\n entry = {}\n entry[\"writings\"] = [x.find('keb').text for x in xentry.findall('k_ele')]\n entry[\"readings\"] = [x.find('reb').text for x in xentry.findall('r_ele')]\n xsenses = xentry.findall('sense')\n senses = []\n # last_tags will contain a reference to previously found tags (JMdict\n # specifies that when pos is empty, the previous one should be used)\n last_tags = []\n for xsense in xsenses:\n tags = []\n xtags = xsense.findall('pos') # + xsense.findall('misc')\n for xtag in xtags:\n match = re.search(r'&([\\w-]+?);', etree.tostring(xtag, encoding=\"utf-8\").decode('utf-8') or \"\")\n if match: tags.append(match.group(1))\n glosses = [x.text for x in xsense.findall('gloss')]\n senses.append({\"glosses\": glosses, \"tags\": tags or last_tags})\n last_tags = tags or last_tags\n entry[\"senses\"] = senses\n entries.append(entry)\n with open(output_filename, 'w', encoding='utf-8') as f:\n json.dump(entries, f, ensure_ascii=False)", "def dict() -> Dict[str, Pin]:", "def convert_to_assoc(input_filename, output_filename):\n out_stream = codecs.open(output_filename, 'w', encoding='utf-8')\n \n for info in read_json_stream(input_filename):\n startc = reduce_concept(info['start'])\n endc = reduce_concept(info['end'])\n rel = info['rel']\n weight = info['weight']\n\n if 'dbpedia' in info['sources'] and '/or/' not in info['sources']:\n # DBPedia associations are still too numerous and too weird to\n # associate.\n continue\n\n pairs = []\n if startc == '/c/en/person':\n if rel == '/r/Desires':\n pairs = [('/c/en/good', endc), ('/c/en/bad/neg', endc)]\n elif rel == '/r/NotDesires':\n pairs = [('/c/en/bad', endc), ('/c/en/good/neg', endc)]\n else:\n pairs = [(startc, endc)]\n elif startc == '/c/zh/人':\n if rel == '/r/Desires':\n pairs = [('/c/zh/良好', endc), ('/c/zh/不良/neg', endc)]\n elif rel == '/r/NotDesires':\n pairs = [('/c/zh/良好/neg', endc), ('/c/zh/不良', endc)]\n else:\n pairs = [(startc, endc)]\n else:\n negated = (rel.startswith('/r/Not') or rel.startswith('/r/Antonym'))\n if not negated:\n pairs = [(startc, endc)]\n else:\n pairs = [(startc, endc + '/neg'), (startc + '/neg', endc)]\n\n for (start, end) in pairs:\n line = \"%(start)s\\t%(end)s\\t%(weight)s\" % {\n 'start': start,\n 'end': end,\n 'weight': weight,\n }\n print(line, file=out_stream)", "def load_cmudict():\n with open(\"text/en/cmudict-0.7b.txt\", encoding=\"ISO-8859-1\") as file_reader:\n cmudict = (line.strip().split(\" \") for line in islice(file_reader, 126, 133905))\n\n cmudict = {format_alt_entry(word): pronunciation for word, pronunciation in cmudict}\n\n return cmudict", "def data_petrol_stations():\n petrol_stations = {}\n with codecs.open('azs.txt', 'r', encoding='UTF-8') as file_in:\n for string in file_in.readlines():\n string = string.split()\n station_number = int(string[0])\n queue_length = int(string[1])\n petrol_stations[station_number] = {}\n petrol_stations[station_number]['queue'] = queue_length\n petrol_stations[station_number]['kinds'] = string[2:]\n\n return 
petrol_stations", "def create_dictionary(file_dir):\r\n\tword_list = []\r\n\tfile_list = read_files(file_dir, \"lab\") # step 7\r\n\tfor file in file_list:\r\n\t\twith open(file, 'r') as f:\r\n\t\t\ttext = f.read()\r\n\t\tword_list = store_to_dictionary(text, word_list) # step 8cii\r\n\tmake_dictionary_file(file_dir, word_list) # step 9\r", "def parse_prnu_file():\n hdf_name = r'C:\\Users\\nmishra\\Workspace\\TEMPO\\PRNU_map\\\n batch_2017Jun20_TEMPO_PRNU_-20Tccd__46Tfpe_3pixSpectral_3pixSpatial.h5'\n file = h5py.File(hdf_name, 'r')\n prnu = file.get('prnu')\n prnu = np.array(prnu).transpose()\n quad_d = prnu[2:1030, 10:1034]\n quad_c = prnu[2:1030, 1078:2102]\n quad_a = prnu[1062:2090, 10:1034]\n quad_b = prnu[1062:2090, 1078:2102]\n prnu_map_lower = np.concatenate((quad_d, quad_c), axis=1)\n prnu_map_upper = np.concatenate((quad_a, quad_b), axis=1)\n prnu_map = np.concatenate((prnu_map_lower, prnu_map_upper), axis=0)\n return prnu_map", "def parseDuMap(output):\n #z00du00(DB-SL-MSL-CH-SCH) : 00-00-0-0-0 01-01-0-0-0 04-04-2-0-0 05-05-2-0-0\n # 02-02-1-1-0 03-03-1-1-0 02-02-1-0-0 03-03-1-0-0\n duMap = {}\n for l in output:\n \n l_a = l.split(\":\")\n #print l_a\n #sys.exit(1)\n du = l_a[0]\n # string of 00-00-0-0-0 01-01-0-0-0\n sbChs = l_a[1]\n \n #z00du00(DB-SL-MSL-CH-SCH)\n # get 0 and from z00du0 9\n partDu = getDuPart(du)\n \n sbChArr = getAllSlChSbCh(sbChs)\n \n duMap[partDu] = sbChArr\n \n \n return duMap", "def __getUniprotChainMapping(self, siftsSummaryDirPath, csvFileName):\n #\n #\n fp = os.path.join(siftsSummaryDirPath, csvFileName)\n rowDL = self.__readSiftsSummaryFile(fp)\n logger.info(\"Length of SIFTS UniProt summary file %s %d\", csvFileName, len(rowDL))\n logger.debug(\"%r\", list(rowDL[0].items()))\n uD = {}\n # uIdD = {}\n for rowD in rowDL:\n entryId = rowD[\"PDB\"]\n chainId = rowD[\"CHAIN\"]\n unpId = rowD[\"SP_PRIMARY\"]\n #\n entitySeqBeg = int(rowD[\"RES_BEG\"]) if rowD[\"RES_BEG\"].isdigit() else None\n entitySeqEnd = int(rowD[\"RES_END\"]) if rowD[\"RES_END\"].isdigit() else None\n entityLength = entitySeqEnd - entitySeqBeg + 1\n # authSeqBeg = int(rowD[\"PDB_BEG\"]) if rowD[\"PDB_BEG\"].isdigit() else None\n # authSeqEnd = int(rowD[\"PDB_END\"]) if rowD[\"PDB_END\"].isdigit() else None\n unpSeqBeg = int(rowD[\"SP_BEG\"]) if rowD[\"SP_BEG\"].isdigit() else None\n unpSeqEnd = int(rowD[\"SP_END\"]) if rowD[\"SP_END\"].isdigit() else None\n # dD = {\"UP\": unpId, \"BG\": entitySeqBeg, \"ND\": entitySeqEnd, \"AUBG\": authSeqBeg, \"AUND\": authSeqEnd, \"UBG\": unpSeqBeg, \"UND\": unpSeqEnd}\n # dD = {\"UP\": unpId, \"BG\": entitySeqBeg, \"UBG\": unpSeqBeg, \"LEN\": entityLength}\n dD = {\"UP\": unpId, \"BG\": entitySeqBeg, \"LEN\": entityLength, \"UBG\": unpSeqBeg, \"UND\": unpSeqEnd}\n uD.setdefault(entryId.upper(), {}).setdefault(chainId, {}).setdefault(\"UNPAL\", []).append(dD)\n uD.setdefault(entryId.upper(), {}).setdefault(chainId, {}).setdefault(\"UNPID\", []).append(unpId)\n #\n logger.info(\"UniProt mapping for %d entries\", len(uD))\n # -----\n return uD", "def read_mapping_file(map_file):\n new_name_old_name = {}\n with open(map_file, 'r') as z:\n for line in z:\n clean = line.strip().split()\n\n new_name_old_name[clean[1]] = clean[0]\n\n return new_name_old_name", "def convert(self) -> dict:\n def get_triples_linker():\n \"\"\"\n Retrieve all of the triples linking phrases from the AMR object.\n We use the Framenet words that are found in the AMR Object as the linker.\n \"\"\"\n triples_linkers = []\n concepts = list(self.amr_obj.concepts())\n\n # Retrieve 
all concept that has the word ARG in it\n for concept in concepts:\n triple = self.amr_obj.triples(head=concept[0])\n items = [item for item in triple if 'ARG' in item[1]]\n if len(items) > 0:\n triples_linkers.append(triple)\n return triples_linkers\n\n def generate_triples():\n\n def fixing_annotation(key, n):\n \"\"\"\n Fixing some inconsistency in the annotation\n \"\"\"\n if key + '.' + n not in self.propbank:\n key = key.replace('-', '_')\n return key + '.' + n\n\n def is_agent(f_rel, rel_var):\n \"\"\"\n Checking whether the role is an agent (denoted by 'pag') or not\n \"\"\"\n # TODO: beside 'pag' is there any other role?\n m = re.match(r'(.*)-(\\d*)$', rel_var)\n key = m.group(1)\n n = m.group(2)\n\n # some annotation does not have the correspondence frameset, just put false if found\n if n == '00':\n return False\n\n concept = fixing_annotation(key, n)\n roleset = self.propbank[concept]\n\n m = re.match(r':ARG(.).*', f_rel[1])\n n = int(m.group(1))\n roles = roleset.getElementsByTagName('role')\n\n for role in roles:\n if dict(role.attributes)['n'].value == str(n) and dict(role.attributes)['f'].value.lower() == 'pag':\n return True\n return False\n\n # Case 1: ARG\n for triple_linker in self.triples_linkers:\n triple = [None, triple_linker[0][0], []]\n for rel in triple_linker:\n if 'ARG' in rel[1] and 'of' not in rel[1]:\n # check whether the propbank verb rel[0] and its argument rel[2] is an agent or not\n if is_agent(rel, self.var2c[rel[0]].__str__()):\n triple[0] = rel[2]\n else:\n triple[2].append(rel[2])\n if not (triple[0] is None and triple[2] == []):\n self.triples[triple[1]] = triple\n\n # Case 2: ARG-of\n for triple_linker in self.triples_linkers:\n for rel in triple_linker:\n if 'ARG' in rel[1] and 'of' in rel[1]:\n if rel[2] not in self.triples:\n self.triples[rel[2]] = [None, rel[2], []]\n if is_agent(rel, self.var2c[rel[2]].__str__()):\n self.triples[rel[2]][0] = rel[0]\n else:\n self.triples[rel[2]][2].append(rel[0])\n return self.triples\n\n self.triples_linkers = get_triples_linker()\n return generate_triples()", "def get_sample_info(lines):\r\n mapping_data, header, comments = parse_mapping_file(lines)\r\n labels = [\"from\", \"to\", \"eweight\", \"consensus_lin\"]\r\n node_labels = [\"node_name\", \"node_disp_name\", \"ntype\", \"degree\",\r\n \"weighted_degree\", \"consensus_lin\"]\r\n cat_by_sample = {}\r\n sample_by_cat = defaultdict(list)\r\n meta_dict = {}\r\n category_labels = header[1:-1]\r\n labels.extend(category_labels)\r\n node_labels.extend(category_labels)\r\n label_list = [[] for c in category_labels]\r\n for r in mapping_data:\r\n categories = r[0:len(category_labels) + 1]\r\n sample = categories[0]\r\n meta_dict[sample] = ['\\t'.join(categories[1:]), 0]\r\n\r\n cat_by_sample[sample] = [(l.strip(), c.strip())\r\n for l, c in zip(category_labels, categories[1:])]\r\n\r\n cat_list = []\r\n for i, (l, c) in enumerate(zip(category_labels, categories[1:])):\r\n if c not in label_list[i]:\r\n label_list[i].append(c)\r\n l = l.strip()\r\n c = c.strip()\r\n cat_list.append((l, c))\r\n sample_by_cat[(l, c)].append(sample)\r\n\r\n cat_by_sample[sample] = cat_list\r\n\r\n return cat_by_sample, sample_by_cat, len(category_labels), meta_dict,\\\r\n labels, node_labels, label_list", "def createDictionary(self):\n\t\tdictionary: dict = {}\n\t\tdictionary.update({'deckname': self.mDeckName})\n\t\tdictionary.update({'filename': self.autoFilename})\n\t\tdictionary.update({'creatorname': str(self.mCreatorname)})\n\t\tdictionary.update({'maxAttrPoints': 
str(self.mMaxAttributePoints)})\n\t\tminionListDict: dict = {}\n\t\tfor minion in self.mMinionSet:\n\t\t\tminionDict: dict = {}\n\t\t\tminionDict.update({'minionName': str(minion.mMinionName)})\n\t\t\tminionDict.update({'attack': str(minion.mAttackPoints)})\n\t\t\tminionDict.update({'hp': str(minion.mHealthPoints)})\n\t\t\tskillList: list = minion.mSkills\n\t\t\tskillNames: list = []\n\t\t\tfor skill in skillList:\n\t\t\t\tskillNames.append(skill.mSkillName)\n\t\t\tminionDict.update({'skills': skillNames})\n\t\t\tminionListDict.update({minion.mMinionName: minionDict})\n\t\tdictionary.update({'minions': minionListDict})\n\t\tdictionary.update({'id' : hash(str(dictionary))}) # TODO LPO: let DB handle that\n\t\tself.mDeckDict = dictionary\n\t\treturn dictionary", "def create_model_owc(text: str) -> Dict[str, Set[str]]:\n dict_so_far = {}\n list_of_words = str.split(text)\n\n\n for x in range(0, len(list_of_words)):\n \"\"\"\n check if the word is followed by a period and add it to the follow list if it is, then remove the period to \n check if the word is followed by something else\n \"\"\"\n if list_of_words[x][-1] == '.':\n list_of_words[x] = list_of_words[x][0:-1]\n update_follow_set(dict_so_far, list_of_words[x], '.')\n\n else:\n update_follow_set(dict_so_far, list_of_words[x], list_of_words[x + 1].rstrip('.'))\n return dict_so_far", "def create_word_map(tokenized_descriptions_file_path, word_dictionary_output_path):\n if os.path.exists(word_dictionary_output_path):\n print(\"Word map already exists in workspace. Will be reused.\")\n return\n\n print(\"Word map not found. Generating....\")\n\n words_list = []\n words_to_id = {}\n\n with open(tokenized_descriptions_file_path, 'r') as file:\n for line in file:\n tokens = line.strip().split(\",\")\n words_list.extend(tokens[1:])\n\n # remove duplicate words\n words_list = list(set(words_list))\n\n # sorting the words\n words_list = sorted(words_list)\n for i in range(len(words_list)):\n words_to_id[words_list[i]] = i\n\n with open(word_dictionary_output_path, 'w') as f:\n [f.write('{0},{1}'.format(key, value) + \"\\n\") for key, value in words_to_id.items()]", "def map2mw_Des(d,k1,entry):\n if k1 in map2mw_special_Des:\n return map2mw_special_Des[k1]\n regexes = [\n u'<ab>dés.</ab> de {%(.*?)%}',\n u'<ab>dés.</ab> {%(.*?)%}',\n u'<ab>dés.</ab> du <ab>c.</ab> de {%(.*?)%}',\n\n ]\n line = entry.datalines[0] # first line of entry in bur.txt\n for regex in regexes:\n m = re.search(regex,line)\n if m:\n root = m.group(1) # root in \n root_slp1=roman_slp1_mw(root,'verb',d)\n if root_slp1 != None:\n return root_slp1\n\n return '?'", "def preprocess_oracle(self):\n self.morpheme_to_id, self.max_morph_per_word = self.build_oracle_vocab()\n self.subword_vocab_size = len(self.morpheme_to_id)\n with open(self.sub_vocab_file, 'wb') as f:\n pickle.dump((self.morpheme_to_id, self.max_morph_per_word), f)", "def get_mapping():\n \n import pandas as pd\n data = pd.read_csv('/home/yuheng/Downloads/ADE20K_2016_07_26/objectInfo150.txt',sep='\\t',lineterminator='\\n') \n mapping = {}\n for i in range(150):\n line = data.loc[i]\n mapping[ int(line['Idx']) ] = line['Name']\n \n return mapping", "def build_dictionary_gensim():\r\n\t# if load_dictionary_gensim():\r\n\t#\treturn\r\n\t\r\n\tglobal gensim_dictionary, common_corpus_list\r\n\t\r\n\tprint('\\nbuilding dictionary')\r\n\tgensim_dictionary = gensim.corpora.Dictionary()\r\n\t\r\n\tfor v in 
common_corpus_list:\r\n\t\tgensim_dictionary.add_documents([v[1].lower().split()])\r\n\t\t\r\n\tgensim_dictionary.save_as_text(paths.path_data_dictionary_txt)\r\n\tgensim_dictionary.save(paths.path_data_dictionary_dict)\r\n\r\n\t# print(gensim_dictionary.token2id)\r\n\tprint(gensim_dictionary)", "def define_info_dict():\n\n d = {\n \"PRED\": {\n \"COLUMN\": [\"predicted_class\"],\n \"Number\": \"1\",\n \"Type\": \"String\",\n \"Description\": \"Predicted class: somatic, germline, artifact\",\n },\n \"PROB\": {\n \"COLUMN\": [\"prob_s\", \"prob_g\", \"prob_a\"],\n \"Number\": \"3\",\n \"Type\": \"Float\",\n \"Description\": \"Prediction probability of \"\n \"being somatic, germline, artifact in this order\",\n },\n \"SNP\": {\n \"COLUMN\": [\"is_on_db\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Present on SNP database (modified dbSNP/gnomAD (default) or user-provided database)\",\n },\n \"ANNO\": {\n \"COLUMN\": [\"annotation\"],\n \"Number\": \".\",\n \"Type\": \"String\",\n \"Description\": \"Indel annotation formatted as \"\n \"GeneSymbol|RefSeqAccession|CodonPos|IndelEffect\"\n \"Delimited by comma for multiple isoforms\",\n },\n \"COSMIC_CNT\": {\n \"COLUMN\": [\"cosmic_cnt\"],\n \"Number\": \"1\",\n \"Type\": \"Integer\",\n \"Description\": \"COSMIC count in v89\",\n },\n \"MAXMAF\": {\n \"COLUMN\": [\"max_maf\"],\n \"Number\": \"1\",\n \"Type\": \"Float\",\n \"Description\": \"Maximum minor allele frequency (MAF) \"\n \"reported in dbSNP, ClinVar and gnomAD non-cancer population\",\n },\n \"COMMON\": {\n \"COLUMN\": [\"is_common\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Common in dbSNP or MAXMAF > 0.01\",\n },\n \"CLIN\": {\n \"COLUMN\": [\"clin_info\"],\n \"Number\": \"1\",\n \"Type\": \"String\",\n \"Description\": \"ClinVar annotation formatted as ClinicalSignificance|Condition\",\n },\n \"ICP\": {\n \"COLUMN\": [\"indel_complexity\"],\n \"Number\": \"1\",\n \"Type\": \"Integer\",\n \"Description\": \"Indel complexity: mismatches around the indel measured by edit distance\",\n },\n \"DSM\": {\n \"COLUMN\": [\"dissimilarity\"],\n \"Number\": \"1\",\n \"Type\": \"Float\",\n \"Description\": \"Dissimilarity: edit distance between indel and flanking sequences\",\n },\n \"ISZ\": {\n \"COLUMN\": [\"indel_size\"],\n \"Number\": \"1\",\n \"Type\": \"Integer\",\n \"Description\": \"Indel size\",\n },\n \"REP\": {\n \"COLUMN\": [\"repeat\"],\n \"Number\": \"1\",\n \"Type\": \"Integer\",\n \"Description\": \"Repeat: count of the indel-sequence repeats in flanking region\",\n },\n \"UQM\": {\n \"COLUMN\": [\"is_uniq_mapped\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Supported by uniquely mapped reads\",\n },\n \"NEB\": {\n \"COLUMN\": [\"is_near_boundary\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Near exon boundary\",\n },\n \"EQX\": {\n \"COLUMN\": [\"equivalence_exists\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Equivalent alignments exist for the indel\",\n },\n \"BID\": {\n \"COLUMN\": [\"is_bidirectional\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Supported by forward and reverse reads\",\n },\n \"MTA\": {\n \"COLUMN\": [\"is_multiallelic\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Multialleleic\",\n },\n \"FRM\": {\n \"COLUMN\": [\"is_inframe\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"In-frame indel\",\n },\n \"SPL\": {\n \"COLUMN\": [\"is_splice\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n 
\"Description\": \"Located in splice region\",\n },\n \"TRN\": {\n \"COLUMN\": [\"is_truncating\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Truncating indel\",\n },\n \"CDD\": {\n \"COLUMN\": [\"is_in_cdd\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Located in conserved domain\",\n },\n \"LOC\": {\n \"COLUMN\": [\"indel_location\"],\n \"Number\": \"1\",\n \"Type\": \"Float\",\n \"Description\": \"Relative indel location within the transcript coding region\",\n },\n \"NMD\": {\n \"COLUMN\": [\"is_nmd_insensitive\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Insensitive to nonsense mediated decay\",\n },\n \"IPG\": {\n \"COLUMN\": [\"ipg\"],\n \"Number\": \"1\",\n \"Type\": \"Float\",\n \"Description\": \"Indels per gene\",\n },\n \"LEN\": {\n \"COLUMN\": [\"cds_length\"],\n \"Number\": \"1\",\n \"Type\": \"Float\",\n \"Description\": \"Coding sequence length. Median value if multiple isoforms exist\",\n },\n \"LC\": {\n \"COLUMN\": [\"lc\"],\n \"Number\": \"1\",\n \"Type\": \"Float\",\n \"Description\": \"Linguistic complexity: diversity of k-mers in flanking 50-bp region\",\n },\n \"LLC\": {\n \"COLUMN\": [\"local_lc\"],\n \"Number\": \"1\",\n \"Type\": \"Float\",\n \"Description\": \"Local linguistic complexity: diversity of k-mers in flanking 6-bp region\",\n },\n \"GC\": {\n \"COLUMN\": [\"gc\"],\n \"Number\": \"1\",\n \"Type\": \"Float\",\n \"Description\": \"GC-content in flanking 50-bp region\",\n },\n \"LGC\": {\n \"COLUMN\": [\"local_gc\"],\n \"Number\": \"1\",\n \"Type\": \"Float\",\n \"Description\": \"Local GC-content in flanking 6-bp region\",\n },\n \"SG\": {\n \"COLUMN\": [\"strength\"],\n \"Number\": \"1\",\n \"Type\": \"Float\",\n \"Description\": \"DNA bond strength of 2-mers in flanking 50-bp region\",\n },\n \"LSG\": {\n \"COLUMN\": [\"local_strength\"],\n \"Number\": \"1\",\n \"Type\": \"Float\",\n \"Description\": \"Local DNA bond strength of 2-mers in flanking 6-bp region\",\n },\n \"INS\": {\n \"COLUMN\": [\"is_ins\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Insertion\",\n },\n \"ATI\": {\n \"COLUMN\": [\"is_at_ins\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Single insertion of A or T\",\n },\n \"ATD\": {\n \"COLUMN\": [\"is_at_del\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Single deletion of A or T\",\n },\n \"GCI\": {\n \"COLUMN\": [\"is_gc_ins\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Single insertion of G or C\",\n },\n \"GCD\": {\n \"COLUMN\": [\"is_gc_del\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Single deletion of G or C\",\n },\n \"ALTC\": {\n \"COLUMN\": [\"alt_count\"],\n \"Number\": \"1\",\n \"Type\": \"Integer\",\n \"Description\": \"Alt count: count of unique reads supporting ALT allele\",\n },\n \"REFC\": {\n \"COLUMN\": [\"ref_count\"],\n \"Number\": \"1\",\n \"Type\": \"Integer\",\n \"Description\": \"Ref count: count of unique reads supporting REF allele\",\n },\n \"RCF\": {\n \"COLUMN\": [\"reclassified\"],\n \"Number\": \"0\",\n \"Type\": \"Flag\",\n \"Description\": \"Reclassification applied\",\n },\n \"RQB\": {\n \"COLUMN\": [\"filtered\", \"rescued\"],\n \"Number\": \"1\",\n \"Type\": \"String\",\n \"Description\": \"Indel used to rescue this entry formatted as CHROM:POS:REF:ALT\",\n },\n }\n\n return d", "def cmu_to_json(p_in, p_out):\n\td = {}\n\tf_in = open(p_in, 'r')\n\tfor line in f_in:\n\t\tif not line[0].isalpha():\n\t\t\tcontinue\n\t\tword, syls 
= line.split(' ')\n\t\tnum = num_syls(syls)\n\t\tlast = last_syl_perfect(syls)\n\t\tif num not in d:\n\t\t\td[num] = {}\n\t\tif last not in d[num]:\n\t\t\td[num][last] = []\n\t\td[num][last].append(word.lower())\n\tf_in.close()\n\n\t# Save the dictionary to the output file in JSON\n\tf_out = open(p_out, 'w')\n\tjson.dump(d, f_out, separators=(',', ':'))\n\tf_out.close()", "def get_snps(self):\n d = {}\n with open(self.snp_file, 'r') as infile:\n for row in infile:\n if row:\n row_split = row.strip().split('\\t')\n chrom = row_split[0]\n pos = row_split[1]\n name = row_split[3].split('|')\n snp_id = name[0]\n gene = name[1]\n ref_allele = name[2]\n alt_alleles = name[3]\n freq = name[4]\n genome = name[5]\n d[snp_id] = {\n 'chrom': chrom,\n 'pos': pos,\n 'ref': ref_allele,\n 'alt': alt_alleles,\n 'gene': gene,\n 'maf': freq,\n 'genome_build': genome\n }\n return d", "def getSHSIDDict():\n m = {}\n fin = open(\"SHSDataset/Chromas/msd_keys_mapping.cly\")\n for l in fin.readlines():\n l = l.rstrip()\n f = l.split(\",\")\n m[f[0]] = int(f[1])\n fin.close()\n return m", "def set_country_populations_dict():\n countries=country_populations.split('\\n')\n for country in countries:\n country_data= country.split('\\t')\n name= country_data[1]\n pop_2017= country_data[5]\n percentage= country_data[6]\n country_populations_dict.update({name:(pop_2017,percentage)})\n return country_populations_dict", "def uniprot_txt_parser(uniprot_txt_lines):\n uniprot = {}\n entry_line = [i for i,l in enumerate(uniprot_txt_lines) if l[:2]=='ID']\n entry_line.append(len(uniprot_txt_lines))\n begin_end = [(begin,entry_line[i+1]) for i,begin in enumerate(entry_line[:-1])]\n for begin,end in begin_end:\n for line in uniprot_txt_lines[begin:end]:\n line = line.rstrip('\\r\\n')\n line = line.rstrip('.')\n line = line.replace(';',' ')\n words = line.split()\n if words[0] == 'AC':\n acc = words[1]\n uniprot[acc] = {}\n elif words[0] == 'DR' and words[1] =='InterPro':\n if uniprot[acc].has_key('interpro'):\n uniprot[acc]['interpro'].append((words[2],1))\n else:\n uniprot[acc]['interpro'] = [(words[2],1)]\n elif words[0] == 'DR' and words[1] == 'Pfam':\n if uniprot[acc].has_key('pfam'):\n uniprot[acc]['pfam'].append((words[2],int(words[-1])))\n else:\n uniprot[acc]['pfam'] = [(words[2],int(words[-1]))]\n elif words[0] == 'DR' and words[1] == 'SMART':\n if uniprot[acc].has_key('smart'):\n uniprot[acc]['smart'].append((words[2],words[-1]))\n else:\n uniprot[acc]['smart'] = [(words[2],words[-1])]\n elif words[0] == 'DR' and words[1] == 'SUPFAM':\n if uniprot[acc].has_key('supfam'):\n uniprot[acc]['supfam'].append((words[2],words[-1]))\n else:\n uniprot[acc]['supfam'] = [(words[2],words[-1])]\n elif words[0] == 'DR' and words[1] == 'PROSITE':\n if uniprot[acc].has_key('prosite'):\n uniprot[acc]['prosite'].append((words[2],words[-1]))\n else:\n uniprot[acc]['prosite'] = [(words[2],words[-1])]\n # elif words[0] == 'DR' and words[1] =='PDB':\n # w = words[-1].replace('/',' ')\n # w = w.replace('=',' ')\n # w = w.replace('-',' ')\n # w = w.split()\n # w = words[2:-1]+w\n\n # if uniprot[acc].has_key('pdb'):\n # uniprot[acc]['pdb'].append(w)\n # else:\n # uniprot[acc]['pdb'] = [w]\n\n return uniprot", "def _buildSpecializeMap(cls, namespaces, interwikimap):\n\n from mwlib.lang import languages\n \n res = {}\n\n def reg(name, num):\n name = name.lower()\n if num == namespace.NS_CATEGORY:\n res[name] = (CategoryLink, num)\n elif num == namespace.NS_FILE:\n res[name] = (ImageLink, num)\n else:\n res[name] = (NamespaceLink, num)\n\n for 
name, num in namespaces.iteritems():\n if isinstance(name, basestring):\n reg(name, num)\n else:\n for n in name:\n reg(n, num)\n\n for prefix, d in interwikimap.items():\n if 'language' in interwikimap[prefix] or prefix in languages:\n res[prefix] = (LangLink, prefix)\n else:\n res[prefix] = (InterwikiLink, d.get('renamed', prefix))\n \n return res", "def create_charmap_dictionary(input_data='nltk', file_path=None, logger=None):\n data_list = fetch_word_list(input_data, file_path, logger)\n\n # Fetch the dictionary\n dict_file_path = fetch_dictionary_information()\n\n # Creating and Persisting the dictionary\n create_dictionary(data_list, dict_file_path, CharacterMap)\n return dict_file_path", "def create_wrs_to_mgrs_lookup(wrs_shapefile):\n\n shapefile_driver = ogr.GetDriverByName(\"ESRI Shapefile\")\n\n grid_ds = shapefile_driver.Open(wrs_shapefile, 0)\n\n layer = grid_ds.GetLayer()\n\n\n path_row_list = []\n\n total_features = layer.GetFeatureCount()\n\n for idx, f in enumerate(layer):\n\n print(f'{idx} of {total_features}')\n\n footprint = f.GetGeometryRef().ExportToWkt()\n pathrow = f.GetField('PR')\n\n\n mgrs_list = find_mgrs_intersection_large(footprint)\n print(mgrs_list)\n mgrs_list_fine = []\n\n mgrs_list_fine += find_mgrs_intersection_100km(footprint, mgrs_list)\n\n print('for path row')\n print(pathrow)\n print(mgrs_list)\n print(mgrs_list_fine)\n print('\\n\\n')\n path_row_list.append((str(pathrow), ' '.join(mgrs_list_fine)))\n\n with open('wrs_to_mgrs.csv','w', newline='') as out:\n csv_out = csv.writer(out)\n csv_out.writerow(['pathrow','mgrs_list'])\n\n for row in path_row_list:\n csv_out.writerow(row)", "def read_dictionary(filename='/Users/Paul/Documents/c06d.txt'):\n d = dict()\n fin = open(filename)\n for line in fin:\n\n # skip over the comments\n if line[0] == '#': continue\n\n t = line.split()\n word = t[0].lower()\n pron = ' '.join(t[1:])\n d[word] = pron\n\n return d", "def parse_etymology() -> Dict[str, str]:\n with open(PATH_ETYMOLOGY, encoding=\"utf-8\") as f:\n\n buffer = defaultdict(list)\n for line in f:\n line = line.strip()\n\n # Skip empty lines\n if not line:\n continue\n\n # New block\n if line[1] == \" \" and line[2] == \"(\":\n hanzi = line[0]\n else:\n buffer[hanzi].append(line)\n\n result = {}\n for k,v in buffer.items():\n result[k] = \" \".join(v)\n\n return result", "def composeWorkplaceOntology():\n\n import ossPyFuncs \n import pandas as pd\n \n #mysql query to extract full table from government organizations\n #certian table columns feature capital letters which cases uproblems\n postgreSql_selectQuery=\"SELECT * FROM us_gov_manual.us_govman_2019 ;\"\n #pass querry and obtain table\n govTable=ossPyFuncs.queryToPDTable(postgreSql_selectQuery)\n\n #mysql query to obtain academic instutions\n postgreSql_selectQuery=\"SELECT institution FROM hipolabs.universities ;\"\n #pass querry and obtain table\n univTable=ossPyFuncs.queryToPDTable(postgreSql_selectQuery)\n \n postgreSql_selectQuery=\"SELECT company FROM forbes.fortune2018_us1000;\"\n businesses1=ossPyFuncs.queryToPDTable(postgreSql_selectQuery)\n \n postgreSql_selectQuery=\"SELECT company FROM forbes.fortune2019_us1000;\"\n businesses2=ossPyFuncs.queryToPDTable(postgreSql_selectQuery)\n \n postgreSql_selectQuery=\"SELECT company FROM forbes.fortune2020_global2000;\"\n businesses3=ossPyFuncs.queryToPDTable(postgreSql_selectQuery)\n\n #combine theinsitutions into a vector\n 
combinedSeries=[govTable['AgencyName'],univTable['institution'],businesses1['company'],businesses2['company'],businesses3['company']]\n #turn the multi item vector into a single series\n fullWordbank=pd.concat(combinedSeries)\n #turn that series into a pd dataframe\n wordbankTable=pd.DataFrame(fullWordbank.unique())\n\n return wordbankTable", "def dict_initialise(metadata, analysistype):\n for sample in metadata:\n sample[analysistype].dnaseq = dict()\n sample[analysistype].protseq = dict()\n sample[analysistype].ntindex = dict()\n sample[analysistype].aaindex = dict()\n sample[analysistype].ntalign = dict()\n sample[analysistype].aaalign = dict()\n sample[analysistype].aaidentity = dict()\n return metadata", "def _predefined_mapping_tables(dset):\n # for now this is mechanism independent.\n to_airnow = {\n \"OZONE\": \"o3\",\n \"PM2.5\": \"PM2_5_DRY\",\n \"PM10\": \"PM10\",\n \"CO\": \"co\",\n \"SO2\": \"so2\",\n \"NO\": \"no\",\n \"NO2\": \"no2\",\n }\n dset = dset.assign_attrs({\"mapping_tables_to_airnow\": to_airnow})\n return dset", "def produce_geneName_dict(inPath, spList, outPath):\n with open(spList, 'r') as f:\n swissProtIDs = set(f.read().split())\n with open(inPath, 'r') as fIn:\n idMap = {}\n for line in fIn:\n uniprotID, otherIDtype, otherID = line.strip().split('\\t')\n if otherIDtype == 'Gene_Name':\n if uniprotID in swissProtIDs:\n idMap[uniprotID] = otherID.upper()\n with open(outPath, 'wb') as fOut:\n pickle.dump(idMap, fOut)", "def populateDict(string, region, data):\n found = False\n dictionary = {}\n string_norm = norm.normalize_alphabet(string)\n fName = data[\"properties\"][\"cornuData\"][\"toponym_arabic\"]\n fName_norm = norm.normalize_alphabet(fName)\n sName = re.split('،|,',data[\"properties\"][\"cornuData\"][\"toponym_arabic_other\"])\n cornu_reg = data[\"properties\"][\"cornuData\"][\"region_code\"]\n key = ','.join([string] + region.strip().split(\",\"))\n key_norm = ','.join([string_norm] + region.strip().split(\",\"))\n\n if not any(x in dictionary for x in [key, key_norm]): \n if (fuzz.ratio(string_norm , fName) >= 90 or fuzz.ratio(string , fName) >= 90\n or any(x in [fName, fName_norm] for x in [string, string_norm])) \\\n and cornu_reg in region.strip().split(\",\"):\n #print(\"key fName: \", string, \"-\", key)\n dictionary[key] = {}\n dictionary[key]['lat']= data[\"properties\"][\"cornuData\"][\"coord_lat\"]\n dictionary[key]['lon'] = data[\"properties\"][\"cornuData\"][\"coord_lon\"]\n dictionary[key]['region'] = data[\"properties\"][\"cornuData\"][\"region_code\"]\n dictionary[key]['cornuUri'] = data[\"properties\"][\"cornuData\"][\"cornu_URI\"]\n found = True\n\n else:\n for n in sName:\n if (fuzz.ratio(string_norm , n.strip()) >= 90 or fuzz.ratio(string , n.strip()) >= 90 or any(x in [n.strip(), norm.normalize_alphabet(n.strip())] for x in [string, string_norm])) and cornu_reg in region.strip().split(\",\"):\n #print(\"key sName: \", string, \"-\", key)\n dictionary[key] = {}\n dictionary[key]['lat']= data[\"properties\"][\"cornuData\"][\"coord_lat\"]\n dictionary[key]['lon'] = data[\"properties\"][\"cornuData\"][\"coord_lon\"]\n dictionary[key]['region'] = data[\"properties\"][\"cornuData\"][\"region_code\"]\n dictionary[key]['cornuUri'] = data[\"properties\"][\"cornuData\"][\"cornu_URI\"]\n found = True\n break\n\n '''if key not in dictionary and found == False: \n #print(\"not in dic2: \", key)\n dictionary[key] = {}\n dictionary[key]['lat']= \"null\"\n dictionary[key]['lon'] = \"null\"\n dictionary[key]['region'] = region\n 
dictionary[key]['cornuUri'] = \"null\"'''\n return dictionary", "def total_hpwl(file_name):\r\n\r\n nodes = {}\r\n netsx = {}\r\n netsy = {}\r\n counter = 0\r\n hpwl = 0\r\n\r\n with open(file_name + \".nodes\") as f:\r\n for i, line in enumerate(f):\r\n\r\n line = line.strip()\r\n if line:\r\n if re.match(r'[a-z]{1}[0-9]+', line.split()[0]):\r\n if line.split()[0] not in nodes:\r\n nodes[line.split()[0]] = []\r\n nodes[line.split()[0]].append(line.split()[1])\r\n nodes[line.split()[0]].append(line.split()[2])\r\n\r\n with open(file_name + \".pl\") as f:\r\n for i, line in enumerate(f):\r\n\r\n line = line.strip()\r\n if line:\r\n if re.match(r'[a-z]{1}[0-9]+', line.split()[0]):\r\n nodes[line.split()[0]].append(line.split()[1])\r\n nodes[line.split()[0]].append(line.split()[2])\r\n\r\n with open(file_name + \".nets\") as f:\r\n for i, line in enumerate(f):\r\n\r\n line = line.strip()\r\n if line:\r\n if \"NetDegree\" in line:\r\n num_of_nodes = int(line.split()[2])\r\n net_name = \"n\" + str(counter)\r\n counter += 1\r\n netsx[net_name] = []\r\n netsy[net_name] = []\r\n elif re.match(r'[a-z]{1}[0-9]+', line.split()[0]):\r\n if net_name in netsx:\r\n if len(netsx[net_name]) == 0:\r\n netsx[net_name].append(int(nodes[line.split()[0]][2]))\r\n netsx[net_name].append(int(nodes[line.split()[0]][2]) + int(nodes[line.split()[0]][0]))\r\n\r\n netsy[net_name].append(int(nodes[line.split()[0]][3]))\r\n netsy[net_name].append(int(nodes[line.split()[0]][3]) + int(nodes[line.split()[0]][1]))\r\n else:\r\n if int(nodes[line.split()[0]][2]) < netsx[net_name][0]:\r\n netsx[net_name][0] = int(nodes[line.split()[0]][2])\r\n\r\n if int(nodes[line.split()[0]][2]) + int(nodes[line.split()[0]][0]) > netsx[net_name][1]:\r\n netsx[net_name][1] = int(nodes[line.split()[0]][2]) + int(nodes[line.split()[0]][0])\r\n\r\n if int(nodes[line.split()[0]][3]) < netsy[net_name][0]:\r\n netsy[net_name][0] = int(nodes[line.split()[0]][3])\r\n\r\n if int(nodes[line.split()[0]][3]) + int(nodes[line.split()[0]][1]) > netsy[net_name][1]:\r\n netsy[net_name][1] = int(nodes[line.split()[0]][3]) + int(nodes[line.split()[0]][1])\r\n\r\n for net in netsx:\r\n hpwl += float(netsx[net][1] - netsx[net][0] + netsy[net][1] - netsy[net][0])\r\n\r\n return (hpwl)" ]
[ "0.61954165", "0.5963542", "0.5888741", "0.5863957", "0.58637106", "0.58131486", "0.5805316", "0.57554185", "0.5731265", "0.57232267", "0.5722358", "0.5715003", "0.5702462", "0.5668924", "0.56644607", "0.5656563", "0.56421024", "0.56056917", "0.55921763", "0.558435", "0.5583649", "0.55582", "0.55463654", "0.5545056", "0.55318254", "0.55279696", "0.5525093", "0.5518909", "0.5515069", "0.55044395", "0.5496429", "0.549533", "0.5479362", "0.54643214", "0.5457795", "0.5430051", "0.5428231", "0.5424769", "0.54246503", "0.5419014", "0.5410747", "0.54039675", "0.5393686", "0.5388969", "0.53837746", "0.53808016", "0.5377595", "0.5374498", "0.5347804", "0.53343666", "0.53341895", "0.53294516", "0.5324159", "0.5322312", "0.532036", "0.5316615", "0.53164023", "0.53108406", "0.5310091", "0.5309216", "0.5305927", "0.5305689", "0.52985424", "0.5295585", "0.52869093", "0.5284231", "0.52838933", "0.5275371", "0.5274261", "0.52689314", "0.5264124", "0.5263318", "0.52496874", "0.5246209", "0.5243088", "0.52398336", "0.52361006", "0.52354944", "0.5228823", "0.52223825", "0.5221761", "0.52152634", "0.52135026", "0.5212773", "0.5191746", "0.5188589", "0.51862514", "0.5185025", "0.51845944", "0.51815903", "0.5179815", "0.51615417", "0.515676", "0.5153476", "0.5149982", "0.5148686", "0.5144929", "0.5142186", "0.51411575", "0.5140959" ]
0.6826442
0
! Merge two given graphs, namely the synsets graph and the SUMO graph. The final graph contains one type of node, namely synset nodes. Each synset node has an attribute named "synset",
def merge(self, g1, g2):
    logger = logging.getLogger(__name__)

    g = BaseGraph()
    g.copy_graph_from(g1)

    plwn2sumo_dict = defaultdict(set)
    plwn2sumo_dict = self.get_plwn2sumo_dict()

    synset_on_vertex_dict = {}
    for node in g.all_nodes():
        synset_id = node.synset.synset_id
        if synset_id in synset_on_vertex_dict:
            logger.warning("ID of some synset is not unique.")
            continue
        synset_on_vertex_dict[synset_id] = node

    num_of_edge = 0
    for edge in g2.all_edges():
        num_of_edge += 1
        logger.info("%d/%d", num_of_edge, g2.num_edges())

        parent_sumo_concept = edge.source().sumo
        child_sumo_concept = edge.target().sumo

        if parent_sumo_concept not in plwn2sumo_dict:
            logger.warning("The mapping file doesn't contain sumo concept '%s'.", parent_sumo_concept)
            continue
        if child_sumo_concept not in plwn2sumo_dict:
            logger.warning("The mapping file doesn't contain sumo concept '%s'.", child_sumo_concept)
            continue

        for parent_syn_id in plwn2sumo_dict[parent_sumo_concept]:
            if parent_syn_id not in synset_on_vertex_dict:
                logger.warning("The mapping file contains synset '%d' that is not in the graph.", parent_syn_id)
                continue
            p_node = synset_on_vertex_dict[parent_syn_id]
            for child_syn_id in plwn2sumo_dict[child_sumo_concept]:
                if child_syn_id not in synset_on_vertex_dict:
                    logger.warning("The mapping file contains synset '%d' that is not in the graph.", child_syn_id)
                    continue
                ch_node = synset_on_vertex_dict[child_syn_id]
                g.add_edge(p_node, ch_node, [("rel", edge.rel)], simply=True)

    return g
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def merge_graph(self, other):\n self.add_nodes( (nLabel,nInfo) for nLabel,nInfo in other.nodes() )\n \n for nLabel,nInfo in other.nodes():\n for edgeLabel,edgeInfo in other.edgesFrom(nLabel):\n self.add_edge(edgeLabel,edgeInfo)", "def merge(self, other: \"GraphSet\") -> None:\n if other.name != self.name:\n raise UnmergableGraphSetsException(\n f\"Unable to merge graph with name {other.name} into {self.name}\"\n )\n if other.version != self.version:\n raise UnmergableGraphSetsException(\n f\"Unable to merge graph with version {other.version} into {self.version}\"\n )\n self.start_time = min(self.start_time, other.start_time)\n self.end_time = max(self.end_time, other.end_time)\n self.resources += other.resources\n self._resolve_duplicates()\n self.errors += other.errors\n self.stats.merge(other.stats)", "def graph_union(*args, **kwargs):\n\n if not len(args) > 1:\n raise AttributeError('At least two input Graphs required')\n\n # Validate if all arguments are Graphs\n check_graphbase_instance(*args)\n\n all_share_common_origin = all([share_common_origin(args[0], n) for n in args[1:]])\n if all_share_common_origin and not kwargs.get('return_copy', False):\n\n nids = []\n for graph in args:\n nids.extend([n for n in graph.nodes if n not in nids])\n\n eids = []\n for graph in args:\n eids.extend([e for e in graph.edges if e not in eids])\n\n result = args[0].origin.getnodes(nids)\n result.edges.set_view(eids)\n return result\n else:\n\n # make a deep copy of the first graph\n result = args[0].copy(deep=True, copy_view=False)\n\n # we need control over the node ID to add\n # temporary turn off auto_nid if needed\n auto_nid = result.data.auto_nid\n result.data.auto_nid = False\n\n for graph in args[1:]:\n for node, attrib in graph.nodes.items():\n if node not in result.nodes:\n result.add_node(node, **attrib)\n\n for edge, attrib in graph.edges.items():\n if edge not in result.edges:\n result.add_edge(*edge, **attrib)\n\n # Restore auto_nid\n result.data.auto_nid = auto_nid\n\n return result", "def mix_graphs(source_graph1, source_graph2):\n g = clone_graph(source_graph1, identifier=source_graph1.identifier)\n g = clone_graph(source_graph2, target_graph=g)\n return g", "def merge_graphs(graphs, G=nx.Graph(), contig=None, coords=None):\n for graph in graphs:\n G = append_graph(G, graph, contig=contig, coords=coords)\n return G", "def sub_graph_merging(self):", "def merge_sidewalks(sidewalk_network1, sidewalk_network2):\n\n for node in sidewalk_network1.nodes.get_list():\n node.confirmed = True\n\n '''\n # add new nodes from sidewalk_network2 to sidewalk_network1\n for sidewalk_node in sidewalk_network2.nodes.get_list():\n in_other = False\n same_node = None\n for other_sidewalk_node in sidewalk_network1.nodes.get_list():\n if sidewalk_node.location() == other_sidewalk_node.location():\n in_other = True\n same_node = other_sidewalk_node\n if not in_other: # If street network 2 contains the node but street network 1 does not\n sidewalk_network1.add_node(sidewalk_node) # Add node from street network 2 to street network 1\n else: # If both networks contain the node\n sidewalk_network2.nodes.update(sidewalk_node.id, same_node)\n '''\n # add new nodes from sidewalk_network2 to sidewalk_network1\n\n network1_dict = {}\n for sidewalk_node in sidewalk_network1.nodes.get_list():\n network1_dict[sidewalk_node.location] = sidewalk_node\n\n for sidewalk_node in sidewalk_network2.nodes.get_list():\n if sidewalk_node.location not in network1_dict:\n sidewalk_network1.add_node(sidewalk_node)\n else:\n 
sidewalk_network2.nodes.update(sidewalk_node.id, network1_dict[sidewalk_node.location])\n\n # add new ways from sidewalk_network2 to sidewalk_network1\n for way in sidewalk_network2.ways.get_list():\n # ensure all ways have correct nids, if incorrect update to correct nid from network1\n for nid in way.get_node_ids():\n if sidewalk_network1.nodes.get(nid) is None:\n way.swap_nodes(nid, sidewalk_network2.nodes.get(nid).id)\n\n has_confirmed_parents = False\n for nid in way.get_node_ids():\n if sidewalk_network1.nodes.get(nid).confirmed:\n has_confirmed_parents = True\n if not has_confirmed_parents:\n sidewalk_network1.add_way(way)\n\n return sidewalk_network1", "def merge(self):\n leftSon = self.nodes.getNode(0)\n rightSon = self.nodes.getNode(1)\n self.nodes.removeNode(0)\n self.nodes.removeNode(0)\n self.nodes.addNode(Node(leftSon, rightSon))", "def concatenate_graphs(G1, G2):\n V = G1.V + G2.V\n edges = np.vstack((G1.edges, G1.V + G2.edges))\n weights = np.hstack((G1.weights, G2.weights))\n G = WeightedGraph(V, edges, weights)\n return G", "def merge(self, ASGgraph ):\r\n \r\n self.mergedASG.append(ASGgraph)\t\t\t\t\t# add the graph to the list of merged graphs\r\n for nodeType in ASGgraph.listNodes.keys():\r\n if not nodeType in self.listNodes.keys():\t\t\t# node type was not known\r\n self.listNodes[nodeType] = ASGgraph.listNodes[nodeType]\r\n self.nodeTypes.append(nodeType)\r\n else: \t# node type existed...\r\n for node in ASGgraph.listNodes[nodeType]:\t\t\t# add each node of merged graph to actual graph\r\n self.listNodes[nodeType].append(node)\r\n \r\n # copy also the model's attribute\r\n errors = []\r\n for attr in ASGgraph.generatedAttributes.keys():\r\n if attr in self.generatedAttributes.keys(): # Attribute is present!\r\n #print \"Attribute collision for \", attr, \"<-- New attribute value ignored\" \r\n errors.append(attr)\r\n if( not self.__collidedAttributeTracker.has_key( attr ) ):\r\n self.__collidedAttributeTracker[ attr ] = 1\r\n else:\r\n self.__collidedAttributeTracker[ attr ] += 1\r\n continue\r\n self.generatedAttributes[attr] = ASGgraph.generatedAttributes[attr]\r\n # now create the attribute!\r\n self.setAttrValue(attr, ASGgraph.getAttrValue(attr).clone())\r\n if( errors ):\r\n print 'Attribute name collisions occured during load (could affect '\\\r\n + 'old formalisms)\\nThe following attributes collided: '\\\r\n + str(errors) \r\n ## print 'In fact, these messages are slated for removal, as this ' \\\r\n ## 'attribute system is being bypassed to fix this problem'\r", "def merge_pores(self, n1, n2, setcategory='union', radius=None, center=None, check_throats=True, inner_category='inner', verbose=False):\n\n if not self.graph.has_node(n1) or not self.graph.has_node(n2):\n warn(\"Nodes {} or {} does not exist. 
Cannot merge them\".format(u, v))\n return\n elif verbose:\n print(\"Merging pore {} and {}\".format(n1, n2))\n\n if center is not None:\n self.graph.nodes[n1]['center'] = center\n\n if radius is not None:\n self.graph.nodes[n1]['radius'] = radius\n\n category = self.graph.nodes[n2]['category']\n\n if setcategory == 'union':\n self.graph.nodes[n1]['category'] = self.graph.nodes[n1]['category'].union(\n category)\n if len(self.graph.nodes[n1]['category']) > 1 and 'inner' in self.graph.nodes[n1]['category']:\n self.graph.nodes[n1]['category'] = self.graph.nodes[n1]['category'].difference(\n set(['inner']))\n\n # if not G.has_edge(u,v):\n # warn(\"Nodes {} and {} will be merged but they are not adjacent\".format(u,v))\n\n # Warning : here we just copy the old edge attributes to the new one, so that the attributes are already defined.\n # The values must however be checked !\n new_edges = [(n1, n3, d)\n for _, n3, d in self.graph.edges(n2, data=True)\n if (n3 != n1 and n3 != n2)]\n try:\n self.graph.add_edges_from(new_edges)\n except:\n warn(\n 'Error trying to create new edges when merging pores {} and {}'.format(n1, n2))\n warn('Edges list {}'.format(new_edges))\n\n self.graph.remove_node(n2)\n\n if check_throats:\n for n3 in self.graph[n1]:\n self._compute_auto_throat_length(n1, n3)\n self._compute_auto_throat_radius(n1, n3)", "def merge_networks_in_series(n1, n2):\n new_l_size = n1.l_size + n2.l_size + 1 # One additional vertex in between.\n new_u_size = n1.u_size + n2.u_size\n\n # Connect the 0-pole and the inf-pole in the result network.\n new_link_edge = n1.zero_pole.insert_before()\n new_link_edge_opp = n2.inf_pole.insert_after()\n new_link_edge.opposite = new_link_edge_opp\n new_link_edge_opp.opposite = new_link_edge\n\n # Merge the 0-pole of n1 with the inf-pole of n2.\n n1.inf_pole.insert_all_after(n2.zero_pole)\n\n # Remove the link edges in n1 and n2 if they are not real.\n if not n1.is_linked:\n n1.zero_pole.remove()\n n1.inf_pole.remove()\n if not n2.is_linked:\n n2.zero_pole.remove()\n n2.inf_pole.remove()\n\n # After a serial merge the poles are never linked.\n res = Network(new_link_edge, is_linked=False, l_size=new_l_size, u_size=new_u_size)\n res.type = 'S'\n return res\n\n # # Extract the poles from both networks.\n # first_net_zero_pole_edge = n1.zero_pole\n # first_net_inf_pole_edge = n1.inf_pole\n #\n # second_net_zero_pole_edge = n2.zero_pole\n # second_net_inf_pole_edge = n2.inf_pole\n #\n # # Create a new half edges for connecting the poles of the network. The\n # # edge will not be part from the edges list.\n # new_root_half_edge = first_net_zero_pole_edge.insert_after()\n # new_root_opposite = second_net_inf_pole_edge.insert_after()\n #\n # new_root_half_edge.opposite = new_root_opposite\n # new_root_opposite.opposite = new_root_half_edge\n #\n # # Get the half edges from both networks for merging\n # first_net_inf_pole_prior = first_net_inf_pole_edge.prior\n # second_net_zero_pole_edge_prior = second_net_zero_pole_edge.prior\n #\n # # Merge the both networks so that the inf-pole from the first network is\n # # identified with the zero-pole from the second one. 
Handling different\n # # while merging the two networks.\n # first_net_inf_pole_edge.prior = second_net_zero_pole_edge_prior\n # second_net_zero_pole_edge_prior.next = first_net_inf_pole_edge\n #\n # first_net_inf_pole_prior.next = second_net_zero_pole_edge\n # second_net_zero_pole_edge.prior = first_net_inf_pole_prior\n #\n # # Update the node numbers in the second network zero-pole edges\n # half_edge_walker = first_net_inf_pole_prior.next\n # while half_edge_walker != first_net_inf_pole_prior:\n # half_edge_walker.node_nr = first_net_inf_pole_edge.node_nr\n # half_edge_walker = half_edge_walker.next\n #\n # # Check whether the original poles of the network that are merged are\n # # linked or not. If they are not linked then the corresponding half\n # # edges between them have to be removed.\n # if not n1.is_linked:\n # # Remove the half edges between the zero and inf pole from the first\n # # network.\n # first_net_zero_pole_edge.remove()\n # first_net_inf_pole_edge.remove()\n #\n # if not n2.is_linked:\n # # Remove the half edges between the zero and inf pole from the first\n # # network.\n # second_net_zero_pole_edge.remove()\n # second_net_inf_pole_edge.remove()\n #\n # # After a serial merge the poles are never linked.\n # res = Network(new_root_half_edge, is_linked=False,\n # l_size=new_l_size, u_size=new_u_size)\n # res.type = 'S'\n # return res", "def mergeNodes(new, t1, t2):\n \n if t1 and t2:\n new.val = t1.val + t2.val\n elif not t1:\n new.val = t2.val\n elif not t2:\n new.val = t1.val", "def merge(self):\n collapsable = self.findSameSubtrees()\n\n dummy = GraphiusNode(-1, None)\n for i, node in self.nodes.items():\n dummy.addNeighbor(node)\n\n # Perform the merge\n self.mergeHelper(dummy, collapsable)\n\n # Regenerate trees\n newNodes = self.dfs(dummy)\n assert(-1 not in newNodes)\n self.nodes = newNodes", "def union(*graphs):\n from sets import Set\n out = {}\n for G in graphs:\n for v in G:\n out.setdefault(v,Set()).update(list(G[v]))\n return out", "def union(self, node1, node2):\n root1 = self.find(node1)\n root2 = self.find(node2)\n if root1 != root2: # only merge if the connected components differ\n if self.ranks[root1] > self.ranks[root2]:\n self.parents[root2] = root1\n else:\n self.parents[root1] = root2\n if self.ranks[root1] == self.ranks[root2]:\n self.ranks[root2] += 1", "def merge(self, a, b):\n old_id, target_id = sorted((self.node_id[a], self.node_id[b]), key = lambda id: len(self.groups[id]))\n for node in self.groups[old_id]:\n self.node_id[node] = target_id\n self.groups[target_id] |= self.groups[old_id]\n del self.groups[old_id]", "def combine_graphs(\n graphs: List[dgl.DGLGraph],\n atom_map_number: List[List[int]],\n bond_map_number: List[List[int]],\n) -> dgl.DGLGraph:\n\n # Batch graph structure for each relation graph\n\n relations = graphs[0].canonical_etypes\n ntypes = graphs[0].ntypes\n\n edges_dict = defaultdict(list)\n num_nodes_dict = defaultdict(int)\n\n # reorder atom nodes\n for i, g in enumerate(graphs):\n for rel in relations:\n srctype, etype, dsttype = rel\n u, v, eid = g.edges(form=\"all\", order=\"eid\", etype=rel)\n\n # deal with nodes (i.e. 
atom and optionally global)\n if srctype == \"atom\":\n src = [atom_map_number[i][j] for j in u]\n else:\n # global nodes\n src = u + num_nodes_dict[srctype]\n src = src.numpy().tolist()\n\n if dsttype == \"atom\":\n dst = [atom_map_number[i][j] for j in v]\n else:\n # global nodes\n dst = v + num_nodes_dict[dsttype]\n dst = dst.numpy().tolist()\n\n edges_dict[rel].extend([(s, d) for s, d in zip(src, dst)])\n\n for ntype in ntypes:\n num_nodes_dict[ntype] += g.number_of_nodes(ntype)\n\n # reorder bond edges (bond edges)\n bond_map_number_list = []\n for i in itertools.chain.from_iterable(bond_map_number):\n bond_map_number_list.extend([2 * i, 2 * i + 1])\n bond_reorder = [\n bond_map_number_list.index(i) for i in range(len(bond_map_number_list))\n ]\n\n rel = (\"atom\", \"bond\", \"atom\")\n a2a_edges = edges_dict.pop(rel)\n a2a_edges = [a2a_edges[i] for i in bond_reorder]\n\n edges_dict[rel] = a2a_edges\n\n # create graph\n new_g = dgl.heterograph(edges_dict, num_nodes_dict=num_nodes_dict)\n\n # Batch features\n\n # reorder node features (atom and global)\n atom_map_number_list = list(itertools.chain.from_iterable(atom_map_number))\n atom_reorder = [\n atom_map_number_list.index(i) for i in range(len(atom_map_number_list))\n ]\n\n for ntype in graphs[0].ntypes:\n feat_dicts = [g.nodes[ntype].data for g in graphs]\n\n # concatenate features\n keys = feat_dicts[0].keys()\n new_feats = {k: torch.cat([fd[k] for fd in feat_dicts], 0) for k in keys}\n\n # reorder atom features\n if ntype == \"atom\":\n new_feats = {k: v[atom_reorder] for k, v in new_feats.items()}\n\n new_g.nodes[ntype].data.update(new_feats)\n\n # reorder edge features (bond)\n\n for etype in graphs[0].etypes:\n feat_dicts = [g.edges[etype].data for g in graphs]\n\n # concatenate features\n keys = feat_dicts[0].keys()\n new_feats = {k: torch.cat([fd[k] for fd in feat_dicts], 0) for k in keys}\n\n if etype == \"bond\":\n new_feats = {k: v[bond_reorder] for k, v in new_feats.items()}\n\n new_g.edges[etype].data.update(new_feats)\n\n # add _ID to atom feature\n new_g.nodes[\"atom\"].data[\"_ID\"] = torch.arange(new_g.num_nodes(\"atom\"))\n\n return new_g", "def union(self, a, b):\n if (a in self.node_id) and (b in self.node_id) and (self.node_id[a] != self.node_id[b]):\n self.merge(a, b)\n elif (a in self.node_id) or (b in self.node_id):\n self.add(a,b)\n else:\n self.create_new_group(a,b)", "def synsets_similarity(s1, s2):\n lemmas_sentence_1, tagged_sentence_1 = lemmatize_sentence(s1.lower())\n lemmas_sentence_2, tagged_sentence_2 = lemmatize_sentence(s2.lower())\n\n # Disambiguate words and create list of sysnsets \n synsets_sentence_1 = []\n for (lemma, word_tag) in zip(lemmas_sentence_1, tagged_sentence_1):\n if lemma in stop_words:\n continue\n synset = lesk(lemmas_sentence_1, lemma, wordnet_pos_code(word_tag[1]))\n if synset is not None:\n synsets_sentence_1.append(synset)\n else:\n found = wordnet.synsets(lemma, wordnet_pos_code(word_tag[1]))\n if len(found) > 0:\n synsets_sentence_1.append(found[0]) \n #print(\"Warn: lemma [%s] returned no disambiguation...using synset : %s\" % (lemma, found[0])) \n\n synsets_sentence_2 = []\n for (lemma, word_tag) in zip(lemmas_sentence_2, tagged_sentence_2):\n if lemma in stop_words:\n continue\n synset = lesk(lemmas_sentence_2, lemma, wordnet_pos_code(word_tag[1]))\n if synset is not None:\n synsets_sentence_2.append(synset)\n else:\n found = wordnet.synsets(lemma, wordnet_pos_code(word_tag[1]))\n if len(found) > 0:\n synsets_sentence_2.append(found[0]) \n #print(\"Warn: lemma [%s] 
returned no disambiguation...using synset : %s\" % (lemma, found[0])) \n\n # Compute similarity\n if len(synsets_sentence_1) != 0 and len(synsets_sentence_2) != 0:\n similarity = 1 - jaccard_distance(set(synsets_sentence_1), set(synsets_sentence_2))\n return similarity\n else:\n return 0", "def coco_union(dsets):\n merged = ub.odict([\n ('categories', []),\n ('licenses', []),\n ('info', []),\n ('images', []),\n ('annotations', []),\n ])\n\n merged_cat_name_to_id = {}\n\n def update_ifnotin(d1, d2):\n \"\"\" copies keys from d2 that doent exist in d1 into d1 \"\"\"\n for k, v in d2.items():\n if k not in d1:\n d1[k] = v\n return d1\n\n for key, old_dset in dsets.items():\n # hack: in our case the key is the subdir\n subdir = key\n\n # Create temporary indexes to map from old to new\n cat_id_map = {}\n img_id_map = {}\n\n # Add the licenses / info into the merged dataset\n # Licenses / info are unused in our datas, so this might not be correct\n merged['licenses'].extend(old_dset['licenses'])\n merged['info'].extend(old_dset['info'])\n\n # Add the categories into the merged dataset\n for old_cat in old_dset['categories']:\n new_id = merged_cat_name_to_id.get(old_cat['name'], None)\n if new_id is None:\n # The same category might exist in different datasets.\n new_id = len(merged_cat_name_to_id) + 1\n merged_cat_name_to_id[old_cat['name']] = new_id\n\n new_cat = ub.odict([\n ('id', new_id),\n ('name', old_cat['name']),\n ('supercategory', old_cat['supercategory']),\n ])\n update_ifnotin(new_cat, old_cat)\n cat_id_map[old_cat['id']] = new_cat['id']\n merged['categories'].append(new_cat)\n\n # Add the images into the merged dataset\n for old_img in old_dset['images']:\n new_img = ub.odict([\n ('id', len(merged['images']) + 1),\n ('file_name', join(subdir, old_img['file_name'])),\n ])\n # copy over other metadata\n update_ifnotin(new_img, old_img)\n img_id_map[old_img['id']] = new_img['id']\n merged['images'].append(new_img)\n\n # Add the annotations into the merged dataset\n for old_annot in old_dset['annotations']:\n old_cat_id = old_annot['category_id']\n old_img_id = old_annot['image_id']\n new_cat_id = cat_id_map.get(old_cat_id, None)\n new_img_id = img_id_map.get(old_img_id, None)\n if new_cat_id is None:\n continue\n print('annot {} in {} has bad category-id {}'.format(old_annot['id'], key, old_cat_id))\n if new_img_id is None:\n continue\n print('annot {} in {} has bad image-id {}'.format(old_annot['id'], key, old_img_id))\n new_annot = ub.odict([\n ('id', len(merged['annotations']) + 1),\n ('image_id', new_img_id),\n ('category_id', new_cat_id),\n ])\n update_ifnotin(new_annot, old_annot)\n merged['annotations'].append(new_annot)\n return merged", "def union(set1, set2):", "def merge(self, graph):\n # keep previous self.filename\n # copy data\n for x in graph.data:\n self.data.append(x)\n # copy headers, unless already exists (is so, info is lost)\n for key in graph.headers:\n if key not in self.headers:\n self.headers.update({key: graph.headers[key]})\n # copy graphInfo, unless already exists (is so, info is lost)\n for key in graph.graphInfo:\n if key not in self.graphInfo:\n self.graphInfo.update({key: graph.graphInfo[key]})\n # copy sampleInfo, unless already exists (is so, info is lost)\n for key in graph.sampleInfo:\n if key not in self.sampleInfo:\n self.sampleInfo.update({key: graph.sampleInfo[key]})", "def graph_union(g1, g2, intersection=None, props=None, include=False,\n internal_props=False):\n pnames = None\n if props is None:\n props = []\n if internal_props:\n pnames = 
[]\n for (k, name), p1 in g1.properties.items():\n if k == 'g':\n continue\n p2 = g2.properties.get((k, name), None)\n props.append((p1, p2))\n pnames.append(name)\n for (k, name), p2 in g2.properties.items():\n if k == 'g' or (k, name) in g1.properties:\n continue\n props.append((None, p2))\n pnames.append(name)\n gprops = [[(name, g1.properties[('g', name)]) for name in g1.graph_properties.keys()],\n [(name, g2.properties[('g', name)]) for name in g2.graph_properties.keys()]]\n if not include:\n g1 = GraphView(g1, skip_properties=True)\n p1s = []\n for i, (p1, p2) in enumerate(props):\n if p1 is None:\n continue\n if p1.key_type() == \"v\":\n g1.vp[str(i)] = p1\n elif p1.key_type() == \"e\":\n g1.ep[str(i)] = p1\n\n g1 = Graph(g1, prune=True)\n\n for i, (p1, p2) in enumerate(props):\n if p1 is None:\n continue\n if str(i) in g1.vp:\n props[i] = (g1.vp[str(i)], p2)\n del g1.vp[str(i)]\n else:\n props[i] = (g1.ep[str(i)], p2)\n del g1.ep[str(i)]\n else:\n emask, emask_flip = g1.get_edge_filter()\n emask_flipped = False\n if emask is not None and not emask_flip:\n emask.a = numpy.logical_not(emask.a)\n emask_flipped = True\n g1.set_edge_filter(emask, True)\n\n vmask, vmask_flip = g1.get_vertex_filter()\n vmask_flipped = False\n if vmask is not None and not vmask_flip:\n vmask.a = not vmask.a\n g1.set_vertex_filter(vmask, True)\n vmask_flipped = True\n\n if intersection is None:\n intersection = g2.new_vertex_property(\"int64_t\", -1)\n else:\n intersection = intersection.copy(\"int64_t\")\n\n u1 = GraphView(g1, directed=True, skip_properties=True)\n u2 = GraphView(g2, directed=True, skip_properties=True)\n\n vmap, emap = libgraph_tool_generation.graph_union(u1._Graph__graph,\n u2._Graph__graph,\n _prop(\"v\", g1,\n intersection))\n\n if include:\n emask, emask_flip = g1.get_edge_filter()\n if emask is not None and emask_flipped:\n emask.a = numpy.logical_not(emask.a)\n g1.set_edge_filter(emask, False)\n\n vmask, vmask_flip = g1.get_vertex_filter()\n if vmask is not None and vmask_flipped:\n vmask.a = numpy.logical_not(vmask.a)\n g1.set_vertex_filter(vmask, False)\n\n n_props = []\n for p1, p2 in props:\n if p1 is None:\n p1 = g1.new_property(p2.key_type(), p2.value_type())\n if p2 is None:\n p2 = g2.new_property(p1.key_type(), p1.value_type())\n if not include:\n p1 = g1.copy_property(p1)\n if p2.value_type() != p1.value_type():\n p2 = g2.copy_property(p2, value_type=p1.value_type())\n if p1.key_type() == 'v':\n libgraph_tool_generation.\\\n vertex_property_union(u1._Graph__graph, u2._Graph__graph,\n vmap, emap,\n _prop(p1.key_type(), g1, p1),\n _prop(p2.key_type(), g2, p2))\n else:\n libgraph_tool_generation.\\\n edge_property_union(u1._Graph__graph, u2._Graph__graph,\n vmap, emap,\n _prop(p1.key_type(), g1, p1),\n _prop(p2.key_type(), g2, p2))\n n_props.append(p1)\n\n if pnames is not None:\n for name, p in zip(pnames, n_props):\n g1.properties[(p.key_type(), name)] = p\n if not include:\n for name, p in gprops[0]:\n g1.graph_properties[name] = p.copy()\n for name, p in gprops[1]:\n if name not in g1.graph_properties:\n g1.graph_properties[name] = p.copy()\n n_props = []\n\n if len(n_props) > 0:\n return g1, n_props\n else:\n return g1", "def merge_graphs(\n graph: DiGraph,\n ) -> Tuple[list[str], GraphAccess, Generator[Tuple[str, GraphAccess], None, None]]:\n\n # Find merge nodes: all nodes that are marked as merge node -> all children (merge roots) should be merged.\n # This method returns all merge roots as key, with the respective predecessor nodes as value.\n def merge_roots() -> 
dict[str, set[str]]:\n graph_root = GraphAccess.root_id(graph)\n merge_nodes = [node_id for node_id, data in graph.nodes(data=True) if data.get(\"merge\", False)]\n assert len(merge_nodes) > 0, \"No merge nodes provided in the graph. Mark at least one node with merge=true!\"\n result: dict[str, set[str]] = {}\n for node in merge_nodes:\n # compute the shortest path from root to here and sort out all successors that are also predecessors\n pres: set[str] = reduce(lambda res, p: res | set(p), all_shortest_paths(graph, graph_root, node), set())\n for a in graph.successors(node):\n if a not in pres:\n result[a] = pres\n return result\n\n # Walk the graph from given starting node and return all successors.\n # A successor which is also a predecessor is not followed.\n def sub_graph_nodes(from_node: str, parent_ids: set[str]) -> set[str]:\n to_visit = [from_node]\n visited: set[str] = {from_node}\n\n def successors(node: str) -> list[str]:\n return [a for a in graph.successors(node) if a not in visited and a not in parent_ids]\n\n while to_visit:\n to_visit = reduce(lambda li, node: li + successors(node), to_visit, [])\n visited.update(to_visit)\n return visited\n\n # Create a generator for all given merge roots by:\n # - creating the set of all successors\n # - creating a subgraph which contains all predecessors and all succors\n # - all predecessors are marked as visited\n # - all predecessor edges are marked as visited\n # This way it is possible to have nodes in the graph that will not be touched by the update\n # while edges will be created from successors of the merge node to predecessors of the merge node.\n def merge_sub_graphs(\n root_nodes: dict[str, set[str]], parent_nodes: set[str], parent_edges: set[Tuple[str, str, str]]\n ) -> Generator[Tuple[str, GraphAccess], None, None]:\n all_successors: Set[str] = set()\n for root, predecessors in root_nodes.items():\n successors: set[str] = sub_graph_nodes(root, predecessors)\n # make sure nodes are not \"mixed\" between different merge nodes\n overlap = successors & all_successors\n if overlap:\n raise AttributeError(f\"Nodes are referenced in more than one merge node: {overlap}\")\n all_successors |= successors\n # create subgraph with all successors and all parents, where all parents are already marked as visited\n sub = GraphAccess(graph.subgraph(successors | parent_nodes), root, parent_nodes, parent_edges)\n yield root, sub\n\n roots = merge_roots()\n parents: set[str] = reduce(lambda res, ps: res | ps, roots.values(), set())\n parent_graph = graph.subgraph(parents)\n graphs = merge_sub_graphs(roots, parents, set(parent_graph.edges(data=\"edge_type\")))\n return list(roots.keys()), GraphAccess(parent_graph, GraphAccess.root_id(graph)), graphs", "def _build_graphs(self):\n g1 = self._build_graph1()\n g2 = self._build_graph2(g1)\n return g1, g2", "def merge_synset(wn, synsets, reason, lexfile, ssid=None, change_list=None):\n pos = synsets[0].part_of_speech.value\n if not ssid:\n ssid = new_id(wn, pos, synsets[0].definitions[0].text)\n ss = Synset(ssid, \"in\",\n PartOfSpeech(pos), lexfile)\n ss.definitions = [d for s in synsets for d in s.definitions]\n ss.examples = [x for s in synsets for x in s.examples]\n members = {}\n wn.add_synset(ss)\n\n for s in synsets:\n # Add all relations\n for r in s.synset_relations:\n if not any(r == r2 for r2 in ss.synset_relations):\n add_relation(\n wn, ss, wn.synset_by_id(\n r.target), r.rel_type, change_list)\n # Add members\n for m in wn.members_by_id(s.id):\n if m not in members:\n members[m] = 
add_entry(wn, ss, m, change_list)\n add_entry(wn, ss, m, change_list)\n e = [e for e in [wn.entry_by_id(e2) for e2 in wn.entry_by_lemma(m)]\n if e.lemma.part_of_speech.value == pos][0]\n for f in e.forms:\n if not any(f2 == f for f in members[m].forms):\n members[m].add_form(f)\n # syn behaviours - probably fix manually for the moment\n if change_list:\n change_list.change_synset(ss)\n return ss", "def merge_working_sets(self, other):\n\n for dist in other.by_key.values(): self.add(dist)\n return self", "def testMergeNoEdges():\n\n n1 = DummyNode(x=1, y=2, z=4)\n n2 = DummyNode(x=1, y=2, z=3)\n\n assert n1.z == 4\n\n n1.merge_with(n2)\n\n assert n1.z == 3", "def union(G, H):\n\n if G.order() != H.order():\n msg = \"Node sets of the two directed graphs are not equal!\"\n raise StaticGraphNotEqNodesException(msg)\n\n n_nodes = G.order()\n edges = ((u, v) for u in G.nodes()\n\t\t for v in chain(G.successors(u), H.successors(u)))\n deg = make_deg(n_nodes, edges) \n edges = ((u, v) for u in G.nodes()\n\t\t for v in chain(G.successors(u), H.successors(u)))\n GC = make(n_nodes, G.size() + H.size(), edges, deg)\n return GC", "def stemset_combiner(stempool1, stempool2):\n return stempool1.stemset | stempool2.stemset", "def merge_fs(fs1,fs2):\n # This function merges fs2 into fs1, changing fs1 in-place\n # It's a cheaper and faster alternative of unify(), which will check\n # all the similarities and differences between fs1 and fs2. But this one\n # just assumes that fs2 and fs1 does not have any entries in common\n # NOTICE: In Templates.lex we cannot guarantee there is no overlap\n # so only use this function when it IS clear.\n for k in fs2.keys():\n if fs1.has_key(k):\n merge_fs(fs1[k],fs2[k])\n else:\n fs1[k] = fs2[k]\n return", "def addGeounitNodes(node1, node2):\n \n from operator import add\n \n argsDict = {} \n argsDict[\"raw\"] = node1.raw + node2.raw\n argsDict[\"raw_housing\"] = node1.raw_housing + node2.raw_housing\n if node1.syn and node2.syn:\n argsDict[\"syn\"] = node1.syn + node2.syn\n if node1.cons and node2.cons:\n argsDict[\"cons\"] = addConstraints(node1.cons,node2.cons)\n else:\n argsDict[\"cons\"] = {}\n if node1.invar and node2.invar:\n argsDict[\"invar\"] = addInvariants(node1.invar,node2.invar)\n else:\n argsDict[\"invar\"] = {}\n argsDict[\"geocodeDict\"] = node1.geocodeDict\n \n aggregatedNode = nodes.geounitNode(node1.geocode, **argsDict)\n \n return aggregatedNode", "def merge(self, other):\n\n for n in other.cfg_nodes:\n self.insert_cfgnode(n)\n\n for ins_addr, outs in other.out_branches.items():\n if ins_addr in self.out_branches:\n for stmt_idx, item in outs.items():\n if stmt_idx in self.out_branches[ins_addr]:\n self.out_branches[ins_addr][stmt_idx].merge(item)\n else:\n self.out_branches[ins_addr][stmt_idx] = item\n\n else:\n item = next(iter(outs.values()))\n self.out_branches[ins_addr][item.stmt_idx] = item", "def _merge(self, drifting_t, another_t):\n # drifting_t and another_t must exist in graph\n\n # add a (t --> another_t) edge for each (t --> drifting_t) edge\n for t, _, key, data in self.in_edges_iter(\n nbunch=[drifting_t], data=True, keys=True\n ):\n self.add_edge(t, another_t, key=key, attr_dict=data)\n\n # add a (another_t --> t) edge for each (drifting_t --> t) edge\n for _, t, key, data in self.edges_iter(\n nbunch=[drifting_t], data=True, keys=True\n ):\n self.add_edge(another_t, t, key=key, attr_dict=data)\n\n # remove drifting_t node (as it was replaced by another_t)\n self.remove_node(drifting_t)", "def sub_graph_merging(self):\n raise 
NotImplementedError()", "def merge(cls, analyses):\r\n raise NotImplementedError()", "def union(self, node1, node2):\n\n root1 = self.root(node1)\n root2 = self.root(node2)\n\n if root1 == root2:\n return\n\n if node1 < node2:\n self.set[root2] = root1\n self.root(node2)\n else:\n self.set[root1] = root2\n self.root(node1)", "def merge(self):\n G = ScaffoldGraph(self.components_fasta_fname)\n\n # TODO implement an add_nodes_from wrapper so direct access is unnecessary\n G.graph.add_nodes_from(self.nodes(data=True))\n for u, v, c in self.graph.edges:\n edge_data_list = [self.graph[u][v][i] for i in self.graph[u][v]]\n G.add_edge(u, v, **self._merge_edge_dicts(*edge_data_list))\n\n return G", "def _merge_mapper(mapper1, mapper2):\n if len(mapper1) > 0:\n if len(mapper2) > 0:\n clusters1 = mapper1['cluster']\n clusters2 = mapper2['cluster']\n clusters = np.unique(np.concatenate((clusters1, clusters2), 0))\n\n mapper1['cluster'] = clusters\n mapper1['links'] += mapper2['links']\n else:\n mapper1 = mapper2\n return mapper1", "def union(self, *graphs, **named_graphs):\n for item in graphs:\n self._store(item)\n\n for name, item in named_graphs.items():\n self._store(item, name=name)", "def merge_nodes(G,nodes, new_node, attr_dict=None, **attr):\n \n G.add_node(new_node, distToCancer=0, classCell=\"cancerCluster\") # Add the 'merged' node\n \n for n1,n2,data in G.edges(data=True):\n # For all edges related to one of the nodes to merge,\n # make an edge going to or coming from the `new gene`.\n if n1 in nodes:\n G.add_edge(new_node,n2,data)\n elif n2 in nodes:\n G.add_edge(n1,new_node,data)\n \n for n in nodes: # remove the merged nodes\n if(G.has_node(n)):\n G.remove_node(n)", "def wordNet_similarity(sentence1, sentence2):\r\n # Tokenize and tag\r\n \r\n # sentence1 = pos_tag(word_tokenize(sentence1))\r\n sentence1=st_tagger.tag(word_tokenize(sentence1))\r\n \r\n # sentence2 = pos_tag(word_tokenize(sentence2))\r\n sentence2=st_tagger.tag(word_tokenize(sentence2))\r\n\r\n \r\n # Get the synsets for the tagged words\r\n #################################################\r\n\r\n # synsets1=[]\r\n # synsets2=[]\r\n # for tagged_word in sentence1:\r\n # print(tagged_word)\r\n # tagged_word = list(tagged_word)\r\n # synsets1.append(tagged_to_synset(tagged_word[0],tagged_word[1]))\r\n # for tagged_word in sentence2:\r\n # print(tagged_word)\r\n # tagged_word = list(tagged_word)\r\n # print(tagged_word)\r\n # synsets2.append(tagged_to_synset(tagged_word[0],tagged_word[1]))\r\n\r\n # The code above is the elaboration of code below\r\n synsets1 = [tagged_to_synset(*tagged_word) for tagged_word in sentence1]\r\n synsets2 = [tagged_to_synset(*tagged_word) for tagged_word in sentence2]\r\n \r\n # Filter out the Nones in the synonym set\r\n synsets1 = [ss for ss in synsets1 if ss]\r\n synsets2 = [ss for ss in synsets2 if ss]\r\n \r\n score, count = 0.0, 0\r\n \r\n###########################################################################\r\n # for syn1 in synsets1:\r\n # arr_simi_score = []\r\n # print('=========================================')\r\n # print(syn1)\r\n # print('----------------')\r\n # for syn2 in synsets2:\r\n # print(syn2)\r\n # simi_score = syn1.path_similarity(syn2)\r\n # print(simi_score)\r\n # if simi_score is not None:\r\n # arr_simi_score.append(simi_score)\r\n # print('----------------')\r\n # print(arr_simi_score)\r\n # if(len(arr_simi_score) > 0):\r\n # best = max(arr_simi_score)\r\n # print(best)\r\n # score += best\r\n # count += 1\r\n # # Average the values\r\n # print('score: ', 
score)\r\n # print('count: ', count)\r\n # score /= count\r\n\r\n###########################################################################\r\n\r\n for syn1 in synsets1:\r\n arr_simi_score = []\r\n # print('=========================================')\r\n print(\"Each word from Synonym se1\",syn1)\r\n # print('----------------')\r\n for syn2 in synsets2:\r\n print(\"Each word from Synonym se2\",syn2)\r\n # simi_score = syn1.path_similarity(syn2)\r\n simi_score = syn1.wup_similarity(syn2)\r\n print(\"word to word path_similarity score\",simi_score)\r\n if simi_score is not None:\r\n arr_simi_score.append(simi_score)\r\n print('----------------')\r\n print(arr_simi_score)\r\n if(len(arr_simi_score) > 0):\r\n best = max(arr_simi_score)\r\n print(\"best score so far\", best)\r\n score += best\r\n count += 1\r\n # Average the values\r\n print('score: ', score)\r\n print('count: ', count)\r\n if count!=0:\r\n score /= count\r\n else:\r\n score=0.0\r\n return score", "def union(self, other):\n self.find_set()._link(other.find_set())", "def information_content_similarity(s1, s2):\n lemmas_sentence_1, tagged_sentence_1 = lemmatize_sentence(s1.lower())\n lemmas_sentence_2, tagged_sentence_2 = lemmatize_sentence(s2.lower())\n\n # Disambiguate words and create list of sysnsets \n synsets_sentence_1 = []\n for (lemma, word_tag) in zip(lemmas_sentence_1, tagged_sentence_1):\n synset = lesk(lemmas_sentence_1, lemma, wordnet_pos_code(word_tag[1]))\n if synset is not None:\n synsets_sentence_1.append(synset)\n else:\n found = wordnet.synsets(lemma, wordnet_pos_code(word_tag[1]))\n if len(found) > 0:\n synsets_sentence_1.append(found[0]) \n #print(\"Warn: lemma [%s] returned no disambiguation...using synset : %s\" % (lemma, found[0])) \n synsets_sentence_2 = []\n for (lemma, word_tag) in zip(lemmas_sentence_2, tagged_sentence_2):\n synset = lesk(lemmas_sentence_2, lemma, wordnet_pos_code(word_tag[1]))\n if synset is not None:\n synsets_sentence_2.append(synset)\n else:\n found = wordnet.synsets(lemma, wordnet_pos_code(word_tag[1]))\n if len(found) > 0:\n synsets_sentence_2.append(found[0]) \n #print(\"Warn: lemma [%s] returned no disambiguation...using synset : %s\" % (lemma, found[0])) \n\n score, count = 0.0, 0\n # For each word in the first sentence\n for synset in synsets_sentence_1:\n L = []\n for ss in synsets_sentence_2:\n try:\n L.append(synset.lin_similarity(ss, brown_ic))\n except:\n continue\n if L: \n best_score = max(L)\n score += best_score\n count += 1\n # Average the values\n if count > 0: score /= count\n return score", "def combine_graphs(cls, ds, gr):\n\n topdir, file_prefix, outfiles = ds.get_allinone_outfiles()\n\n # For each host, combine all its graphs\n _, _, host_outfiles = ds.get_host_outfiles()\n for node in ds.get_hosts():\n logging.info(\"Combining graphs for %s\" % node)\n graphs = [v for k, v in host_outfiles[node].items()\n if k != \"rrdfile\" and v != RRDToolDB.SKIPPED]\n if graphs:\n newgraph = \"%s/%s_%s.png\" % (topdir, file_prefix, node)\n cls.combine_graphs_vertically(graphs, newgraph)\n outfiles[\"hosts\"][node] = newgraph\n else:\n outfiles[\"hosts\"][node] = RRDToolDB.SKIPPED\n\n # For each VM, combine all its graphs\n _, _, vm_outfiles = ds.get_vm_outfiles()\n for node in ds.get_vms():\n logging.info(\"Combining graphs for %s\" % node)\n graphs = [v for k, v in vm_outfiles[node].items()\n if k != \"rrdfile\" and v != RRDToolDB.SKIPPED]\n if graphs:\n newgraph = \"%s/%s_%s.png\" % (topdir, file_prefix, node)\n cls.combine_graphs_vertically(graphs, newgraph)\n 
outfiles[\"vms\"][node] = newgraph\n else:\n outfiles[\"vms\"][node] = RRDToolDB.SKIPPED\n\n # For each type of graphs (e.g., cpu, memory, etc.), combine all\n # graphs from hosts and VMs\n for gname in gr:\n logging.info(\"Combining all host and VM graphs for %s\" % gname)\n host_graphs = [host_outfiles[node][gname] for node in ds.get_hosts()\n if host_outfiles[node][gname] != RRDToolDB.SKIPPED]\n vm_graphs = [vm_outfiles[node][gname] for node in ds.get_vms()\n if vm_outfiles[node][gname] != RRDToolDB.SKIPPED]\n if host_graphs + vm_graphs:\n newgraph = \"%s/%s_%s.png\" % (topdir, file_prefix, gname)\n cls.combine_graphs_vertically(host_graphs + vm_graphs, newgraph)\n outfiles[\"metrics\"][gname] = newgraph\n else:\n outfiles[\"metrics\"][gname] = RRDToolDB.SKIPPED", "def union(s1, s2):\n \"*** YOUR CODE HERE ***\"\n s = set()\n for member in s1:\n s.add(member)\n for member in s2:\n s.add(member)\n return s", "def generate_networkx_graphs(raw_graphs):\n\n source_graphs = [source_from_raw(raw) for raw in raw_graphs]\n target_graphs = [target_from_raw(raw) for raw in raw_graphs]\n\n return source_graphs, target_graphs", "def merge_nodes(self,n0,n1):\n # -- Sanity checks - does not yet allow for collapsing edges.\n\n # if they share any cells, would update the cells, but for now\n # just signal failure.\n n0_cells=list(self.node_to_cells(n0))\n n1_cells=list(self.node_to_cells(n1))\n cell_to_edge_cache={}\n\n for c in n1_cells:\n if c in n0_cells:\n print(\"cell %d common to both nodes\"%c)\n raise GridException(\"Not ready for merging nodes in the same cell\")\n # otherwise record and fix up below\n\n # while we're looping, cache the edges as they will\n # be mutated along the way.\n cell_to_edge_cache[c]=self.cell_to_edges(c).copy()\n\n # do they share an edge, but not already fixed in the above stanza?\n j=self.nodes_to_edge(n0,n1)\n if j is not None:\n raise GridException(\"Not ready for merging endpoints of an edge\")\n\n edge_map={} # index of superceded edge => superceding edge\n\n # Update edges of n1 to point to n0\n # if that would cause a duplicate edge, then the n1 version is deleted\n n1_edges=list(self.node_to_edges(n1)) # make copy since we'll mutate it\n for j in n1_edges:\n if self.edges['nodes'][j,0]==n1:\n nj=0\n elif self.edges['nodes'][j,1]==n1:\n nj=1\n else:\n assert False # sanity check\n newnodes=self.edges[j]['nodes'].copy()\n newnodes[nj]=n0\n # it's possible that this is an edge which already exists\n jother=self.nodes_to_edge(*newnodes)\n if jother is not None:\n # want to keep jother, delete j. but is there info on\n # cells which should be brought over?\n edge_map[j]=jother\n # wait to delete j until after cells have been moved to jother.\n else:\n self.log.debug(\"Modifying edge j=%d\"%j)\n self.modify_edge(j,nodes=newnodes)\n\n # -- Transition any cells. \n for c in n1_cells:\n # update the node list:\n cnodes=self.cell_to_nodes(c).copy()\n nc=list(cnodes).index(n1)\n cnodes[nc]=n0\n\n # Dangerous to use cell_to_edges, since it may\n # have to consult the edge topology, which is disrupted\n # in the above code. 
\n # cell_to_edges: first checks cells['edges'], may \n # go to cell_to_nodes(c): that's safe.\n # and nodes_to_edge\n # -> node_to_edges, which in turn may consult self.edges['nodes']\n\n #cedges=self.cell_to_edges(c).copy()\n cedges=cell_to_edge_cache[c]\n\n for ji,j in enumerate(cedges):\n if j in edge_map:\n # is there were edges['cells'] should be updated?\n\n # sever the edge=>cell pointer, to p\n # could just set to [-1,-1], but this keeps things very explicit\n # for debugging\n j_cells=list(self.edges['cells'][j])\n j_cells_side=j_cells.index(c)\n j_cells[ j_cells_side ] = -1\n self.modify_edge(j,cells=j_cells)\n\n # and modify the receiving edge, too\n jo=edge_map[j]\n jo_cells=list(self.edges['cells'][jo])\n # which side of jo? a bit tedious...\n if list(self.edges['nodes'][j]).index(n1) == list(self.edges['nodes'][jo]).index(n0):\n # same orientation\n jo_cells_side=j_cells_side\n elif list( self.edges['nodes'][j]).index(n1) == 1-list(self.edges['nodes'][jo]).index(n0):\n jo_cells_side=1-j_cells_side\n else:\n raise Exception(\"Failed in some tedium\")\n assert jo_cells[jo_cells_side]<0\n jo_cells[jo_cells_side]=c\n self.modify_edge(edge_map[j],cells=jo_cells)\n # yikes. any chance that worked?\n\n cedges[ji]=edge_map[j]\n\n # maybe this is where we'd update cells['edges'] too?\n self.modify_cell(c,nodes=cnodes,edges=cedges)\n\n for dead_edge in edge_map:\n self.delete_edge(dead_edge)\n\n self.delete_node(n1)", "def union(self, other):\n # initialize new Set from the elements in the first Set\n union_set = Set(self.get_elements())\n\n # add every element in the second Set to a new Set and return it\n for element in other.get_elements():\n union_set.add(element)\n return union_set", "def _append_source_and_target(self, graph):\n graph.add_node( \"source\" )\n graph.add_node( \"target\" )\n \n for leave in (n for n,d in graph.out_degree_iter() if d==0):\n if leave is not \"source\" and leave is not \"target\":\n graph.add_edge( leave, \"target\" )\n \n for root in (n for n,d in graph.in_degree_iter() if d==0):\n if root is not \"source\" and root is not \"target\": \n graph.add_edge( \"source\", root )", "def merge_networks_in_parallel(n1, n2):\n # This operation is not defined if both networks are linked.\n assert not (n1.is_linked and n2.is_linked), (n1, n2)\n\n if n1.is_linked:\n return merge_networks_in_parallel(n2, n1)\n\n # Either n2 is linked and n1 not or both are not linked.\n assert not n1.is_linked\n\n new_l_size = n1.l_size + n2.l_size\n new_u_size = n1.u_size + n2.u_size\n res_is_linked = n1.is_linked or n2.is_linked\n\n # Merge 0-poles.\n n1.zero_pole.insert_all_before(n2.zero_pole.prior)\n\n # Merge inf-poles.\n n1.inf_pole.insert_all_after(n2.inf_pole.next)\n\n # Remove the link edge in n1\n n1.zero_pole.remove()\n n1.inf_pole.remove()\n\n res = Network(n2.zero_pole, res_is_linked, new_l_size, new_u_size)\n res.type = 'P'\n return res\n\n # # Merge their 0-poles.\n # first_net_zero_pole_prior = first_net_zero_pole_edge.prior\n # second_net_zero_pole_next = second_net_zero_pole_edge.next\n # second_net_zero_pole_prior = second_net_zero_pole_edge.prior\n # first_net_zero_pole_edge.prior = second_net_zero_pole_prior\n # second_net_zero_pole_prior.next = first_net_zero_pole_edge\n # first_net_zero_pole_prior.next = second_net_zero_pole_next\n # second_net_zero_pole_next.prior = first_net_zero_pole_prior\n #\n # # Update the node numbers in the zero pole.\n # half_edge_walker = first_net_zero_pole_edge.next\n # while half_edge_walker != first_net_zero_pole_edge:\n # 
half_edge_walker.node_nr = first_net_zero_pole_edge.node_nr\n # half_edge_walker = half_edge_walker.next\n\n # # Merge their inf-poles\n # first_net_inf_pole_next = first_net_inf_pole_edge.next\n # second_net_inf_pole_prior = second_net_inf_pole_edge.prior\n # second_net_inf_pole_next = second_net_inf_pole_edge.next\n # first_net_inf_pole_edge.next = second_net_inf_pole_next\n # second_net_inf_pole_next.prior = first_net_inf_pole_edge\n # first_net_inf_pole_next.prior = second_net_inf_pole_prior\n # second_net_inf_pole_prior.next = first_net_inf_pole_next\n #\n # # Update the node numbers in the inf pole\n # half_edge_walker = first_net_inf_pole_edge.next\n # while half_edge_walker != first_net_inf_pole_edge:\n # half_edge_walker.node_nr = first_net_inf_pole_edge.node_nr\n # half_edge_walker = half_edge_walker.next", "def mergeWith(self, other):\n assert not other.synthesised\n self.globals.update(other.globals)\n self.signals.update(other.signals)\n self.startsOfDataPaths.update(other.startsOfDataPaths)\n self.subUnits.update(other.subUnits)\n \n for s in other.signals:\n s.ctx = self", "def build_drop_fullgraphs(self, do_subgraph=False, graph_lib='pygraphviz'):\n if 'pygraphviz' == graph_lib:\n G = pgv.AGraph(strict=True, directed=True)\n else:\n G = nx.Graph()\n do_subgraph = False\n subgraph_dict = defaultdict(list) # k - node-ip, v - a list of graph nodes\n oid_gnid_dict = dict()\n\n for i, oid in enumerate(self.pg_spec.keys()):\n oid_gnid_dict[oid] = str(i)\n logger.info(\"oid to gid mapping done\")\n\n for dropspec in self.pg_spec.itervalues():\n gid = oid_gnid_dict[dropspec['oid']]\n ip = dropspec['node']\n subgraph_dict[ip].append(gid)\n if (dropspec['type'] == 'app'):\n G.add_node(gid, shape='rect', label='')#, fixedsize=True, hight=.05, width=.05)\n elif (dropspec['type'] == 'plain'): #parallelogram\n G.add_node(gid, shape='circle', label='')#, fixedsize=True, hight=.05, width=.05)\n logger.info(\"Graph nodes added\")\n\n for dropspec in self.pg_spec.itervalues():\n gid = oid_gnid_dict[dropspec['oid']]\n if (dropspec['type'] == 'app'):\n ds_kw = 'outputs' #down stream key word\n elif (dropspec['type'] == 'plain'):\n ds_kw = 'consumers'\n else:\n ds_kw = 'None'\n if (ds_kw in dropspec):\n for doid in dropspec[ds_kw]:\n G.add_edge(gid, oid_gnid_dict[doid])\n logger.info(\"Graph edges added\")\n\n if (do_subgraph):\n for i, subgraph_nodes in enumerate(subgraph_dict.values()):\n # we don't care about the subgraph label or rank\n subgraph = G.add_subgraph(subgraph_nodes, label='%d' % i, name=\"cluster_%d\" % i, rank=\"same\")\n subgraph.graph_attr['rank']='same'\n logger.info(\"Subgraph added\")\n\n return G", "def GenDumbbellGraph(n1, n2):\n G = nx.complete_graph(n1)\n H = nx.complete_graph(n2)\n\n mapping = {}\n for i in range(n2):\n mapping[i] = i+n1\n H = nx.relabel_nodes(H, mapping=mapping)\n\n I = nx.union(G,H)\n I.add_edge(n1-1,n1)\n I.weighted = False\n #set weight to 1\n for e in I.edges_iter():\n I.add_edge(e[0],e[1], weight = 1)\n\n print(I.number_of_edges())\n print(I.number_of_nodes())\n \n print(I.edges());\n #Draw(I);\n return I", "def merge(self, other):\n self._moments = merge_pqc([self, other])._moments\n self._parameters = sp.symarray(self.parameter_symbol, len(self.symbols))\n if self.flatten_circuit:\n self.flatten()", "def mergeWith(self, others):", "def merge(list1, list2):\n holding = list1.to_list()\n [holding.append(i) for i in list2.to_list()]\n # for i in list2.to_list():\n # holding.append(i)\n holding = sorted(holding)\n\n output = 
LinkedList(Node(holding[0]))\n for i in holding[1:]:\n output.append(i)\n return output", "def test_graph2():\n mol_graph1 = DGLGraph([(0, 1), (0, 2), (1, 2)])\n mol_graph2 = DGLGraph([(0, 1), (1, 2), (1, 3), (1, 4)])\n batch_mol_graph = dgl.batch([mol_graph1, mol_graph2])\n node_feats = torch.arange(batch_mol_graph.number_of_nodes()).float().reshape(-1, 1)\n edge_feats = torch.arange(2 * batch_mol_graph.number_of_edges()).float().reshape(-1, 2)\n\n complete_graph1 = get_complete_graph(mol_graph1.number_of_nodes())\n complete_graph2 = get_complete_graph(mol_graph2.number_of_nodes())\n batch_complete_graph = dgl.batch([complete_graph1, complete_graph2])\n atom_pair_feats = torch.arange(batch_complete_graph.number_of_edges()).float().reshape(-1, 1)\n\n return batch_mol_graph, node_feats, edge_feats, batch_complete_graph, atom_pair_feats", "def merge(t1, t2):\n if t2 is None:\n return t1\n if t1 is None:\n return t2\n\n t1 = _splay(_find_max(t1))\n t1.right = t2\n t2.parent = t1\n return t1", "def clone_graph(source_graph, target_graph=None, identifier=None):\n if target_graph is None:\n g = rdflib.Graph(identifier=identifier)\n for p, n in source_graph.namespace_manager.namespaces():\n g.namespace_manager.bind(p, n, override=True, replace=True)\n else:\n g = target_graph\n for p, n in source_graph.namespace_manager.namespaces():\n g.namespace_manager.bind(p, n, override=False, replace=False)\n for t in iter(source_graph):\n g.add(t)\n return g", "def merge(self,best1,best2):\n\t\treturn self.cu_for_merge(best1,best2,False)", "def merge(): #Status: WIP\r\n pass", "def unMerge(self, ASGgraph, metaModelName=None, atom3i=None ):\r\n \r\n #--- Denis added parallel data structure for tracking ASG's, 2005 -----\r\n self.__deleteASG2Tracker( metaModelName )\r\n \r\n # 1st check if the type of ASGgraph is our own type -> \r\n # so erase ourselves and return another ASG\r\n if self.getClass() == ASGgraph.getClass(): \r\n return self.selfUnMerge( atom3i=atom3i )\r\n \r\n # 2nd check if we have this ASGgraph\r\n isPresent = 0\r\n ASGClass = ASGgraph.getClass()\r\n for midx in self.mergedASG:\r\n if midx.getClass() == ASGClass:\r\n isPresent = 1\r\n break\r\n if not isPresent: return 0\r\n \r\n # now check that we do not have instances of the entities of the ASG to delete\r\n canDelete = 1\r\n for entity in midx.listNodes.keys():\r\n if self.listNodes[entity] != []:\r\n canDelete = 0\r\n break\r\n \r\n ##if not canDelete: return -1\r\n#----------------- Lets delete everything anyway, Denis, 2005 ------------------ \r\n if( not canDelete and not askokcancel( \r\n 'WARNING: Deleting '+metaModelName,\r\n 'If you press OK all things '+metaModelName \r\n +' will be cleanly and utterly removed\\n\\n' \r\n +'If you CANCEL, then the formalism will be half-kept,'+\r\n ' half-removed (this dialog appears to late too stop this) \\n\\n'\r\n +'CANCEL IS NOT RECOMMENDED'\r\n + ' (Almost 100% sure bad things will happen to you)') ):\r\n return -1\r\n \r\n self.mergedASG.remove(midx)\r\n \r\n # For each entity in the ASG that's going bye bye\r\n for entity in midx.listNodes.keys():\r\n # Remove from nodes list\r\n if( self.listNodes.has_key( entity ) ):\r\n # If object has an associated graphical object, kill it\r\n for obj in self.listNodes[ entity ]:\r\n if( obj.graphObject_ and atom3i): obj.graphObject_.erase(atom3i) \r\n # Kill all the semantic objects of this type\r\n del self.listNodes[ entity ]\r\n\r\n # Remove from types list\r\n if( entity in self.nodeTypes ):\r\n del self.nodeTypes[ self.nodeTypes.index( 
entity ) ]\r\n \r\n # Remove generated attributes that aren't needed no more..\r\n for genattr in midx.generatedAttributes.keys():\r\n # If this is a collided attribute, then remove it from the tracking\r\n # NOTE: This will work for N model collisions of M attributes\r\n CAT = self.__collidedAttributeTracker\r\n if( CAT.has_key( genattr ) ): \r\n if( CAT[ genattr ] == 1 ): del CAT[ genattr ]\r\n else: CAT[ genattr ] -= 1\r\n else: \r\n # Safely destroy this attribute, NO ONE, uses it now\r\n del self.generatedAttributes[genattr]\r\n \r\n return self", "def merge_quantities(self, first, second):\n dom = self.get_canonical(first)\n add = self.get_canonical(second)\n self._qm.merge(dom, add)\n self.import_cfs(second)", "def merge_two_personroot_nodes(left_personroot_node: Node, right_personroot_node: Node) -> None:\n global _graph\n\n if left_personroot_node is None or right_personroot_node is None:\n print('merge_two_personroot_nodes(): Error: (one of the) nodes is None.')\n return\n\n if left_personroot_node['name'] != 'person-root' \\\n or right_personroot_node['name'] != 'person-root':\n print('merge_two_personroot_nodes(): not anticipated: (one of the) nodes '\n + 'are not \"person-root\".')\n return\n\n if left_personroot_node == right_personroot_node:\n # They are already connected, we are done.\n return\n\n # There are two possible reasons why it can happen that two person-root nodes\n # of two nodes to insert are different:\n # (1) It can happen e.g. in case a personal ID (ISNI, ORCID, etc.) is assigned\n # to two or more different persons.\n # Of course, that should not happen. Most probably this in a typo in a source system.\n # (2) The two nodes refer to the same person, but originate from different source\n # systems.\n # E.g. harvest of system 1 results in ORCID and ISNI of the same person, which have a\n # common person-root. Harvest of system 2 results in EMAIL with another person-root.\n # Now a subsequent harvest results in ORCID and EMAIL of the same person. Then there\n # are two different person-roots which need to be merged.\n # Both can happen, but we cannot know if it is either (1) or (2).\n\n now = datetime.now()\n timestamp = now.strftime('%Y%m%d-%H%M%S')\n count = 0\n what_happened = timestamp + '-' + format(count, '02d') + ': '\n what_happened += 'Merged person-root node \"'\n what_happened += right_personroot_node['_key'] + '\" to this person-root node '\n what_happened += 'and then deleted it. 
This was the history of the deleted node:'\n left_personroot_node['_history'].append(what_happened)\n for history in right_personroot_node['_history']:\n count += 1\n what_happened = timestamp + '-' + format(count, '02d') + ': '\n what_happened += history\n left_personroot_node['_history'].append(what_happened)\n\n count += 1\n what_happened = timestamp + '-' + format(count, '02d') + ': '\n what_happened += 'End of history of the deleted node.'\n left_personroot_node['_history'].append(what_happened)\n\n count += 1\n what_happened = timestamp + '-' + format(count, '02d') + ': '\n what_happened += 'These were the neighbors of the deleted node, '\n what_happened += 'now merged with the neighbors of this node:'\n left_personroot_node['_history'].append(what_happened)\n\n count += 1\n what_happened = timestamp + '-' + format(count, '02d') + ': '\n for edge_from_right_node in get_edges(right_personroot_node):\n right_node = edge_from_right_node.end_node\n if right_node is None:\n continue\n if right_node == right_personroot_node:\n continue\n\n what_happened += '\"' + str(right_node['_key']) + '\" '\n edge_delete1 = LINKS_TO(right_personroot_node, right_node)\n edge_delete2 = LINKS_TO(right_node, right_personroot_node)\n edge_create1 = LINKS_TO(left_personroot_node, right_node)\n edge_create2 = LINKS_TO(right_node, left_personroot_node)\n # _graph.delete() also deletes 'right_personroot_node'.\n # TODO: There seems to be a bug here. It does not only delete 'right_personroot_node', but sometimes it also\n # deletes other nodes which have more than one edge, such as an 'organization' node connected to multiple\n # person-root nodes (including right_personroot_node).\n # The problem is that _graph.separate() does not seem to work, which seems to be the 'best' function\n # since it only deletes edges. 
Use with caution (or don't use).\n _graph.delete(edge_delete1)\n _graph.delete(edge_delete2)\n _graph.merge(edge_create1 | edge_create2, 'RCGNode', '_key')\n\n what_happened += '.'\n left_personroot_node['_history'].append(what_happened)\n\n count += 1\n what_happened = timestamp + '-' + format(count, '02d') + ': '\n what_happened += 'End of list of neighbors of the deleted node.'\n left_personroot_node['_history'].append(what_happened)\n _graph.push(left_personroot_node)\n return", "def union(self):\n nfa2 = self.aut_stack.pop()\n nfa1 = self.aut_stack.pop()\n\n nfa1_star = nfa1.transform('X')\n nfa2_star = nfa2.transform('Y')\n\n nfa_union = Automaton()\n nfa_union.states = list(set(nfa1_star.states).union(nfa2_star.states))\n nfa_union.states.append('S')\n nfa_union.alphabet = list(set(nfa1_star.alphabet).union(nfa2_star.alphabet))\n nfa_union.final = list(set(nfa1_star.final).union(nfa2_star.final))\n nfa_union.change_start_state('S')\n nfa_union.transition = dict(nfa1_star.transition, **nfa2_star.transition)\n nfa_union.transition['S, .'] = [nfa1_star.q_0, nfa2_star.q_0]\n\n self.aut_stack.append(nfa_union)", "def union(llist_1 : LinkedList, llist_2 : LinkedList) -> LinkedList:\n # Convert to set to remove repeated entries in each list\n lset_1 = list_to_set(llist_1)\n lset_2 = list_to_set(llist_2)\n \n # Combine the two sets to create a union\n union_list = LinkedList()\n list_of_added = []\n for item in lset_1:\n union_list.append(item)\n list_of_added.append(item)\n\n for item in lset_2:\n if item not in list_of_added:\n union_list.append(item)\n\n return union_list", "def merge(*args):\n return _libsbml.Unit_merge(*args)", "def get_sharp_relations_for_sets(follows, set_1, set_2):\n for item_1 in set_1:\n for item_2 in set_2:\n if not get_sharp_relation(follows, item_1, item_2):\n return False\n return True", "def side_renaming(network1, network2):\n\n # There is probably faster way to perform this, optimize later if needed\n for i in range(len(network1.nodes)):\n \n if (network1.nodes[i][\"group\"] == \"#fcae91FF\"):\n network1.nodes[i][\"T1\"] = \"0\"\n\n elif (network1.nodes[i][\"group\"] == \"#7828a0FF\"):\n network1.nodes[i][\"T1\"] = \"1\"\n \n else:\n print(\"Error with group encoding!\")\n \n \n for i in range(len(network2.nodes)):\n \n if (network2.nodes[i][\"group\"] == \"#fcae91FF\"):\n network2.nodes[i][\"T2\"] = \"0\"\n \n elif (network2.nodes[i][\"group\"] == \"#7828a0FF\"):\n network2.nodes[i][\"T2\"] = \"1\"\n \n else:\n print(\"This should not be printed! 
Error with group encoding!\")\n\n return network1, network2", "def mergeSpinSystems(spinSystemB, spinSystemA):\n\n if spinSystemB is spinSystemA:\n return spinSystemA\n \n if spinSystemB.isDeleted:\n return spinSystemA\n \n if spinSystemA.isDeleted:\n return spinSystemB\n \n residueA = spinSystemA.residue\n if not residueA:\n spinSystemA.setResidue(spinSystemB.residue)\n spinSystemA.setCcpCode(spinSystemB.ccpCode)\n spinSystemA.setMolType(spinSystemB.molType)\n \n if not spinSystemA.ccpCode:\n if residueA:\n spinSystemA.setCcpCode(residueA.ccpCode)\n else:\n spinSystemA.setCcpCode(spinSystemB.ccpCode)\n \n if not spinSystemA.molType:\n if residueA:\n spinSystemA.setMolType(residueA.molResidue.molType)\n else:\n spinSystemA.setMolType(spinSystemB.molType)\n\n\n resonanceAssignments = {}\n for resonance in spinSystemA.resonances:\n resonanceSet = resonance.resonanceSet\n \n if resonanceSet:\n atomSets = list(resonanceSet.atomSets)\n atomSets.sort()\n resonanceAssignments[tuple(atomSets)] = resonance\n \n \n mergeList = []\n for resonance in spinSystemB.resonances:\n \n removeSpinSystemResonance(spinSystemB, resonance)\n addSpinSystemResonance(spinSystemA, resonance)\n \n resonanceSet = resonance.resonanceSet\n \n if resonanceSet:\n atomSets = list(resonanceSet.atomSets)\n atomSets.sort()\n resonanceA = resonanceAssignments.get(tuple(atomSets))\n \n if resonanceA:\n n = len(atomSets)\n if n == 1:\n mergeList.append((resonance, resonanceA))\n #elif len(resonanceSet.resonances) == n : # prochiral\n \n for residueProb in list(spinSystemB.residueProbs):\n residue = residueProb.possibility\n \n if not spinSystemA.findFirstResidueProb(possibility=residue):\n spinSystemA.newResidueProb(possibility=residue,\n weight=residueProb.weight)\n \n if mergeList:\n residue = spinSystemA.residue\n if residue:\n name = '%d%s' % (residue.seqCode,residue.ccpCode)\n else:\n name = '%s{%d}' % (spinSystemA.ccpCode or '',spinSystemA.serial)\n \n getName = makeResonanceGuiName\n resonanceText = ','.join([getName(r1,fullName=False) for r1,r2 in mergeList])\n \n msg = 'Merge duplicate %s resonances in spin system %s' % (resonanceText, name)\n if showYesNo('Query',msg): \n for r1, r2 in mergeList:\n mergeResonances(r1,r2) \n\n for link in spinSystemB.findAllResonanceGroupProbs(linkType='sequential',isSelected=True):\n makeSeqSpinSystemLink(spinSystemA, link.possibility, delta=link.sequenceOffset)\n\n for link in spinSystemB.findAllFromResonanceGroups(linkType='sequential',isSelected=True):\n makeSeqSpinSystemLink(link.fromResonanceGroup, spinSystemA, delta=link.sequenceOffset)\n \n if not spinSystemB.isDeleted:\n # Could be already deleted due to recursive merge\n spinSystemB.delete()\n\n return spinSystemA", "def union(llist_1, llist_2):\n union_set = set()\n return_linked_list = LinkedList()\n node = llist_1.get_head()\n while node:\n union_set.add(node.get_value())\n node = node.get_next()\n node = llist_2.get_head()\n while node:\n union_set.add(node.get_value())\n node = node.get_next()\n for item in union_set:\n return_linked_list.append(item)\n if return_linked_list.size() == 0:\n return 'No unions found'\n return return_linked_list", "def variant_add(v1: dict, v2: dict) -> Dict[str, Any]:\n left = set(v1.keys()).difference(v2.keys())\n right = set(v2.keys()).difference(v1.keys())\n joint = set(v1.keys()) & set(v2.keys())\n\n # deal with __migrator: ordering\n if \"__migrator\" in v2:\n ordering = v2[\"__migrator\"].get(\"ordering\", {})\n operation = v2[\"__migrator\"].get(\"operation\")\n # handle special 
operations\n if operation:\n return VARIANT_OP[operation](v1, v2)\n else:\n ordering = {}\n\n # special keys\n if \"__migrator\" in right:\n right.remove(\"__migrator\")\n\n # special keys in joint\n special_variants = {}\n if \"pin_run_as_build\" in joint:\n # For run_as_build we enforce the migrator's pin\n # TODO: should this just be a normal ordering merge, favoring more exact pins?\n joint.remove(\"pin_run_as_build\")\n special_variants[\"pin_run_as_build\"] = {\n **v1[\"pin_run_as_build\"],\n **v2[\"pin_run_as_build\"],\n }\n\n if \"zip_keys\" in joint:\n # zip_keys is a bit weird to join on as we don't have a particularly good way of identifying\n # a block. Longer term having these be named blocks would make life WAY simpler\n # That does require changes to conda-build itself though\n #\n # A zip_keys block is deemed mergeable if zkₛ,ᵢ ⊂ zkₘ,ᵢ\n zk_out = []\n zk_l = {frozenset(e) for e in v1[\"zip_keys\"]}\n zk_r = {frozenset(e) for e in v2[\"zip_keys\"]}\n\n for zk_r_i in sorted(zk_r, key=lambda x: -len(x)):\n for zk_l_i in sorted(zk_l, key=lambda x: -len(x)):\n # Merge the longest common zk first\n if zk_l_i.issubset(zk_r_i):\n zk_l.remove(zk_l_i)\n zk_r.remove(zk_r_i)\n zk_out.append(zk_r_i)\n break\n else:\n # Nothing to do\n pass\n\n zk_out.extend(zk_l)\n zk_out.extend(zk_r)\n zk_out = sorted(\n [sorted(zk) for zk in zk_out], key=lambda x: (len(x), str(x))\n )\n\n joint.remove(\"zip_keys\")\n special_variants[\"zip_keys\"] = zk_out\n\n joint_variant = {}\n for k in joint:\n v_left, v_right = ensure_list(v1[k]), ensure_list(v2[k])\n joint_variant[k] = variant_key_add(\n k, v_left, v_right, ordering=ordering.get(k, None)\n )\n\n out = {\n **toolz.keyfilter(lambda k: k in left, v1),\n **toolz.keyfilter(lambda k: k in right, v2),\n **joint_variant,\n **special_variants,\n }\n\n return out", "def merge_nodes(self):\n\n\t\t\t#obtenemos los dos primeros nodos que equivalen a quienes tienen menor frecuencia\n\t\t\twhile(len(self.heap)>1):\n\t\t\t\tnode1 = heapq.heappop(self.heap)\n\t\t\t\tnode2 = heapq.heappop(self.heap)\n\n\t\t\t\tmerged = self.HeapNode(None, node1.freq + node2.freq)#creamos un nodo padre que va a contener los nodos anteriores a la derecha y izquierda\n\t\t\t\tmerged.left = node1\n\t\t\t\tmerged.right = node2\n\n\t\t\t\theapq.heappush(self.heap, merged)#agregamos este nodo al priority queue", "def headsofunion(h1, h2):\n res = unfi.set(b'heads((%ln + %ln - null))', h1, h2)\n return {ctx.node() for ctx in res}", "def union(self, data1, data2):\n root1 = self.find_set(data1)\n root2 = self.find_set(data2)\n\n if root1 == root2:\n return False\n\n elif root1.rank >= root2.rank:\n if root1.rank == root2.rank:\n root1.rank = root1.rank + 1\n root2.parent = root1\n else:\n root1.parent = root2\n\n return True", "def union(self, data1, data2):\n root1 = self.find_set(data1)\n root2 = self.find_set(data2)\n\n if root1 == root2:\n return False\n\n elif root1.rank >= root2.rank:\n if root1.rank == root2.rank:\n root1.rank = root1.rank + 1\n root2.parent = root1\n else:\n root1.parent = root2\n\n return True", "def union(\n self,\n other,\n node_mapping,\n check_shared_equality=True,\n add_populations=True,\n record_provenance=True,\n ):\n tables = self.dump_tables()\n other_tables = other.dump_tables()\n tables.union(\n other_tables,\n node_mapping,\n check_shared_equality=check_shared_equality,\n add_populations=add_populations,\n record_provenance=record_provenance,\n )\n return tables.tree_sequence()", "def join_nodes_in_both_trees(tree1, nodeAinT1, cladeA,\n tree2, 
nodeBinT2, cladeB, test=False):\n cladeA = set(cladeA)\n cladeB = set(cladeB)\n leaves1 = get_leaf_set(tree1)\n leaves2 = get_leaf_set(tree2)\n\n cladeAisT1 = leaves1 == cladeA\n cladeBisT2 = leaves2 == cladeB\n\n # Handle adding all of tree1 into tree 2 and vice versa!!\n if cladeAisT1 and cladeBisT2:\n # Done\n print(\"Nodes are tree1 and tree2...\")\n if test:\n return [None, None]\n root = dendropy.Node()\n root.add_child(nodeAinT1)\n root.add_child(nodeBinT2)\n tree1 = dendropy.Tree(seed_node=root)\n tree1.is_rooted = True\n tree2 = None\n elif cladeAisT1:\n # Add all of tree 1 into tree 2\n print(\"Add all of tree 1 into tree 2\")\n if test:\n return [None, None]\n [tree2, nodeBinT2] = extract_nodes_from_split(tree2, nodeBinT2,\n cladeB)\n root = dendropy.Node()\n root.add_child(nodeAinT1)\n root.add_child(tree2.seed_node)\n tree1 = dendropy.Tree(seed_node=root)\n tree1.is_rooted = True\n tree2 = None\n elif cladeBisT2:\n # Add all of tree 2 into tree 1\n print(\"Add all of tree 2 into tree 1\")\n if test:\n return [None, None]\n [tree1, nodeAinT1] = extract_nodes_from_split(tree1, nodeAinT1,\n cladeA)\n root = dendropy.Node()\n root.add_child(tree1.seed_node)\n root.add_child(nodeBinT2)\n tree1 = dendropy.Tree(seed_node=root)\n tree1.is_rooted = True\n tree2 = None\n else:\n # Make the join!\n print(\"Making join...\")\n [tree1, nodeAinT1] = extract_nodes_from_split(tree1, nodeAinT1,\n cladeA)\n [tree2, nodeBinT2] = extract_nodes_from_split(tree2, nodeBinT2,\n cladeB)\n\n root1 = dendropy.Node()\n root1.add_child(tree1.seed_node)\n root1.add_child(deepcopy(nodeBinT2)) # TODO: Remove deep copies!\n tree1 = dendropy.Tree(seed_node=root1)\n tree1.is_rooted = True\n\n root2 = dendropy.Node()\n root2.add_child(tree2.seed_node)\n root2.add_child(deepcopy(nodeAinT1)) # TODO: Remove deep copies!\n tree2 = dendropy.Tree(seed_node=root2)\n tree2.is_rooted = True\n\n return [tree1, tree2]", "def merge(lists):\n newsets, sets = [set(lst) for lst in lists if lst], []\n while len(sets) != len(newsets):\n sets, newsets = newsets, []\n for aset in sets:\n for eachset in newsets:\n if not aset.isdisjoint(eachset):\n eachset.update(aset)\n break\n else:\n newsets.append(aset)\n return newsets", "def merge(self, other):\n self.isotxsMetadata = self.isotxsMetadata.merge(\n other.isotxsMetadata, self, other, \"ISOTXS\", AttributeError\n )\n self.gamisoMetadata = self.gamisoMetadata.merge(\n other.gamisoMetadata, self, other, \"GAMISO\", AttributeError\n )\n self.pmatrxMetadata = self.pmatrxMetadata.merge(\n other.pmatrxMetadata, self, other, \"PMATRX\", AttributeError\n )\n self.micros.merge(other.micros)\n self.gammaXS.merge(other.gammaXS)\n self.neutronHeating = _mergeAttributes(self, other, \"neutronHeating\")\n self.neutronDamage = _mergeAttributes(self, other, \"neutronDamage\")\n self.gammaHeating = _mergeAttributes(self, other, \"gammaHeating\")\n self.isotropicProduction = _mergeAttributes(self, other, \"isotropicProduction\")\n self.linearAnisotropicProduction = _mergeAttributes(\n self, other, \"linearAnisotropicProduction\"\n )\n # this is lazy, but should work, because the n-order wouldn't be set without the others being set first.\n self.nOrderProductionMatrix = (\n self.nOrderProductionMatrix or other.nOrderProductionMatrix\n )", "def merge_yamls(yaml1, yaml2):\n updated_tools_yaml = copy.deepcopy(yaml1)\n\n # unique_tools = [dict(y) for y in set(tuple(x.items())\n # for x in yaml1['tools'])]\n\n # copy base and updated tools entries (except the revisions list of each)\n for tool_entry in 
yaml2['tools']:\n if tool_entry not in yaml1['tools']:\n updated_tools_yaml['tools'].append(tool_entry)\n\n return updated_tools_yaml", "def joint_graph(graph, nodes):\n # TODO\n joint_graph = nodes = None\n\n return joint_graph, nodes", "def merge(self, rhs):\n if self.forwardLabel == rhs.forwardLabel and \\\n self.reverseLabel == rhs.reverseLabel:\n return Relation(self.forwardLabel, self.reverseLabel).fromSequence(\n itertools.chain(self.iteritems(), rhs.iteritems())\n )\n else:\n return Exception, \"Cannot merge relations with different labels\"", "def union(node1, node2):\n node1_root = find(node1)\n node2_root = find(node2)\n if node1_root == node2_root:\n return\n if node1_root.rank < node2_root.rank:\n node1_root.parent = node2_root\n elif node2_root.rank > node2_root.rank:\n node2_root.parent = node1_root\n else:\n node2_root.parent = node1_root\n node1_root.rank = node1_root.rank + 1", "def synSimilarity(self, wSet1, wSet2): \n nW1 = len(wSet1)\n nW2 = len(wSet2)\n if nW1 == 0 or nW2 == 0:\n return 0.0\n synonyms1 = self.getSynonyms(wSet1)\n synonyms2 = self.getSynonyms(wSet2)\n \n # easy bit: find the number of identical words in each mention\n intersection = wSet1.intersection(wSet2)\n # now remove these words and look for synonyms between those left\n w1 = wSet1 - intersection\n w2 = wSet2 - intersection\n while len(w1) > 0:\n word1 = w1.pop()\n if word1 not in synonyms1:\n continue # no synonyms for this word\n \n for word2 in w2:\n if word2 not in synonyms2:\n continue # no synonyms for this word\n sharedSynsets = synonyms1[word1].intersection(synonyms2[word2])\n if len(sharedSynsets) > 0:\n # the two have at least one synset in common, consider them synonyms\n w2.remove(word2)\n intersection.add(word1)\n \n break\n return float(2*len(intersection)) / (nW1 + nW2)", "def merge_nffgs (cls, target, new, log=logging.getLogger(\"UNION\")):\n # Copy Infras\n target = cls._copy_node_type_with_flowrules(new.infras, target, log)\n # Copy NFs\n target = cls._copy_node_type(new.nfs, target, log)\n # Copy SAPs\n target = cls._copy_node_type(new.saps, target, log)\n\n # Copy remaining links which should be valid\n for u, v, link in new.network.edges_iter(data=True):\n if not target.network.has_edge(u, v, key=link.id):\n src_port = target.network.node[u].ports[link.src.id]\n dst_port = target.network.node[v].ports[link.dst.id]\n c_link = deepcopy(link)\n c_link.src = src_port\n c_link.dst = dst_port\n target.add_link(src_port=src_port, dst_port=dst_port, link=c_link)\n log.debug(\"Copy Link: %s\" % c_link)\n return target", "def singa_to_onnx_graph(cls, inputs, y, model_name=\"sonnx\"):\n assert len(\n y\n ) == 1, \"Not support multiple output now.\" # assume there is only one output\n y = y[0]\n\n graph_def = GraphProto()\n graph_def.name = model_name\n topol, ws, ins = utils.post_order_recursive(y.creator, y)\n\n # prepare the input\n X = []\n for op_name, op_t in ins.items():\n op_t = inputs.pop(0)\n dtype = TensorProto.INT32 if op_t.dtype == tensor.int32 else TensorProto.FLOAT\n X.append(helper.make_tensor_value_info(op_name, dtype, op_t.shape))\n\n # prepare the output\n y_optype = cls._get_singa_op_type(y.creator)\n if y_optype in cls._bool_operators:\n y_dtype = cls._bool_operators[y_optype]\n elif y.dtype == tensor.int32:\n y_dtype = TensorProto.INT32\n else:\n y_dtype = TensorProto.FLOAT\n Y = [helper.make_tensor_value_info(y.name, y_dtype, y.shape)]\n\n # prepare the weight\n W = []\n for op_name, op_t in ws.items():\n dtype = TensorProto.INT32 if op_t.dtype == tensor.int32 else 
TensorProto.FLOAT\n wt = tensor.to_numpy(op_t)\n wt = numpy_helper.from_array(wt)\n wt.name = op_name\n W.append(wt)\n X.append(helper.make_tensor_value_info(op_name, dtype, op_t.shape))\n\n # iterate the node graph\n for op_name, op in topol.items():\n optype = cls._get_singa_op_type(op)\n if optype in cls._unhandled_operators:\n cls.handle_special_ops(op, X, W)\n graph_def.node.extend(cls.singa_op_to_onnx_node(op, op_t))\n\n graph_def.input.extend(X)\n graph_def.output.extend(Y)\n graph_def.initializer.extend(W)\n return graph_def", "def merge_schema(first, second):\n if not (type(first) == type(second) == dict):\n raise ValueError(\"Argument is not a schema\")\n\n if not (first.get('type') == second.get('type') == 'object'):\n raise NotImplementedError(\"Unsupported root type\")\n\n return merge_objects(first, second)", "def join_nodes(trees, leaves, maps, nodeA, nodeB):\n cladeA = get_leaf_set(nodeA)\n cladeB = get_leaf_set(nodeB)\n\n if len(cladeA.intersection(cladeB)) > 0:\n raise Exception(\"Nodes are not disjoint on their leaf sets!\\n\")\n\n edits = [False] * len(trees)\n for i, edit in enumerate(edits):\n leaf = leaves[i]\n\n if cladeA == leaf:\n nodeAinT = nodeA\n else:\n nodeAinT = get_node_from_clade(trees[i], maps[i], cladeA)\n\n if cladeB == leaf:\n nodeBinT = nodeB\n else:\n nodeBinT = get_node_from_clade(trees[i], maps[i], cladeB)\n\n nAinT = nodeAinT is not None\n nBinT = nodeBinT is not None\n\n if nAinT and nBinT:\n # Node A and node B are both in T, do nothing!\n pass\n elif nAinT:\n # Add node B to T\n edits[i] = True\n root = dendropy.Node()\n root.add_child(deepcopy(nodeB)) # TODO: Remove deep copies!\n if leaves[i] == cladeA:\n nodeAinT.parent_node = None\n root.add_child(nodeAinT)\n else:\n [tree, nodeAinT] = extract_nodes_from_split(trees[i], nodeAinT,\n cladeA)\n root.add_child(tree.seed_node)\n trees[i] = dendropy.Tree(seed_node=root)\n trees[i].is_rooted = True\n elif nBinT:\n # Add node A to T\n edits[i] = True\n root = dendropy.Node()\n root.add_child(deepcopy(nodeA)) # TODO: Remove deep copies!\n if leaves[i] == cladeB:\n nodeBinT.parent_node = None\n root.add_child(nodeBinT)\n else:\n [tree, nodeBinT] = extract_nodes_from_split(trees[i], nodeBinT,\n cladeB)\n root.add_child(tree.seed_node)\n trees[i] = dendropy.Tree(seed_node=root)\n trees[i].is_rooted = True\n else:\n # Neither node A or node B is in T, do nothing!\n pass\n\n return [trees, edits]", "def union(self, other: \"CFG\") -> \"CFG\":\n start_temp = Variable(\"#STARTUNION#\")\n temp_0 = Terminal(\"#0UNION#\")\n temp_1 = Terminal(\"#1UNION#\")\n production_0 = Production(start_temp, [temp_0])\n production_1 = Production(start_temp, [temp_1])\n cfg_temp = CFG({start_temp},\n {temp_0, temp_1},\n start_temp,\n {production_0, production_1})\n return cfg_temp.substitute({temp_0: self,\n temp_1: other})", "def merge_from(self, other):\n assert not self.is_final\n if self.parent is not None:\n assert other.parent is not None\n self.parent.merge_from(other.parent)\n self.isolated_names.update(other.isolated_names)\n self.read.update(other.read)\n self.modified.update(other.modified)\n self.bound.update(other.bound)\n self.deleted.update(other.deleted)\n self.annotations.update(other.annotations)\n self.params.update(other.params)", "def direct_network(self):\n #print list(self.get_subgraphs())\n graphs = [self._depth_first_directed(g) for g in self.get_subgraphs()]\n self._network = reduce(lambda a, b: nx.union(a, b), graphs)", "def _merge_descriptors(\n desc1: TextendsDescriptor, desc2: Descriptor\n) -> 
TextendsDescriptor:\n if desc2 is None:\n return desc1\n for k2, v2 in desc2.items():\n if k2 in desc1.skip_merging:\n continue\n if k2 not in desc1:\n desc1[k2] = v2\n else:\n if isinstance(v2, Descriptor):\n desc1[k2].merge(v2)\n elif isinstance(v2, list):\n desc1[k2] = _merge_lists(desc1[k2], v2)\n\n return desc1", "def merge(self, other):\n\n assert self.ins_addr == other.ins_addr\n assert self.type == other.type\n\n o = self.copy()\n o.targets |= other.targets\n\n return o", "def merge(self, sets):\n merge = None\n for _, item in enumerate([item['data'] for item in sets]):\n if merge is None:\n merge = item\n else:\n Logger().info(\n 'Partition size = {0}'.format(\n PartitionRunner.merge_size(merge, item, how='outer', group_by=self._unique_columns)\n )\n )\n\n merge = merge.merge(\n item,\n on=self._unique_columns,\n how='outer'\n )\n return merge", "def merge_data(self, nodenet_data, keep_uids=False):\n\n uidmap = {}\n # for dict_engine compatibility\n uidmap[\"Root\"] = \"s1\"\n\n # re-use the root nodespace\n uidmap[\"s1\"] = \"s1\"\n\n # merge in spaces, make sure that parent nodespaces exist before children are initialized\n nodespaces_to_merge = set(nodenet_data.get('nodespaces', {}).keys())\n for nodespace in nodespaces_to_merge:\n self.merge_nodespace_data(nodespace, nodenet_data['nodespaces'], uidmap, keep_uids)\n\n # merge in nodes\n for uid in nodenet_data.get('nodes', {}):\n data = nodenet_data['nodes'][uid]\n parent_uid = data['parent_nodespace']\n if not keep_uids:\n parent_uid = uidmap[data['parent_nodespace']]\n if data['type'] in self.__nodetypes or data['type'] in self.native_modules:\n olduid = None\n if keep_uids:\n olduid = uid\n new_uid = self.create_node(\n data['type'],\n parent_uid,\n data['position'],\n name=data['name'],\n uid=olduid,\n parameters=data['parameters'],\n gate_parameters=data['gate_parameters'],\n gate_functions=data['gate_functions'])\n uidmap[uid] = new_uid\n node_proxy = self.get_node(new_uid)\n for gatetype in data['gate_activations']: # todo: implement sheaves\n node_proxy.get_gate(gatetype).activation = data['gate_activations'][gatetype]['default']['activation']\n\n else:\n warnings.warn(\"Invalid nodetype %s for node %s\" % (data['type'], uid))\n\n # merge in links\n for linkid in nodenet_data.get('links', {}):\n data = nodenet_data['links'][linkid]\n self.create_link(\n uidmap[data['source_node_uid']],\n data['source_gate_name'],\n uidmap[data['target_node_uid']],\n data['target_slot_name'],\n data['weight']\n )\n\n for monitorid in nodenet_data.get('monitors', {}):\n data = nodenet_data['monitors'][monitorid]\n if 'node_uid' in data:\n old_node_uid = data['node_uid']\n if old_node_uid in uidmap:\n data['node_uid'] = uidmap[old_node_uid]\n if 'classname' in data:\n if hasattr(monitor, data['classname']):\n getattr(monitor, data['classname'])(self, **data)\n else:\n self.logger.warn('unknown classname for monitor: %s (uid:%s) ' % (data['classname'], monitorid))\n else:\n # Compatibility mode\n monitor.NodeMonitor(self, name=data['node_name'], **data)", "def ExtendGraph(self, vtkMutableGraphHelper, vtkGraph):\n ...", "def merge(left: Node, right: Node) -> Node:\n if (not left) or (not right): #如果至少有一个是 None , 返回另一个\n return left or right\n elif left.prior < right.prior:\n # print(\"left\") # @Haor: 没有用到?\n left.r = merge(left.r, right)\n return left\n else: #以右为头结点, 将左树与右的左孩子重做结合\n \"\"\"\n Right will be root because it has more priority\n Now we need to merge left tree and right's left son\n \"\"\"\n right.l = merge(left, right.l)\n 
return right" ]
[ "0.6905344", "0.6667315", "0.66233647", "0.654952", "0.6426134", "0.6303256", "0.63004285", "0.6278962", "0.6188941", "0.6159831", "0.61385214", "0.60857415", "0.60827035", "0.59966093", "0.5975047", "0.58856934", "0.5844211", "0.58405745", "0.58178586", "0.57996076", "0.579917", "0.5797398", "0.57669127", "0.5750537", "0.57353175", "0.57105595", "0.57101643", "0.56960154", "0.5684606", "0.5668889", "0.56456095", "0.55896425", "0.55868024", "0.5575324", "0.55748755", "0.5565703", "0.5540921", "0.55389464", "0.5531023", "0.5514452", "0.5504022", "0.5497828", "0.5485809", "0.54730785", "0.54616284", "0.5442377", "0.54387563", "0.5437458", "0.5415573", "0.5398264", "0.53957796", "0.53945243", "0.5393571", "0.5391018", "0.5381774", "0.5373493", "0.53481907", "0.53427184", "0.5321609", "0.5321054", "0.5315131", "0.529739", "0.52844065", "0.52726364", "0.5255691", "0.52490795", "0.524622", "0.5237417", "0.5220411", "0.52187806", "0.5217458", "0.5212396", "0.52087307", "0.5192254", "0.5188274", "0.5184506", "0.5182209", "0.5182209", "0.517799", "0.5167075", "0.51655227", "0.5158863", "0.51506823", "0.51461124", "0.5141678", "0.51407856", "0.51387167", "0.5128987", "0.51046294", "0.509667", "0.5087774", "0.50772023", "0.5068958", "0.5068866", "0.50641316", "0.5058394", "0.50493646", "0.5046557", "0.5036435", "0.50345623" ]
0.77939695
0
Download the page at the given URL
def get_page(self, url):
    """Fetch the page at the given URL and return its HTML.

    @param url: URL we want to crawl
    @type url: String
    @return: the page HTML as a string, or None on failure
    """
    u = None
    html = None
    try:
        u = urlopen(url)
        html = u.read().decode('utf-8')
    except Exception as e:
        logging.exception(e)
    finally:
        print("Closing")
        if u is not None:
            u.close()
    return html
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def redirect(url):", "def goto(self, page: str):\n self.get(urllib.parse.urljoin(settings.config.host, page))", "def __open_page(self, url):\n try:\n # Opens the url\n page = request.urlopen(url)\n except Exception as e:\n print(e, url)\n return ''\n else:\n # Avoid that None will be returned to that, try to open the web page again.\n return page if page is not None else self.__open_page(url)", "def spidy_2(url):\r\n\tbr = Browser()\r\n\t# browser basic setup (for simulate a real web browser)<----todo para simular un navegador :D\r\n\tbr.set_handle_equiv(True) # cuando tratar HTML http-equiv headers como HTTP headers\r\n\tbr.set_handle_redirect(True) # para los redirect loops\r\n\tbr.set_handle_referer(True) # para annadir un referer al objeto request\r\n\tbr.set_handle_robots(False) # ignorar robots.txt\r\n\tbr.set_debug_http(False) # bueno para la fase de development\r\n\tbr.set_debug_responses(False) # mas debuggeo\r\n\tbr.set_debug_redirects(False) # mas aun\r\n\tbr.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time = 1) # puede usarse: br.set_handle_refresh(False)\r\n\t# para simular Firefox desde Fedora :)\r\n\tbr.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1')]\r\n\r\n\t#x=['php?id=','php?ID=','php?decl_id=','pageid=','staff_id=','php?category=','php?'] #expresiones regulares, las mas comunes\r\n\tbr.open(url)\r\n\tfor link in br.links():\r\n\t\tif (re.search('php.id=|php.category=|php.idcategoria=|pageid=|php.ID=|php.decl_id|staff_id=',link.url)):# el . significa cualquier caracter el | es OR\r\n\t\t\tprint link.url", "def spidy_1(url):\r\n\tbr = Browser()\r\n\t# browser basic setup (for simulate a real web browser)<----todo para simular un navegador :D\r\n\tbr.set_handle_equiv(True) # cuando tratar HTML http-equiv headers como HTTP headers\r\n\tbr.set_handle_redirect(True) # para los redirect loops\r\n\tbr.set_handle_referer(True) # para annadir un referer al objeto request\r\n\tbr.set_handle_robots(False) # ignorar robots.txt\r\n\tbr.set_debug_http(False) # bueno para la fase de development\r\n\tbr.set_debug_responses(False) # mas debuggeo\r\n\tbr.set_debug_redirects(False) # mas aun\r\n\tbr.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time = 1) # puede usarse: br.set_handle_refresh(False)\r\n\t# para simular Firefox desde Fedora :)\r\n\tbr.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1')]\r\n\tbr.open(url)\r\n\tfor link in br.links():\r\n\t\tprint link.text,link.url", "def _visit_pages(self, seed_url):\n\n # for single_url in seed_url:\n # update_sql = \" UPDATE fetch_list SET times = times+1 WHERE url = '{}'and source_id =17\".format(\n # single_url[0])\n # Dao.execute_dmls(update_sql)\n # self._base_url = single_url[0]\n # self._now_url = single_url[0]\n # html = self.get_page_content_str(single_url[0])\n # try:\n # self._extract_data(html)\n # except Exception as e:\n # print(e)\n # update_sql = \" UPDATE fetch_list SET status = 1 WHERE url = '{}'and source_id =17\".format(\n # single_url[0])\n # Dao.execute_dmls(update_sql)\n\n # 单个url\n # html = self.get_page_content_str(self._seed_url[0]) #用数据库的时候\n seed_url = self._root_url + seed_url[seed_url.rindex(\"?\"):]\n html = self.get_page_content_str(seed_url) #单个URL\n self.findEachBuilding(html)\n # b = set(self._resualt)\n # self._resualt=[i for i in b]\n # # dao=Dao()\n # insert_sql=\"\"\n # for res1 in b :\n # 
insert_sql = \"INSERT INTO merchant_tmp (description,url )VALUES ( '{}', 'http://www.youlin.me/category/407')\".format(res1)\n # print( insert_sql )\n # dao = Dao()\n # dao.execute_dmls(insert_sql)", "def view(self, url):\r\n abort(404)", "def view(self, url):\n abort(404)", "def scrap_site(link):\n pass # Scrapy or BeautifulSoup", "def nav(self, url):\n\n self.history.append(url)\n\n fu = urlparse.urlparse(url)\n scheme = fu.scheme\n u = fu.path\n\n self.changeCurrentModule(scheme, u)\n\n html = self.modh.get(u)\n\n if type(html) == str:\n s = self.tr(html)\n else:\n s = html\n self.setHtml(s)", "def load_page(url):\n try:\n url = 'https://en.wikipedia.org'+url\n html = urlopen(url)\n bs = BeautifulSoup(html.read(),'html.parser')\n except:\n #if page not exists or page not found\n return None \n return bs", "def load_page(url):\n parameters = {'User-Agent': \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) \\\n Chrome/69.0.3497.100 Safari/537.36\"}\n response = requests.get(url, params=parameters)\n\n # Abort if server is responding with error\n if not response.status_code == 200:\n print(\"Server stopped responding. Execution aborted.\")\n sys.exit(1)\n\n content = response.content.decode(response.encoding)\n\n # Save page to a file for debugging\n # with open(self.lastpage_path, 'w') as output_file:\n # output_file.write(content)\n\n return content", "def download_page(name=None, url=None):\n if name and url:\n timestamp = construct_date()\n filename = name + '_' + timestamp + '.html'\n os.system('wget ' + url + ' -O ' + os.path.join('..', 'html', filename))\n with open(os.path.join('..', 'html', filename), 'rb') as f:\n page = f.read()\n print('done with page {}'.format(url))\n return page", "def get(self, url):\n\t\ttry:\n\t\t\tassert(type(url)) == str\n\t\t\tself.driver.get(url)\n\t\t\t# sleep(1) # Even tho driver.get is blocking, it returns as soon as DOM loads, without waiting for JS to run and update the DOM with the new elements\n\t\t\t# wait(self.driver, 10).until( EC.visibility_of_element_located() ) # Not sure how to wait here efficiently\n\t\t\tsleep(5) # A little long, but without a conditional variable to tell us when the page is ready us when to go our only choice is to nap\n\t\t\tself.bsource = bs( self.viewSource(), \"lxml\" ) # Update internal BeautifulSoup source with new javascript-encriched code (\"lxml\" is faster that \"html.parser\")\n\t\texcept Exception as e:\n\t\t\tprint(\"[*] Unable to GET page {}\\n{}\".format(url, e))\n\t\t\treturn -1", "def load_website(self):\n# r = urllib.request.urlopen(self.url).read()\n r = requests.get(self.url).content \n self.soup = bs(r, \"lxml\")", "def download_page(url):\n try:\n headers = {}\n headers['User-Agent'] = generate_user_agent()\n headers['Referer'] = 'https://www.google.com'\n req = urllib.request.Request(url, headers = headers)\n resp = urllib.request.urlopen(req)\n return str(resp.read())\n except Exception as e:\n print('error while downloading page {0}'.format(url))\n logging.error('error while downloading page {0}'.format(url))\n return None", "async def async_get(self, url):\n self.reset()\n self.next_link = url\n return await self.async_advance_page()", "def _download_backwards(self, date_str):\n self.url = f\"http://example.com/new/url/{date_str}\"\n self.html = self._download()", "def get_page(self):\n self.browser.get(self.url)", "def split_url(url): # Change the url so it can be iterated\n url = url.split('index') \n url = url[0] + 'page-1.html'\n url = url.split('page-')\n url = 
f\"{url[0]}page-1.html\"\n return url", "def load_page(url):\n try:\n response = urllib2.urlopen(url)\n html = response.read()\n\n if response.code == 200:\n body_text = html\n return html\n return \"\"\n except Exception:\n return \"\"", "def process_page(html,dest):\n html0 = html[:]\n to_root = os.path.relpath(export_path,dest)\n to_root = to_root[1:]# Change '../' or '..' to '.' or './'\n \n # Fix links to directories first since that is easier to find\n html,N1 = re_dirlinks.subn(r'\\1=\"/\\2/index.html\"',html)\n \n # all pages links\n html,N2 = re_all.subn(r'\\1=\"/_all/\\2/index.html\"',html)\n \n # Add index.html for any other internal links. NOTE: by preprocessing\n # all internal links from the main content will already end in .html so this\n # is just special pages.\n for match in re_intlinks.finditer(html):\n dest = match.groups()[-1]\n ext = os.path.splitext(dest)[-1]\n if ext == '':\n old = r'{}=\"/{}\"'.format(*match.groups())\n new = r'{}=\"/{}\"'.format(match.groups()[0], os.path.join(match.groups()[1],'index.html') )\n html = html.replace(old,new)\n \n # Now make all links to the root\n html,N3 = re_intlinks.subn(r'\\1=\"{}/\\2\"'.format(to_root),html)\n \n # Remove the search stuff\n out = []\n ff = False\n for line in html.split('\\n'):\n if not ff and '<!-- search -->' not in line:\n out.append(line)\n continue\n \n if '<!-- search -->' in line:\n ff = True\n \n if ff and '<!-- /search -->' in line:\n ff = False\n\n html = '\\n'.join(out)\n return html", "def get_next_page(soup: BeautifulSoup, url: str) -> str:\n url = url.replace(url.split('/')[-1], '')\n if soup.find('ul', class_='pager'):\n page = soup.find('ul', class_='pager')\n if page.find('li', class_='next'):\n url = url + \\\n str(page.find('li', class_='next').find('a')['href'])\n return url\n else:\n return\n else:\n return", "def GetUrlFirst(self):\n self.url = \"https://www.taobao.com/\"\n self.host = \"www.taobao.com\"\n self.referer = \"https://www.taobao.com/\"\n content = self.GetContent()\n __clear__ = '<a href=.*?</a>'\n match = open(self.base_dir_url+\"url_first.html\", 'w')\n try:\n all_link = re.findall(__clear__, content, re.S)\n print \"All links of the web page is: \", len(all_link)\n self.DealUrlFirst(match, all_link)\n except:\n print \"Something wrong is happening!\"\n finally:\n match.close()\n match.close()", "def scrapePage(self, url):\n raw_page = requests.get(url)\n site = self.getSite(url)\n bs_page = BeautifulSoup(raw_page.content, 'html.parser')\n page = site.parsePage(bs_page)\n return page", "def nav(self, url):\r\n\r\n self.driver.get(url)\r\n time.sleep(3) # wait for page load\r", "def down_page_run(urlstr,sec=\"culture\"):\n socket.setdefaulttimeout(120)\n #设置线程数量\n thread_num = 4\n url = urlstr\n print 'URL:%s' % url\n strs = urlstr.split('/')\n postid = strs[-2]\n\n #需要自己实现\n updic = getUpdateInfo(postid)\n oldpage = int(updic['pg'])\n oldrc = int(updic['rc'])\n\n my_page = page(url)\n if my_page is None:\n my_page = oldpage\n\n #my_dict = {}\n print 'page num is : %s ~ %s' % (oldpage, my_page)\n threads = []\n\n \"\"\"根据设置的线程数量设置线程\"\"\"\n page_num = my_page - oldpage\n if(page_num < 10):\n thread_num = 2\n\n numperthread = 0\n if thread_num is 1:\n numperthread = page_num\n else:\n numperthread = page_num/(thread_num-1)\n\n if numperthread is 0:\n numperthread = 1\n \"\"\"numoflast = my_page%(thread_num-1)\"\"\"\n\n #保存所有数据的字典{pagenum:[{},{}..]..}\n resultdic = {}\n \"\"\"根据页数构造urls进行多线程下载\"\"\"\n for num in range(0, thread_num-1):\n downlist = Down_DouBan_Post(postid, 
url, num*numperthread+oldpage, (num+1)*numperthread+oldpage, my_page, oldrc, resultdic,sec)\n downlist.start()\n threads.append(downlist)\n downlist = Down_DouBan_Post(postid, url, numperthread*(thread_num-1)+oldpage, my_page+1, my_page, oldrc, resultdic,sec)\n downlist.start()\n threads.append(downlist)\n \"\"\"检查下载完成后再进行写入\"\"\"\n for t in threads:\n t.join()\n\n #写入文件\n fn = './data/%s.txt' % postid\n write_text(postid, resultdic, fn,oldrc)", "def fix_url(cls, url: str):\r\n ...", "def download_page(url):\n try:\n headers = {}\n headers[\n 'User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.89 Safari/537.36'\n headers['Referer'] = 'https://www.google.com'\n req = urllib.request.Request(url, headers=headers)\n resp = urllib.request.urlopen(req)\n return str(resp.read())\n except Exception as e:\n print('error while downloading page {0}'.format(url))\n print('error:', e)\n logging.error('error while downloading page {0}'.format(url))\n return None", "def extract_next_page(parser):\r\n url = ''\r\n table = parser.table.find_all('table')[1]\r\n tr = table.findAll('tr')\r\n url = url + str(tr[len(tr) - 1].a.get('href'))\r\n\r\n return url", "def goto_url(self, url):\n try:\n self._browser.get(url)\n except Exception as e:\n self.logger.error(\"Error going to url '\" + url + \"' : \" + str(e))\n raise", "def crawl_new_url(self):\n url_returned = self.obj_scheduler.get_next_url()\n \n if self.obj_scheduler.can_fetch_page(url_returned[0]):\n return None\n else:\n binary_content = self.request_url(url_returned[0])\n \n if binary_content != None:\n return self.discover_links(url_returned[0], url_returned[1], binary_content)\n else:\n return None", "def crawl(self, url):\n return None", "def page_soup(url):\n html = requests.get(url).text\n return bs(html, 'html.parser')", "def expand(url):\r\n data={'signature':YOURL_SIGN,'action':'expand','shorturl':url,'format':'json'}\r\n res = requests.post(API_URL, data)\r\n if res.status_code==200:\r\n return res.json()['longurl']\r\n raise Exception('Status Code Error')", "def open_url(url):\n\tglobal books\n\tglobal count_books\n\tglobal titles\n\t#global word_count\n\ttry:\n\t\t#open url\n\t\tresponse = re.urlopen(url)\n\t\t#get data\n\t\tcontent = response.read().decode('utf8')\n\t\t#close connection\n\t\tresponse.close()\n\t\t\n\texcept(er.URLError):\n\t\t#if url is not functional\n\t\tcontent = \"\"\n\t\tprint(\"The URL is not functional : \",url)\n\t\treturn None\n\t\t# #remove the url from the books dictionary\n\t\t# for key,val in books.items():\n\t\t# \tif val == url:\n\t\t# \t\tdel books[key]\n\t\t# \t\t#pop the last\n\t\t# \t\ttitles.pop()\n\t\t# \t\tbreak\n\t\t# #update count for number of books\n\t\t# count_books = len(books)\n\t\t# return\n\treturn content", "def __expandURL(self, link):\n try:\n return requests.get(link).url\n except Exception:\n return link", "def __setSoup( self, url = None, data = None, headers = {} ):\r\n if url:\r\n self.currenturi = url\r\n try:\r\n log.info(self.log_msg( 'for uri %s'%(self.currenturi) ))\r\n res = self._getHTML( data = data, headers=headers )\r\n if res:\r\n self.rawpage = res[ 'result' ]\r\n else:\r\n log.info(self.log_msg('self.rawpage not set.... 
so Sorry..'))\r\n return False\r\n self._setCurrentPage()\r\n return True\r\n except Exception, e:\r\n log.exception(self.log_msg('Page not for :%s'%url))\r\n raise e", "def visit_homepage(url):\n response = requests.get(url, timeout=10)\n soup = BeautifulSoup(response.content, 'html.parser')\n return soup", "def get_suburl(url, page):\n pr = list(urlparse(url))\n param = parse_qs(pr[4])\n for k, v in list(param.items()):\n param[k] = v[0]\n param[\"P\"] = \"{0}-{1}\".format(param[\"P\"], page)\n pr[4] = urlencode(param)\n return urlunparse(pr)", "def download_page(link, f, cnt):\n try:\n page = ur.urlopen(link).read().decode()\n fh = open(ALL_PAGES + f + str(cnt) + '.htm', 'w')\n\n fh.write(page)\n fh.close()\n except Exception:\n print('Something wrong with link ' + link)", "def __setSoup( self, url = None, data=None, headers={}):\n\n if url:\n self.currenturi = url\n try:\n log.info(self.log_msg( 'for uri %s'%(self.currenturi) ))\n res = self._getHTML( data = data, headers=headers )\n if res:\n self.rawpage = res[ 'result' ]\n else:\n log.info(self.log_msg('self.rawpage not set.... so Sorry..'))\n return False\n self._setCurrentPage()\n return True\n except Exception, e:\n log.exception(self.log_msg('Page not for :%s'%url))\n raise e", "def beautify_page(url=\"https://www.transportation.gov/individuals/aviation-consumer-protection/air-travel-consumer-reports-2020\"):\n # page = requests.get(url)\n page = urllib.request.urlopen(url)\n if page.getcode() == 200:\n soup = BeautifulSoup(page.read(), 'html.parser')\n print('Connection Successful!')\n print(url)\n return soup\n else:\n print('Connection Failure!')\n print(f'Status Code: {page.status_code}')", "def shorten_duplicate_content_url(url):\n if '#' in url:\n url = url.split('#', 1)[0]\n if url.endswith('index.html'):\n return url[:-10]\n if url.endswith('index.htm'):\n return url[:-9]\n return url", "def browsepage(year,view,slow=True):\n params={\n \"year_select\":year\n , \"view_select\":view\n }\n r=post(URLS['form-action'],params)\n x=fromstring(r.content)\n orglinks=x.cssselect('a.backtolist')\n d=[]\n for orglink in orglinks:\n if \"#\"==orglink.attrib['href'][0]:\n #Link just brings you to the top of the page\n pass\n else:\n d.append({\"year\":year,\"view\":view,\"name\":orglink.text,\"href\":orglink.attrib['href']})\n\n save(['href'],d,'links')", "def browsepage(year,view,slow=True):\n params={\n \"year_select\":year\n , \"view_select\":view\n }\n r=post(URLS['form-action'],params)\n x=fromstring(r.content)\n orglinks=x.cssselect('a.backtolist')\n d=[]\n for orglink in orglinks:\n if \"#\"==orglink.attrib['href'][0]:\n #Link just brings you to the top of the page\n pass\n else:\n d.append({\"year\":year,\"view\":view,\"name\":orglink.text,\"href\":orglink.attrib['href']})\n\n save(['href'],d,'links')", "def go(self, url):\n self.driver.get(url)", "def download_page(url, destination):\n\n # Set and verify destination path\n destination = directory_resolve_home(directory_slash(destination))\n directory_exists(destination)\n\n # Set output name\n filename = generate_filename(url=url, title=get_page_title(read_page(url)))\n\n pdfkit.from_url(url, destination + filename)\n\n return destination + filename", "def shorten_url(url: str, next_record: int) -> str:\r\n encoded_record = encode(next_record)\r\n LINKS[next_record] = url\r\n return SITE + f'/{encoded_record}'", "def request_html_page(self):\n try:\n response = requests.get('http://www.indeed.com/jobs?', params=self.payload)\n except:\n print \"got error for \", 
self.payload\n self.page = response.content", "def _soup(self, url):\n r = self.session.get(url)\n r.raise_for_status()\n html = Soup(r.text, 'lxml') # lxml is fastert than html.parser\n r.close()\n return html", "def _setSoup(self, url=None, data=None, headers={}):\n if url:\n self.currenturi = url\n try:\n log.info(self.log_msg( 'for uri %s' %(self.currenturi) ))\n res = self._getHTML(data=data, headers=headers)\n if res:\n self.rawpage = res['result']\n else:\n log.info(self.log_msg('self.rawpage not set.... so Sorry..'))\n return False\n self._setCurrentPage()\n return True\n except Exception, e:\n log.exception(self.log_msg('Page not for :%s' %uri))\n raise e", "def redirectPage() -> Response:\n # pass in the function name\n return redirect(url_for('view.loadMainPage'))", "def get_url_soup(url):\n r = requests.get(url)\n if r.status_code != 200:\n raise Exception(\"Paper request failed '%s'\" % url)\n return get_soup(r.content)", "def load_page(pageurl):\n\t# Attempt to load the page\n\ttry:\n\t\twebpage = urllib2.urlopen(pageurl)\n\t# Page could not be loaded (hit quota?)\n\texcept urllib2.HTTPError, e:\n\t\tlogging.warning('' + pageurl + ' is not a valid wikipedia page and could not be opened')\n\t\treturn None\n\t# URL is invalid\n\texcept urllib2.URLError, e:\n\t\tlogging.warning('' + pageurl + ' is not a valid URL and could not be opened')\n\t\treturn None\n\t# return beautifulsoup parsetree for the page\n\treturn BeautifulSoup(webpage,'html.parser')", "def scrape_page(url):\n cached_page = cache.get(url)\n\n if cached_page:\n return html.fromstring(cached_page)\n else:\n page = get(url)\n\n cache.set(url, page.text)\n\n return html.fromstring(page.text)", "def _real_extract(self, url):\n pass", "def CreatePage(url1: str) -> Page:\n page = Page(\n name=url1,\n queried=1,\n )\n db.session.add(page)\n db.session.commit()\n return page", "def redirect_old_featured(page):\r\n return redirect(url_for('.index', page=page), 301)", "def crawl(self, url):\n paperData = OrderedDict()\n if url.endswith('.short'):\n url = url.replace('.short', '.long')\n if not url.endswith('.long') and 'pmidlookup' not in url:\n url = url + '.long'\n delayTime = self._highwireDelay(url)\n htmlPage = httpGetDelay(url, delayTime)\n url = htmlPage['url']\n if 'install.php.' 
in url:\n raise pubGetError('Highwire invalid DOI', 'highwireInvalidUrl')\n isDrupal = False\n if htmlPage['mimeType'] != 'application/pdf' and not htmlPage['data'].startswith('%PDF'):\n aaasStr = 'The content you requested is not included in your institutional subscription'\n aacrStr = 'Purchase Short-Term Access'\n stopWords = [aaasStr, aacrStr]\n if pageContains(htmlPage, stopWords):\n raise pubGetError('no license for this article', 'noLicense')\n if pageContains(htmlPage, ['We are currently doing routine maintenance']):\n time.sleep(600)\n raise pubGetError('site is down, waited for 10 minutes', 'siteMaintenance')\n isDrupal = False\n if 'drupal.org' in htmlPage['data']:\n logging.debug('Drupal-Highwire detected')\n isDrupal = True\n paperData['main.html'] = htmlPage\n else:\n logging.warn('Got PDF page where html page was expected, no html available')\n\n if 'Transparent Process' in htmlPage['data']:\n reviewUrl = url.replace('.long', '') + '.reviewer-comments.pdf'\n logging.debug('Downloading review process file')\n reviewPage = httpGetDelay(reviewUrl, delayTime)\n paperData['review.pdf'] = reviewPage\n\n url = htmlPage['url']\n if '.long' in url:\n pdfUrl = url.replace('.long', '.full.pdf')\n else:\n pdfUrl = url + '.full.pdf'\n pdfPage = httpGetDelay(pdfUrl, delayTime)\n if not isPdf(pdfPage):\n raise pubGetError('predicted PDF page is not PDF. Is this really highwire?', 'HighwirePdfNotValid', pdfUrl)\n paperData['main.pdf'] = pdfPage\n if isDrupal:\n htmlPage['data'] = htmlExtractPart(htmlPage, 'div', {'class': 'article fulltext-view '})\n else:\n htmlPage['data'] = htmlExtractPart(htmlPage, 'div', {'id': 'content-block'})\n htmlPage['data'] = stripOutsideOfTags(htmlPage['data'], 'highwire-journal-article-marker-start', 'highwire-journal-article-marker-end')\n suppListUrl = url.replace('.long', '/suppl/DC1')\n suppListPage = httpGetDelay(suppListUrl, delayTime)\n suppUrls = findLinksWithUrlPart(suppListPage, '/content/suppl/')\n if len(suppUrls) == 0:\n suppUrls = findLinksWithUrlPart(suppListPage, 'supplementary-material.')\n paperData = downloadSuppFiles(suppUrls, paperData, delayTime)\n return paperData", "def getURL(self, url):\n \n if self.driver.current_url != url:\n print \"getting: {0}\".format(self.driver.current_url)\n self.driver.get(url)", "def gotoWeb(self,page:str)->None:\n if page=='repo':\n webbrowser.open('http://github.com/ivan866/readTobiiGlasses')\n elif page=='wiki':\n webbrowser.open('http://github.com/ivan866/readTobiiGlasses/wiki')\n elif page=='glasses2API':\n webbrowser.open('http://tobiipro.com/product-listing/tobii-pro-glasses-2-sdk/')\n elif page=='coordSys':\n webbrowser.open('http://developer.tobiipro.com/commonconcepts.html')", "def __prepare_request(self, page):\n\t\t# Replace the page parameter if it exists and add it if it doesn't exist\n\t\tpage_regex = re.compile(\"(?<=[\\\\&\\\\?]page=)\\\\d*\")\n\t\tregex_res = page_regex.subn(str(page), self.__response.request.url)\n\n\t\tif regex_res[1] == 0:\n\t\t\tself.__response.request.prepare_url(self.__response.request.url, {\"page\": str(page)})\n\t\telse:\n\t\t\tself.__response.request.url = regex_res[0]\n\n\t\treturn self.__response.request", "def url(self):\n self._current_page += 1\n return URL_TPL.format(self._uid, self._current_page)", "def get_page(url):\n # todo need some error checking\n\n r = requests.get(url)\n\n if r.status_code != 200:\n log_date = datetime.now().strftime(\"%Y-%m-%d %H%M%S\")\n filename = f'{log_date} response.html'\n with open(filename, 'w+') as f:\n f.write(r.text)\n 
logging.critical('get_page failed with status {}. See file {}.'.format(\n r.status_code,\n filename\n ))\n r.raise_for_status()\n\n return r", "def download(self, url):\n try:\n webFile = urllib.urlopen(url)\n localFile = open(self.workdir + \"/\" + url.split('/')[-1], 'w')\n localFile.write(webFile.read())\n webFile.close()\n localFile.close()\n except IOError:\n print(\"could not get url \" + url)", "def webdl(url):\n print('Downloading...{}'.format(url))\n try:\n r = requests.get(url)\n r.raise_for_status()\n return r\n except:\n print('[Error webdl]: Download failed for {}'.format(url))\n return None", "def get_absolute_url(self):\r\n return \"{0}page1/\".format(self.get_short_url())", "def get_next_page(url):\n match = re.search(r\".*/(\\d+)_p/\", url)\n if match:\n next_number = int(match.group(1)) + 1\n next_url = urljoin(BEGIN_RENT_LISTINGS, f\"{next_number}_p/\")\n return next_url\n else:\n # the first page has no page index\n return urljoin(BEGIN_RENT_LISTINGS, \"2_p/\")", "def fetch(url):\r\n PAGES = {\"http://SEARCH_QUERY_URL?&page=1\" : SEARCH_RESULT_PAGE1,\r\n \"http://SEARCH_QUERY_URL?&page=2\" : SEARCH_RESULT_PAGE2} \r\n return PAGES[url]", "def get_url(self, page):\n return self.server_url + page", "def scrape_url(url):\n html = requests.get(url).text\n return scrape_html(html)", "def load_page(url):\n with requests.Session() as session:\n return session.get(url).content.decode('utf-8')", "def get_page(url):\n try:\n if url == \"http://xkcd.com/353\":\n return \"\"\"Depending on the purpose of your site, choose a name that can be easily recognized and reflects the site’s theme.\n\n Try not to have a domain name that sounds or spell out like your competitor. You do not want visitors to accidentally visit other than your site.\n\n Vanity url is a trend now. For more options on vanity url, get yours from “iwantmyname” service provider.Use keywords related words in the domain name that describe your site (if it makes sense). The domain name should suggest the nature of your product or service. A good domain name describes exactly what the site is about. It is important for a visitor to get an idea of what the website is about just by looking at the domain name. For example, our site service is to make awareness to success in blogging so I take it \"besuccessblogger\".\n\n Easy to Remeber\n\n Your domain name should be easy to remember because your visitors will want to type in the domain name in the web browser for revisits and if they can’t remember the domain name then you loose a huge amount potential traffic. 
It is also easier to spread the word of mouth when the domain name is easy to remember.\n\n Keep your domain name short\"\"\"\n except:\n return \"\"\n return \"\"", "def _redirect(self, url):\n logger.debug('Redirecting to URL %s', url)\n segments = urllib.parse.urlparse(url)\n\n host = segments.netloc\n if host != self._host:\n self.new_connection(host)\n\n relurl = urllib.parse.urlunparse(('', '') + segments[2:])\n try:\n self._raw_get(relurl)\n except http.client.HTTPException as e:\n logger.debug('Got exception: %s.', e)\n raise DDGConnectionError(\"Failed to get '%s'.\" % url)", "def url():\n ...", "def modify_pages(url: str, last_title: str) -> None:\n\n\t# Retrieving the JSON and extracting page titles\n\tsession = requests.Session()\n\trequest = session.get(url=url, params=get_params(last_title), verify=False)\n\tpages_json = request.json()\n\tpages = pages_json[\"query\"][\"allpages\"]\n\tprint(\"Pages to be scanned:\", pages)\n\n\t# Adds template to the page if needed\n\tfor page in pages:\n\t\tcurr_title = page[\"title\"]\n\t\tif re.search('/doc$', curr_title):\n\t\t\tprint(curr_title, \"is a doc subpage. Skipping...\")\n\t\telse:\n\t\t\tadd_template(curr_title)\n\n\tif \"continue\" in pages_json:\n\t\tcontinue_from_title = pages_json[\"continue\"][\"apcontinue\"]\n\t\tprint(\"\\nContinuing from:\", continue_from_title, \"next run.\")\n\telse:\n\t\tcontinue_from_title = \"\"\n\n\twith open(TEXT_FILE, \"w+\") as f:\n\t\tf.write(continue_from_title)\n\t\tprint(\"Wrote\", continue_from_title, \"in\", TEXT_FILE)", "def goto(url):\r\n terminal(f'start \"\" \"{url}\"')", "def retrieve_page(url):\n my_socket = urllib.request.urlopen(url)\n dta = my_socket.read().decode()\n my_socket.close()\n return dta", "def get_page_content(self, url, delay):\r\n\r\n # if browser cannot connect to the server, repeat it infinitely.\r\n while True:\r\n try:\r\n # load the page\r\n self.sel_driver.get(url)\r\n\r\n # if the page is loaded, wait for delay seconds until loading would finish.\r\n # this delay is also to avoid being blocked by upwork due to so frequent access\r\n time.sleep(delay)\r\n\r\n # read and parse the page contents\r\n soup = BeautifulSoup(self.sel_driver.page_source, 'html.parser')\r\n\r\n # page loading succeeded. 
escape from the endless iteration\r\n break\r\n except (WebDriverException, TimeoutException):\r\n # error occurred, do it again\r\n print(\"(ERROR) Driver could't be load: \", time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()))\r\n self.relaunch(60)\r\n\r\n # check if the page is ACCESS DENIED\r\n # get the title of the page\r\n elements = soup.find_all(\"title\")\r\n if len(elements) == 0:\r\n return soup # if it has no title, it's may be a normal page\r\n\r\n # if the title is UPWORK ACCESS DENIED, I deal with it\r\n title = elements[0].text\r\n if 'access denied' in title.lower():\r\n print(\"(ERROR) UPWORK DENIED at \", time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()))\r\n\r\n self.relaunch(200) # relaunch after about 3 minutes\r\n\r\n return self.get_page_content(url, delay)\r\n\r\n # if the title is Upwork - Maintenance, let it wait\r\n if title == 'Upwork - Maintenance':\r\n print(\"(ERROR) UPWORK is under the Maintenance - \", time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()))\r\n time.sleep(random.randint(200, 400)) # We don't need relaunch browser.\r\n return self.get_page_content(url, delay)\r\n\r\n return soup", "def down_text(self, last_page):\n html_content = 3\n while html_content > 0:\n html_content -= 1\n try:\n\n html_content =urllib.urlopen(self.pageurl).read()\n break\n except Exception as e:\n print('Unable to download data [Time:%d][%s]' % (html_content, self.pageurl))\n insert_log('Unable to download data [Time:%d][%s]' % (html_content, self.pageurl))\n\n if isinstance(html_content, int):\n print('Unable to save data [%s]' % self.pageurl)\n insert_log('Unable to save data [%s]' % self.pageurl)\n return False\n\n print 'downling successfully from %s' % self.pageurl\n soup = BeautifulSoup(html_content)\n print soup\n #\n #对于帖子不存在了的情况进行判断\n titlep = soup.title.string\n if titlep == '403 Forbidden':\n print '出错了:%s' % self.pageurl\n insert_log('出错了:%s' % self.pageurl)\n return []\n\n alldata =[]\n audic = {}\n\n \"\"\"\n 由于第一页有主帖,因此第一页的解析会不同,要把主帖的所有信息保存下来,跟帖的信息也是如此\n \"\"\"\n #主帖的部分解析\n if((self.num == 1) and (self.rc == 0)):\n title = soup.find('div', id=\"content\").find(\"h1\").get_text().strip(\"\\r\\t\\n\")\n #section = soup.find('p', class_='crumbs').find_all('a')[1].get_text()\n auname = soup.find('div', id='content').find('span',class_=\"from\").find('a').get_text().strip(\"\\r\\t\\n\")\n auid = soup.find('div', id='content').find('span',class_=\"from\").find('a')['href'].split(\"/\")[-2]\n ctime = soup.find('div', id='content').find(\"h3\").find_all('span')[-1].get_text()\n\n audic['title'] = title\n audic['sec'] = self.section\n audic['uname'] = auname\n self.author = auname\n audic['uid'] = auid\n audic['ctime'] = ctime\n audic['ro'] = 0\n\n mydic = soup.find('div',id=\"content\").find_all(\"li\",class_=\"clearfix comment-item\")\n mi = 0\n #soup2 = BeautifulSoup((str)(htstr))\n #如果把self.rc放在外层if中用于判断是否没有记录,则对于有少量回复,而又有更新的情况,便不能跳过第一页的主帖\n #解析主帖部分内容\n if((mi == 0) and (self.num == 1)):\n mi += 1\n if(self.rc == 0):\n imgSrc = \"\"\n try:\n imgsrc = soup.find(\"div\",class_=\"topic-content\").find_all(\"div\",class_=\"topic-figure cc\")\n for img in imgsrc:\n try:\n for im in img.find_all(\"img\"):\n imgSrc += im[\"src\"] +\"\\n\"\n except:\n pass\n except:\n pass\n strtext = soup.find('div', id='link-report').find(\"div\",class_=\"topic-content\").get_text()\n\n audic['text'] = strtext.strip() + \"\\n\" + imgSrc\n\n alldata.append(audic)\n count = 0\n for htstr in mydic:\n soup2 = BeautifulSoup((str)(htstr))\n\n #解析跟帖内容\n redic = {}\n try:\n # 
irroder = (self.num-1)*100 +count\n #\n rtime = soup2.find('div', class_='bg-img-green').find('h4').find(\"span\").get_text()\n\n timeArray = time.strptime(rtime, \"%Y-%m-%d %H:%M:%S\")\n irorder = int(time.mktime(timeArray))\n redic['ro'] = irorder\n #update the reply count\n if (last_page and (irorder > self.rc)):\n self.rc = irorder\n imgSrc = \"\"\n try:\n imgsrc = soup2.find(\"div\",class_=\"topic-content\").find_all(\"div\",class_=\"topic-figure cc\")\n for img in imgsrc:\n try:\n for im in img.find_all(\"img\"):\n imgSrc += im[\"src\"] +\"\\n\"\n except:\n pass\n except:\n pass\n strtext = soup2.find('div', class_='reply-doc content').find(\"p\").get_text()\n redic['text'] = strtext.strip() +\"\\n\"+imgSrc\n\n\n\n uname = soup2.find('div', class_='bg-img-green').find('h4').find(\"a\").get_text()\n redic['un'] = uname\n\n if(uname != self.author):\n redic['au'] = False\n else:\n redic['au'] = True\n\n uid = soup2.find('div', class_='bg-img-green').find('h4').find(\"a\")[\"href\"].split(\"/\")[-2]\n\n redic['uid'] = uid\n\n\n redic['time'] = rtime\n\n alldata.append(redic)\n except AttributeError as e:\n alldata.append(redic)\n continue\n print 'All:%s' % self.pageurl\n return alldata", "def convert_navigation_url(url, link):\n url_parsed = urlparse(link)\n host_url = (\n \"{base_url}\"\n \"?q={q}&limit={limit}&offset={offset}\"\n )\n\n url_queries = parse_qs(url_parsed.query)\n q = url_queries['q'][0]\n size = int(url_queries['size'][0])\n page = int(url_queries['page'][0])\n\n return host_url.format(\n base_url=url,\n q=q,\n limit=size,\n offset=size*(page-1)\n )", "def processUrl(self, url: str) -> dict:\n site = self.sf.urlFQDN(url)\n cookies = None\n\n # Filter out certain file types (if user chooses to)\n if list(filter(lambda ext: url.lower().split('?')[0].endswith('.' 
+ ext.lower()), self.opts['filterfiles'])):\n # self.debug(f\"Ignoring URL with filtered file extension: {link}\")\n return None\n\n if site in self.siteCookies:\n self.debug(f\"Restoring cookies for {site}: {self.siteCookies[site]}\")\n cookies = self.siteCookies[site]\n\n # Fetch the contents of the supplied URL\n fetched = self.sf.fetchUrl(\n url,\n cookies=cookies,\n timeout=self.opts['_fetchtimeout'],\n useragent=self.opts['_useragent'],\n sizeLimit=10000000,\n verify=False\n )\n self.fetchedPages[url] = True\n\n if not fetched:\n return None\n\n # Track cookies a site has sent, then send the back in subsquent requests\n if self.opts['usecookies'] and fetched['headers'] is not None:\n if fetched['headers'].get('Set-Cookie'):\n self.siteCookies[site] = fetched['headers'].get('Set-Cookie')\n self.debug(f\"Saving cookies for {site}: {self.siteCookies[site]}\")\n\n if url not in self.urlEvents:\n # TODO: be more descriptive\n self.error(\"Something strange happened - shouldn't get here: url not in self.urlEvents\")\n self.urlEvents[url] = None\n\n # Notify modules about the content obtained\n self.contentNotify(url, fetched, self.urlEvents[url])\n\n real_url = fetched['realurl']\n if real_url and real_url != url:\n # self.debug(f\"Redirect of {url} to {real_url}\")\n # Store the content for the redirect so that it isn't fetched again\n self.fetchedPages[real_url] = True\n # Notify modules about the new link\n self.urlEvents[real_url] = self.linkNotify(real_url, self.urlEvents[url])\n url = real_url # override the URL if we had a redirect\n\n data = fetched['content']\n\n if not data:\n return None\n\n if isinstance(data, bytes):\n data = data.decode('utf-8', errors='replace')\n\n # Extract links from the content\n links = SpiderFootHelpers.extractLinksFromHtml(\n url,\n data,\n self.getTarget().getNames()\n )\n\n if not links:\n self.debug(f\"No links found at {url}\")\n return None\n\n # Notify modules about the links found\n # Aside from the first URL, this will be the first time a new\n # URL is spotted.\n for link in links:\n if not self.opts['reportduplicates']:\n if link in self.urlEvents:\n continue\n # Supply the SpiderFootEvent of the parent URL as the parent\n self.urlEvents[link] = self.linkNotify(link, self.urlEvents[url])\n\n self.debug(f\"Links found from parsing: {links.keys()}\")\n return links", "def internallinks(url, number_of_pages):\n hotelslist = set()\n request = get(url)\n parser = BeautifulSoup(request.text, 'html.parser')\n page_load = 5\n for link in parser.findAll(\"a\", href=re.compile(\"^(/|.*)(?=REVIEWS)\")):\n if link.attrs['href'] is not None:\n hotelurl = link.attrs['href']\n url = 'https://www.tripadvisor.es' + str(hotelurl)\n hotelslist.add(url)\n else:\n pass\n next_page = parser.find(class_=\"prw_rup prw_common_standard_pagination_resp\").find(\"a\", href=re.compile(\"^(/|.*)\"))\n next_page_url = next_page.attrs['href']\n while number_of_pages > 1:\n url = 'https://www.tripadvisor.es' + str(next_page_url)\n request = get(url)\n parser = BeautifulSoup(request.text, 'html.parser')\n for link in parser.findAll(\"a\", href=re.compile(\"^(/|.*)(?=REVIEWS)\")):\n if link.attrs['href'] is not None:\n hotelurl = link.attrs['href']\n url = 'https://www.tripadvisor.es' + str(hotelurl)\n hotelslist.add(url)\n else:\n pass\n try:\n next_page = parser.find(class_=\"prw_rup prw_common_standard_pagination_resp\").find(\"a\", href=re.compile(\n \"^(/|.*)\"))\n next_page_url = next_page.attrs['href']\n print(next_page_url)\n number_of_pages = number_of_pages - 1\n if 
page_load < 5:\n page_load = page_load + (5 - page_load)\n else:\n pass\n except:\n print(\n \"IndexError(Encontramos un error al extraer la {0} página volvemos a ejecutar el contenido de esa \"\n \"pagina)\".format(str(number_of_pages)))\n sleep(1)\n if page_load > 0:\n page_load = page_load - 1\n pass\n else:\n raise IndexError(\"Encontramos un error al extraer la {0} multiples fallos \"\n \"salimos \").format(str(number_of_pages))\n return hotelslist", "def __setSoup(self, url=None, data=None, headers={}):\n if url:\n self.currenturi = url\n res = self._getHTML(data=data, headers=headers)\n if res:\n self.rawpage = res['result']\n else:\n log.info(self.log_msg('HTML Content cannot be fetched for the url: \\\n %s'%self.currenturi))\n return False\n self._setCurrentPage()\n return True", "def visit(self, max_depth = DEPTH, response_handler=record, html_rendering=False, no_expand=lambda url, doc: False):\n if self.depth >= max_depth:\n return\n if self.url.name in pool:\n return\n else:\n pool.add(self.url.name)\n \n print(f\"Requesting {self.url.name}...\")\n \n# host for relative href\n try:\n host = re.search(r\"(?:(?:https?:)?//)?([^/]+)\", self.url.name).group(1)\n except Exception:\n host = None\n\n# indicate if the request is successful\n flag = False\n site = None\n html = ''\n\n for req in self.url.request_string():\n if html_rendering:\n renderer.render(req, timeout=10)\n while not renderer.ready:\n time.sleep(1)\n html = renderer.html\n site = bs4.BeautifulSoup(html, 'html5lib')\n if html:\n flag = True\n else:\n try:\n # print(f\"Site: {req}\")\n r = requests.get(req, timeout = 5)\n if r.status_code != 200:\n print(f\"Warning: HTTP response for {req} is {r.status_code} but 200\")\n else:\n # print(\"OK\")\n flag = True\n html = r.content.decode('utf-8')\n site = bs4.BeautifulSoup(html, 'html5lib')\n break\n except requests.exceptions.Timeout:\n # print(f\"Request time out : {req}\")\n pass\n except Exception:\n # print(f\"Failed to connect : {req}\")\n pass\n\n if not site:\n return\n\n if not flag:\n return\n\n urls = []\n\n # handle the response\n response_handler(self.url.name, html)\n\n # find successors\n for tag in site.find_all('a'):\n urls.append(tag.get('href'))\n # print('Link to', tag.get('href'))\n \n if no_expand(self.url.name, html):\n # stop expanding\n return\n\n thread_pool = []\n for url in urls:\n if not url:\n continue\n # add host if started with a slash\n if url[0] == '/':\n if len(url) > 1 and url[1] == '/':\n url = url.lstrip('/')\n else:\n url = host + url\n url = url.rstrip('/')\n\n searchTask = URL(url)\n\n if not searchTask.valid:\n # print(f\"Invalid URL: {url}\")\n continue\n else:\n # if the website has been visited\n if searchTask.name in pool:\n continue\n else:\n thread = threading.Thread(target=Node(searchTask, self.depth + 1).visit, args=(max_depth, response_handler))\n thread.start()\n thread_pool.append(thread)\n\n while thread_pool:\n for thread in thread_pool:\n thread.join(timeout=0)\n if not thread.is_alive():\n thread_pool.remove(thread)\n time.sleep(1)", "def get(self, url):\n self.browser.get(url)", "def _get_main_page(website):\n return BeautifulSoup(requests.get(website).content, 'html.parser')", "def forward_to(id):\n\n db = init_connection_engine()\n\n if id == 'short_URL':\n return redirect(url_for('index'))\n else:\n # Looking up the URL by its ID in the DB.\n try:\n # Using a with statement ensures that the connection is always released\n # back into the pool at the end of statement (even if an error occurs).\n with 
db.connect() as conn:\n lookup_url = \"SELECT url_data FROM url_list WHERE url_id='\" + id + \"';\"\n target_url = conn.execute(lookup_url).fetchone()\n # If target URL is not found.\n if not target_url:\n flash('Not found')\n return redirect(url_for('index'))\n # If something goes wrong.\n except:\n flash('Something went wrong')\n return redirect(url_for('index'))\n\n return redirect(target_url[0])", "def removeFromNavBar(des):\n index = \"Xblog/docs/index.html\"\n title = des.split(\"/\")[-1].replace(\".html\", \"\")\n with open(index, 'r') as f:\n soup = BeautifulSoup(f, \"html.parser\")\n f.close()\n soup.select(\"#\"+title)[0].decompose()\n with open(index, 'w') as f:\n f.write(soup.prettify(formatter=\"html\"))\n f.close()\n ccc.success(\"removing \" + des + \" from navigation pallete\")", "def links(n):\n return redirect(url_for(\"link_page\", n=n, offset=0))", "def GetUrlSecond(self):\n counter = 0\n file_handler = open(self.base_dir_url+\"url_first.html\", \"r\")\n for line in file_handler.readlines():\n url_name = line.strip().split()\n \n self.url = 'http:'+url_name[0]\n if re.search('www.taobao.*', url_name[0]):\n self.host = \"www.taobao.com\"\n else:\n getHost = re.findall('//(.*?.com)', url_name[0])\n self.host = getHost[0]\n print url_name[1].decode('utf-8').encode('gbk') +':'+ getHost[0]+','+self.url\n self.referer = \"https://www.taobao.com/\"\n match_txt = open(self.base_dir_url+url_name[1].decode('utf-8').encode('gbk')+'.txt', \"w\")\n content = self.GetContent()\n print >> match_txt, content\n match_txt.close()\n if (counter < 4):\n match = open(self.base_dir_url+url_name[1].decode('utf-8').encode('gbk'), \"w\")\n self.DealUrlSecond14(match, content)\n match.close()\n elif (counter > 3 and counter < 8) or (counter > 11 and counter < 16) or (counter > 19 and counter < 24):\n match = open(self.base_dir_url+url_name[1].decode('utf-8').encode('gbk'), \"w\")\n self.DealUrlSecond58(match, content)\n match.close()\n counter += 1\n file_handler.close()", "async def fetch_page(self, url: str) -> PageRaw:\n\n raise NotImplementedError()", "def scrapeThePage(url):\n\n # set the headers like we are a browser\n # headers = {\n # 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\n\n # download the page\n # page = requests.get(url, headers=headers)\n\n # parse the downloaded page and grab all text, then\n # scraped_page = BeautifulSoup(page.content, \"html.parser\")\n\n html = urlopen(url).read() # html will contain the *entire* page\n\n # transforming html (byte-like object) into string\n htmlString = html.decode(\"utf-8\")\n\n # transforming html (byte-like object) into string\n # htmlString = scraped_page.decode(\"utf-8\")\n\n return htmlString", "def fetch_url(url):\n try:\n soup = bs(urlopen(url).read(), 'html.parser')\n return soup\n except:\n print \"Couldnot download the content from the URL\", url\n return \"\"", "def get_url_with_page(url: str, page: Optional[int], delimiter: str = \"/\") -> str:\n return url if page is None else f\"{url}{delimiter}{page}\"", "def next_url(self, url, soup, unfinished, logger):\n try:\n next_button = soup.find('span', class_='next-button') # keeping the next url for the next loop\n url = next_button.find('a').attrs['href']\n unfinished = True\n except Exception:\n logger.error(\"\\n*** Finished to load the url {} ***\\n\".format(url))\n unfinished = False\n\n return url, unfinished", "def navigate(self, url):\n self.log_info(f\"Browser.navigate: 
Navigating to {url}\")\n self.CORE.get(url)\n return", "def __goToLastPage(self):\n try:\n self.currenturi = self.currenturi = self.currenturi.rsplit('/',1)[0] + '/' +self.soup.find('div', 'pagination_container vt_pagination_container').findAll('a', text=re.compile ('^\\d+$'))[-1].parent['href']\n self.__setSoupForCurrentUri()\n except:\n log.info(self.log_msg('Last page cannot find from the given page no \\\n for url %s'%self.task.instance_data['uri']))", "def _newurl(counter):\n return \"%s/sitemap-%s.xml.gz\" % (settings.SITEMAPS_BASE_URL, counter)" ]
[ "0.6163118", "0.5895792", "0.5818599", "0.58166754", "0.5812692", "0.57281524", "0.5682992", "0.5658968", "0.56213826", "0.5620462", "0.5577548", "0.5556134", "0.55078506", "0.5499445", "0.5490812", "0.547787", "0.54733974", "0.5458218", "0.54565185", "0.5416221", "0.54098094", "0.5408598", "0.5406272", "0.54038626", "0.5400941", "0.54005194", "0.53924394", "0.5366171", "0.5359046", "0.5344665", "0.53360814", "0.5323506", "0.5316831", "0.5308319", "0.5301482", "0.52985704", "0.5297613", "0.5294851", "0.52921194", "0.52884936", "0.52847654", "0.52808535", "0.52684724", "0.5266823", "0.5263065", "0.5263065", "0.5258663", "0.525253", "0.524713", "0.5233255", "0.5214064", "0.5212186", "0.52121484", "0.5208255", "0.5206781", "0.5206217", "0.51999557", "0.51973855", "0.5195794", "0.5192819", "0.51877743", "0.51749176", "0.5170374", "0.51675415", "0.51662344", "0.51589364", "0.51467806", "0.5131887", "0.51318455", "0.51312697", "0.512267", "0.51149917", "0.5114507", "0.5103985", "0.510316", "0.5094274", "0.5089945", "0.5088318", "0.5086933", "0.50842935", "0.50816166", "0.50796026", "0.5070672", "0.50696194", "0.5068239", "0.50613254", "0.5060202", "0.50572985", "0.5055938", "0.5053384", "0.50529873", "0.5051726", "0.50513494", "0.5046067", "0.50434697", "0.50403255", "0.502657", "0.5022427", "0.5020896", "0.50205135" ]
0.5585596
10
Returns the BeautifulSoup object of the given page
def get_soup(self, html):
    if html is not None:
        soup = BeautifulSoup(html, "html.parser")
        return soup
    else:
        return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def page_soup(page):\n return bs4.BeautifulSoup(page, 'html.parser')", "def _get_soup(self, page=''):\n content = requests.get('%s/%s' % (BASE_URL, page)).text\n return BeautifulSoup(content)", "def get_soup(page='1'):\n content = requests.get('%s/?page=%s' % (BASE_URL, page)).text\n return BeautifulSoup(content)", "def get_soup(self, page):\n if page in self.soups:\n return self.soups[page]\n else:\n response = self.client.get(page)\n soup = bs4.BeautifulSoup(response.content, \"html5lib\")\n self.soups[page] = soup\n return soup", "def get_soup_for_page(url: str) -> BeautifulSoup:\n return BeautifulSoup(get_html(url), 'html.parser')", "def page_soup(url):\n html = requests.get(url).text\n return bs(html, 'html.parser')", "def _get_soup(pagerequest):\n html = _get_page(pagerequest)\n return BeautifulSoup(html, 'html.parser')", "def get_page(self, url):\n page = self.__open_page(url)\n soup = BeautifulSoup(page, 'html.parser')\n return soup", "def _get_soup_object(url: str) -> bs4.BeautifulSoup:\n request_result=requests.get(url)\n soup = bs4.BeautifulSoup(request_result.text, \"html.parser\")\n return soup", "def get_soup_from_url(page_url):\n r = requests.get(page_url)\n if r.status_code != requests.codes.ok:\n raise requests.exceptions.HTTPError\n return BeautifulSoup(r.content, 'lxml')", "def get_soup(url):\n\tresponse = urlopen(url)\n\thtml = response.read()\n\tsoup = BeautifulSoup(html, \"html.parser\")\n\tresponse.close()\n\treturn soup", "def get_soup(url):\r\n page=requests.get(url)\r\n soup = BeautifulSoup(page.text.encode(\"utf-8\"), 'html.parser')\r\n return soup", "def getPage(self, url):\n headers = {\"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36\",\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8\"}\n try:\n req = self.session.get(url, headers=headers)\n except requests.exceptions.RequestException:\n return None\n bs = BeautifulSoup(req.text, \"html.parser\")\n return bs", "def get_soup(url: str) -> BeautifulSoup:\n html = get_html(url)\n soup = BeautifulSoup(html, 'lxml')\n return soup", "def get_soup(url):\n\tr = requests.get(url)\n\tdata = r.text\n\tsoup = BeautifulSoup(data, \"lxml\")\n\treturn soup", "def get_soup(url):\n response = requests.get(url)\n soup = BeautifulSoup(response.text, 'html.parser')\n return soup", "def get_soup(self):\n page = get(self.url)\n if page.status_code == 200:\n soup = BeautifulSoup(page.text, 'lxml')\n return soup\n else:\n raise ConnectionError('The page is not disponible.')", "def get_soup(self, page, planet=None):\n url = self.page_url(page, planet)\n result = self.session.get(url)\n soup = BeautifulSoup(result.content, \"html.parser\")\n\n if not self.logged_in(use_page=soup):\n self.login()\n # i could do this recursively but i'm afraid of getting stuck because it couldn't\n # log in for some reason not related to this program. 
and this is prob faster\n result = self.session.get(url)\n soup = BeautifulSoup(result.content, \"html.parser\")\n\n return soup", "def get_soup(url: str):\n response = requests.get(url)\n\n return BeautifulSoup(response.content, \"html.parser\")", "def getSoup(url):\n return BeautifulSoup(getHtml(url), 'lxml')", "def get_soup(url):\n return BeautifulSoup(requests.get(url).content, 'lxml')", "def generate_tree(self, page):\n return BeautifulSoup.BeautifulSoup(page)", "def make_soup(url):\r\n htmlFile = urllib.request.urlopen(url).read()\r\n soup = BeautifulSoup(htmlFile)\r\n return soup", "def return_beautiful_soup_object(url: str) -> bs4.BeautifulSoup:\n html_filename, headers = urllib.request.urlretrieve(url)\n with open(html_filename) as file:\n soup = BeautifulSoup(file, 'html.parser')\n file.close()\n return soup", "def scrapePage(self, url):\n raw_page = requests.get(url)\n site = self.getSite(url)\n bs_page = BeautifulSoup(raw_page.content, 'html.parser')\n page = site.parsePage(bs_page)\n return page", "def load_page(self) -> bs4.BeautifulSoup:\n\n res = requests.get(self.url)\n\n res.raise_for_status()\n return bs4.BeautifulSoup(res.text, 'html.parser')", "def get_soup_obj(url):\n try:\n html = session.get(url, headers=headers).text\n return BeautifulSoup(html, \"html.parser\")\n except HTTPError:\n print(\"{} not reachable\".format(url))\n return None", "def get_page(page):\n\timport urllib2\n\tsource = urllib2.urlopen(page)\n\treturn source.read()", "def get_soup(session, url, user_agent):\n headers = cs.base_request_headers\n headers['User-Agent'] = user_agent\n\n page = custom_get(session=session, url=url, headers=headers)\n\n return BeautifulSoup(page.text, 'html.parser')", "def _soup(self, url):\n r = self.session.get(url)\n r.raise_for_status()\n html = Soup(r.text, 'lxml') # lxml is fastert than html.parser\n r.close()\n return html", "def _get_soup(self, url):\n\n # generate a random header \n headers = {'User-Agent': self._random_user_agent()}\n # send a request and get the soup\n response = requests.get(url, headers=headers)\n results = response.content\n if not response.status_code == 404:\n soup = BeautifulSoup(results, 'lxml')\n return soup", "def get_html_parser(url):\n response = requests.get(url)\n return BeautifulSoup(response.content, 'html.parser')", "def _get_main_page(website):\n return BeautifulSoup(requests.get(website).content, 'html.parser')", "def get_soup(url):\n opener = urllib2.build_opener()\n request = urllib2.Request(url);\n request.add_header('User-Agent','Mozilla/6.0 (Windows NT 6.2; WOW64; rv:16.0.1) Gecko/20121011 Firefox/16.0.1');\n data = opener.open(request).read(); \n return BeautifulSoup(data);", "def create_soup(u):\n req = requests.get(u)\n html = req.text\n s = BeautifulSoup(html, \"html.parser\")\n return s", "def soupify(html):\n return BeautifulSoup(html, \"html.parser\")", "def make_soup(self):\n req = urllib.request.Request(\n url,\n data=None,\n headers={\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36'\n }\n )\n f = urllib.request.urlopen(self.html)\n soupdata = BeautifulSoup(f, \"html.parser\")\n return soupdata", "def from_page(cls, page_instance):\n obj = cls()\n obj._doc = page_instance._doc.copy()\n return obj", "def get_soup(url: str):\n\n page_response = get_page_response(url)\n if page_response is not None:\n try:\n soup = BeautifulSoup(page_response.content, 'lxml')\n except:\n print('Trouble parsing the soup for: {}'.format(url))\n 
return None\n else:\n return soup\n else:\n print(f'The response object was \"None\" so there is no point in trying to parse for url {url}')\n return None", "def get_page(self, url):\n \"\"\" @param url: Url we want to crawl\"\"\"\n \"\"\" @type url: String \"\"\"\n \"\"\"@return the page\"\"\"\n try:\n u = urlopen(url)\n html = u.read().decode('utf-8')\n # except Exception as e:\n # logging.exception(e)\n finally:\n print(\"Closing\")\n u.close()\n return html", "def request(url):\n response=requests.get(url)\n soup=BeautifulSoup(response.content,\"lxml\")\n return soup", "def request(self, url):\r\n\r\n req = self.get(url)\r\n soup = BeautifulSoup(req.content, \"lxml\")\r\n return soup", "def htmlParsePage(page):\n if 'parsedHtml' not in page:\n logging.debug('Parsing HTML')\n html = page['data']\n html = html.replace(' xmlns=\"http://www.w3.org/1999/xhtml\"', '')\n html = removeThreeByteUtf(html)\n page['parsedHtml'] = BeautifulSoup(html)", "def get_soup(url, using_TOR = False):\n try:\n request = get_request(url, using_TOR = using_TOR)\n if request == None:\n logger.debug(\"Request is empty, don't create soup.\")\n return None\n soup = BeautifulSoup(request, 'html.parser')\n return soup\n except Exception as error:\n #logger.warn(traceback.format_exc())\n raise\n return None", "def soup_given_url(given_url):\n url = given_url\n content = urllib.request.urlopen(url)\n soup = BeautifulSoup(content, \"html.parser\")\n return soup", "def get_url_soup(url):\n r = requests.get(url)\n if r.status_code != 200:\n raise Exception(\"Paper request failed '%s'\" % url)\n return get_soup(r.content)", "def load_page(url):\n try:\n url = 'https://en.wikipedia.org'+url\n html = urlopen(url)\n bs = BeautifulSoup(html.read(),'html.parser')\n except:\n #if page not exists or page not found\n return None \n return bs", "def _html(url: str) -> BeautifulSoup:\n with urllib3.PoolManager() as manager:\n res = manager.request(\"GET\", url, headers={\"User-Agent\": ua.chrome})\n if res.status != 200:\n raise Exception(res.status)\n soup = BeautifulSoup(res.data, \"html.parser\")\n return soup", "def get_soup(self, url):\n if self.session is None:\n return BeautifulSoup(requests.get(url).content, features=\"xml\")\n else:\n return BeautifulSoup(self.session.get(url).content, features=\"xml\")", "def create_soup(url):\n ua = UserAgent()\n user_agent = {'User-agent': ua.random}\n response_text = requests.get(url, headers=user_agent).text\n soup = BeautifulSoup(response_text, 'html5lib')\n return soup", "def _get_soup_by_path(self, path):\n return BeautifulSoup(requests.get('%s%s' % (self.URL, path), headers=self.HEADERS).content)", "def get_soup():\n global soup\n html = urlopen(\"http://www.jrenshaw.com/works-in-progress/\")\n soup = BeautifulSoup(html, \"lxml\")\n return soup", "async def get_one_page_soup_object(url):\n async with aiohttp.ClientSession() as session:\n async with session.get(url) as response:\n return await response.text()", "def soup_explore(url_or_file, session=None):\n soup = ph.get_soup(url_or_file, session)\n if not soup:\n ph.logger.error('No soup found for {}'.format(url_or_file))\n else:\n print('\\nExplore the \"soup\" object\\n\\n')\n embed()\n return soup", "def get_soup_from_url(url, parser='html.parser'):\n r = requests.get(url)\n r.raise_for_status()\n soup = bs4.BeautifulSoup(r.text, parser)\n return soup", "def get_html(url):\n # type: (str) -> BeautifulSoup\n headers = {\n \"Accept\": \"text/html\",\n \"Accept-encoding\": \"gzip\"\n }\n with Cache(CACHE_URI) as c:\n cached = 
c.get(url)\n if cached:\n add_cache_headers(headers, cached)\n # always return cached info regardless\n if cached[\"fresh\"] or url.startswith(JAFC_INFO_URI):\n return BeautifulSoup(cached[\"blob\"], \"html.parser\")\n r = requests.get(url, headers=headers, timeout=SEARCH_TIMEOUT)\n if 200 == r.status_code:\n soup = BeautifulSoup(r.content, \"html.parser\")\n # pre-cache clean-up\n for x in soup([\"script\", \"style\"]):\n x.extract()\n c.set(url, str(soup), r.headers)\n return soup\n if 304 == r.status_code:\n c.touch(url, r.headers)\n return BeautifulSoup(cached[\"blob\"], \"html.parser\")", "def parse(url, parser='html5lib', **kwargs):\n return bs4.BeautifulSoup(SESSION.get(url).content, features=parser, **kwargs)", "def soup(self):\n if not self._soup:\n resp = requests.get(self.url)\n if not resp.ok:\n logging.warning('Status of request is not ok.')\n self._soup = BeautifulSoup(resp.content, 'html.parser')\n\n return self._soup", "def scrapeThePage(url):\n\n # set the headers like we are a browser\n # headers = {\n # 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\n\n # download the page\n # page = requests.get(url, headers=headers)\n\n # parse the downloaded page and grab all text, then\n # scraped_page = BeautifulSoup(page.content, \"html.parser\")\n\n html = urlopen(url).read() # html will contain the *entire* page\n\n # transforming html (byte-like object) into string\n htmlString = html.decode(\"utf-8\")\n\n # transforming html (byte-like object) into string\n # htmlString = scraped_page.decode(\"utf-8\")\n\n return htmlString", "def soup(self) -> Soup:\n return Soup(self.html)", "def get_soup(url):\n url_hash = get_url_hash(url)\n www_cache_file = os.path.join(www_cache_dir, url_hash)\n if os.path.exists(www_cache_file):\n with open(www_cache_file) as file:\n charset = 'utf8'\n data = file.read().encode(charset)\n else:\n print('Downloading %s...' 
% url, file=sys.stderr)\n with urlopen(url) as stream:\n charset = stream.info().get_param('charset')\n data = stream.read()\n with open(www_cache_file, 'w') as file:\n file.write(data.decode(charset))\n return bs4.BeautifulSoup(data, 'lxml', from_encoding=charset)", "def make_soup(url, params=None):\n\n r = requests.get(url, params=params)\n if r.status_code != requests.codes.ok:\n raise Exception('Error: status code is %s for URL: %s' %\n (str(r.status_code), url))\n\n contents = r.content\n \n soup = BeautifulSoup(contents, parser, from_encoding='iso-8859-1')\n return soup", "def make_file_soup(self):\n soup = BeautifulSoup(self.html, 'html.parser')\n return soup", "def url_to_soup(data_url, **kwargs):\n try:\n data_page = get_cached_url(data_url, **kwargs)\n except requests.RequestException:\n _logger.warning(f'request failed: {data_url}')\n raise\n _logger.debug(f'request successful: {data_url}')\n\n # Create a Beautiful Soup object\n data_text = data_page.text\n data_soup = BeautifulSoup(data_text, 'html.parser')\n\n return data_soup", "def get_soup(url):\r\n res = requests.get(url=url)\r\n soup = BeautifulSoup(res.text, \"html.parser\")\r\n infor_form = soup.find(\"div\", {\"id\": \"content\"})\r\n return infor_form", "def get_html_content(self, url):\n\n req = urllib2.Request(url, headers=self.HEADER)\n page = urllib2.urlopen(req)\n soup = BeautifulSoup(page)\n\n return soup", "def soupify(html):\n try:\n return BeautifulSoup(html, \"html.parser\")\n except Exception as e: # pragma: no cover\n raise SoupError(str(e))", "def getHTML(url):\n\n time.sleep(2.00)\n html = urllib2.urlopen(url,timeout=10).read()\n urllib2.urlopen(url).close()\n\n soup = BeautifulSoup(html)\n\n return soup", "def visit_homepage(url):\n response = requests.get(url, timeout=10)\n soup = BeautifulSoup(response.content, 'html.parser')\n return soup", "def get_soup_alternate(url):\n\theaders = {\"User-Agent\":\"Mozilla/5.0 (X11; U; Linux i686) Gecko/20071127 Firefox/2.0.0.11\"}\n\treq = Request(url, headers=headers)\n\tresponse = urlopen(req)\n\thtml = response.read()\n\tsoup = BeautifulSoup(html, \"html.parser\")\n\tresponse.close()\n\treturn soup", "def make_request(url):\r\n req = requests.get(url, headers)\r\n soup = BeautifulSoup (req.content, \"html5lib\")\r\n return soup", "def ProcessPage (self, page):\n\t\tcontent = BeautifulSoup (open(page), 'lxml')\n\n\t\t# Find and replace script tags with local version\n\t\tfor script in content.find_all ('script'):\n\t\t\tif script.get ('src'):\n\t\t\t\tscript ['src'] = self._register (script.get ('src'))\n\n\t\tfor link in content.find_all ('link'):\n\t\t\tif link.get ('href'):\n\t\t\t\tlink ['href'] = self._register (link.get ('href'))\n\n\t\treturn content.prettify().encode('utf-8')", "def get_document(url):\n req = requests.get(url)\n doc = BeautifulSoup(req.content, \"html.parser\")\n return doc", "def soup(url):\n handle = ''\n max_tries = 10\n for i in range(max_tries):\n try:\n handle = urlopen(url)\n handle = handle.read()\n break\n except:\n logging.exception('urlopen failed (attempt %d)', i + 1)\n if i == max_tries - 1:\n logging.error('the maximum urlopen attempts have been reached')\n raise\n time.sleep(1)\n\n s = BeautifulSoup(handle)\n return s", "def extract_page_html(url):\n\n from urllib.request import Request, urlopen\n\n request_headers = {'User-Agent': 'Mozilla/5.0'}\n req = Request(url, headers=request_headers)\n page = urlopen(req).read()\n\n return page", "def scrape_page(url):\n cached_page = cache.get(url)\n\n if cached_page:\n return 
html.fromstring(cached_page)\n else:\n page = get(url)\n\n cache.set(url, page.text)\n\n return html.fromstring(page.text)", "def load_page(pageurl):\n\t# Attempt to load the page\n\ttry:\n\t\twebpage = urllib2.urlopen(pageurl)\n\t# Page could not be loaded (hit quota?)\n\texcept urllib2.HTTPError, e:\n\t\tlogging.warning('' + pageurl + ' is not a valid wikipedia page and could not be opened')\n\t\treturn None\n\t# URL is invalid\n\texcept urllib2.URLError, e:\n\t\tlogging.warning('' + pageurl + ' is not a valid URL and could not be opened')\n\t\treturn None\n\t# return beautifulsoup parsetree for the page\n\treturn BeautifulSoup(webpage,'html.parser')", "def getSoupFromURL(url, supressOutput=True):\n if not supressOutput:\n print url\n \n try:\n r = requests.get(url)\n except:\n return None\n \n return BeautifulSoup(r.text)", "def searchpageparsing(page): # Note for initial Coldwell this was run seperately, for more managable errors\n if not page: # Failed webdl handling\n return None\n\n soup = bs4.BeautifulSoup(page.text, 'lxml')\n parent_element = soup.find('ul', {'class': 'expanded-nav'})\n link_parent = parent_element.find('li')\n link_el = link_parent.find('a')\n link = link_el['href']\n\n return link", "def get_soup(self, url, params=None, headers=None, parser=\"html.parser\", timeout=10):\r\n headers = headers or self.headers\r\n try:\r\n response = self.request(url=url, method='GET', params=params, extra_headers=headers, timeout=timeout)\r\n response.encoding = 'utf-8'\r\n return BeautifulSoup(response.text, parser)\r\n except requests.exceptions.ProxyError:\r\n return None\r\n except requests.RequestException as error:\r\n if self._debug:\r\n logging.exception(\r\n ''.join(traceback.format_exception(etype=type(error), value=error, tb=error.__traceback__)))\r\n return None", "def source_to_soup(page_source):\n\tpage_source = re.sub('<br>', '', page_source)\n\tpage_source = re.sub('<br/', '', page_source)\n\tpage_source = re.sub('<br />', '', page_source)\n\treturn BeautifulSoup(page_source, 'html.parser', parse_only=SoupStrainer('div'))", "def load_website(self):\n# r = urllib.request.urlopen(self.url).read()\n r = requests.get(self.url).content \n self.soup = bs(r, \"lxml\")", "def gzipPage(page):\n #if not hasattr(page,\"info\"):\n # return(\"\")\n data = object()\n # Check if content encoding is gzip\n if page.info().get('Content-Encoding') == 'gzip':\n buf = StringIO(page.read())\n f = gzip.GzipFile(fileobj=buf)\n data = f.read()\n else :\n data = page.read()\n return(data)", "def __init__(self, page):\n try:\n self.page = wikipedia.page(page)\n except wikipedia.exceptions.DisambiguationError as e:\n self.page = wikipedia.page(e.options[0])\n self.soup = BeautifulSoup(self.page.html())\n self._gen_table()", "def get(self, page):\n element = self._elements.get(page)\n if element is None:\n element = self._cls(page, self._locator)\n self._elements[page] = element\n return element", "def parse(html, encoding='utf-8'):\n if isinstance(html, unicode):\n return bs4.BeautifulSoup(html, 'html.parser')\n\n return bs4.BeautifulSoup(html, 'html.parser', from_encoding=encoding)", "def __init__(self, page):\n self.raw_page = page\n self.page = etree.HTML(page)", "def get_webpage_content(url):\n request = urllib2.Request(url)\n page = urllib2.urlopen(request)\n soup = BeautifulSoup(page.read())\n return unicode(soup)", "def get_html_soup(url_target, getter=1):\n if getter == 1:\n response = requests.get(url_target) # getter == 1\n status_code = response.status_code\n markup = 
response.text\n else:\n response = urlopen(url_target)\n status_code = response.getcode()\n markup = response\n print(f\"status_code = [{status_code}] \\n\")\n return BeautifulSoup(markup=markup, features='html.parser')", "def get_page(self, page_id):\n\t\treturn Page(page_id, self.user_id, self.site_id)", "def getHTML(self):\n html = requests.get(self.URL).text\n soup = BeautifulSoup(html, \"lxml\")\n return soup", "def parse_source(html, encoding='utf-8'):\n return BeautifulSoup(html, from_encoding=encoding)", "def page_data():\n return scrape()", "def _grab_tags(self, url):\n a = self._api_request(url)\n return bs4.BeautifulSoup(a,features=\"html.parser\")", "def get_content(self):\n response = requests.get(self.url)\n soup = BeautifulSoup(response.text, \"html.parser\")\n return soup", "def hot_soup(url, payload={}):\r\n response = query(url, payload)\r\n soup = BeautifulSoup(response.content, 'html.parser')\r\n return soup", "async def fetch_page(self, url: str) -> PageRaw:\n\n raise NotImplementedError()", "def get_page(url):\n try:\n return urlopen(url).read()\n except:\n return None\n return None", "def get_content_from_link(link):\n\n page = requests.get(link)\n soup = BeautifulSoup(page.text, \"lxml\")\n return soup", "def get_article(url):\n \n r = requests.get(url) \n html_soup = BeautifulSoup(r.content, 'lxml')\n return html_soup" ]
[ "0.8628668", "0.84880644", "0.84137076", "0.8070522", "0.80435354", "0.79836047", "0.78635615", "0.78475887", "0.747077", "0.73754066", "0.72366697", "0.72015417", "0.714936", "0.71203345", "0.7103755", "0.7096957", "0.70346177", "0.7005713", "0.6984478", "0.6955197", "0.69351816", "0.6913274", "0.68932223", "0.68776554", "0.68408763", "0.68376124", "0.68280834", "0.682398", "0.669454", "0.66738886", "0.6654842", "0.65935534", "0.6580902", "0.6573284", "0.6566132", "0.6539177", "0.6516902", "0.64551544", "0.6452835", "0.6370227", "0.63446844", "0.6340904", "0.6334861", "0.6320063", "0.6311695", "0.63086975", "0.6302393", "0.6295639", "0.62859446", "0.6264793", "0.62417376", "0.62399596", "0.61980116", "0.61927015", "0.6178538", "0.6133998", "0.61331695", "0.6129062", "0.61216325", "0.61185646", "0.6117843", "0.6105837", "0.6103252", "0.6087605", "0.6086691", "0.6075231", "0.6072845", "0.60486007", "0.6044746", "0.6025028", "0.6000524", "0.59866774", "0.5972094", "0.59633404", "0.5943524", "0.5932282", "0.5922002", "0.5915323", "0.59072226", "0.5900004", "0.589102", "0.58422637", "0.5834587", "0.5820904", "0.5819605", "0.58082145", "0.58032274", "0.57971054", "0.57868207", "0.5781431", "0.577011", "0.5769448", "0.5759364", "0.5749124", "0.5725664", "0.57204527", "0.56827337", "0.56810415", "0.5677489", "0.56583816" ]
0.7005158
18
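Read together, the get_soup helper above and the get_links method in the record that follows outline a small spreadsheet-link crawler: parse a fetched page into soup, then queue every https link ending in .csv, .xls, or .xlsx. Below is a minimal, self-contained sketch of that flow; the class name SheetCrawler, the start_url argument, and the use of requests for fetching are illustrative assumptions, not part of the dataset.

import requests
from bs4 import BeautifulSoup


class SheetCrawler:
    def __init__(self, start_url):
        self.start_url = start_url
        self.url_queue = []  # URLs of spreadsheet files found so far

    def get_soup(self, html):
        # Parse raw HTML into a BeautifulSoup tree; return None for empty input.
        if html is not None:
            return BeautifulSoup(html, "html.parser")
        return None

    def get_links(self, soup):
        # Keep only absolute https links that point at spreadsheet files.
        for link in soup.select('a[href^="https://"]'):
            href = link.get("href")
            if not any(href.endswith(ext) for ext in (".csv", ".xls", ".xlsx")):
                continue
            if href not in self.url_queue:
                self.url_queue.append(href)

    def crawl(self):
        response = requests.get(self.start_url, timeout=10)
        soup = self.get_soup(response.text)
        if soup is not None:
            self.get_links(soup)
        return self.url_queue


if __name__ == "__main__":
    crawler = SheetCrawler("https://example.com/data")  # placeholder URL
    print(crawler.crawl())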
Get the links of interest from the given BeautifulSoup object
def get_links(self, soup):
    """ @param soup: BeautifulSoup object that contains the targeted links """
    """ @type soup: BeautifulSoup object """
    for link in soup.select('a[href^="https://"]'):  # All links whose href starts with https://
        href = link.get('href')  # The actual href value of the link
        if not any(href.endswith(x) for x in ['.csv', '.xls', '.xlsx']):
            print("No excel")
            continue
        if href not in self.url_queue:
            self.url_queue.append(href)  # Add the URL to our queue
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def iter_links(self):", "def get_links() -> list:\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36\",\n \"Accept\": \"text/html\",\n \"Accept-Encoding\": \"gzip, deflate\",\n }\n p = re.compile(r'\\d+.html')\n base_url = 'http://stateoftheunion.onetwothree.net/texts/'\n essay_url = base_url + 'index.html'\n res = requests.get(essay_url, headers=headers)\n soup = BeautifulSoup(res.content, 'html')\n links = soup.find_all('a')\n sotu_links = {link.text: base_url + link.get('href', '') for link in links if re.match(p, link.get('href', ''))}\n return sotu_links", "def links(self):\n\t\treturn self.list_of_links", "def getURLs():", "def handout_links(self):\r\n return self.q(css='section.handouts ol li a').map(lambda el: el.get_attribute('href')).results", "def getLink(self):", "def getLinks(self):\n\t\threfs = []\n\t\tfor link in self.bsource.find_all('a'):\n\t\t\threfs.append(link.get('href'))\n\t\treturn hrefs", "def get_links(proj,exp):\n response = do_method(\"experiment.info\",\n {\"proj\":proj,\"exp\":exp,\"aspect\":\"links\"})\n check_response(response)\n return response['value']", "def get_links(self):\n msg = self.get_message()\n return msg.split()", "def get_links(self):\r\n return self.links", "def links(self) -> str:\n return pulumi.get(self, \"links\")", "def getlinks(url):\n page = Linkfetcher(url)\n page.linkfetch()\n for i, url in enumerate(page):\n print(\"%d ==> %s\" % (i, url))", "def extract_urls(genome):\n itemid = genome.get('metadata').get('identifier')\n urls = set([url for url in genome['urls'] if 'archive.org' not in url])\n db_urls_found(itemid, urls)", "def get_subjects_IOP_urls(url):\n # f = open(\"test.txt\", 'a+')\n body = getBody(url)\n\n html = soup(body,'html.parser')\n # print(html.original_encoding)\n div_content = html.find(id=\"content\")\n a_elems = div_content.find_all(\"a\", recursive=True, class_=\"entry-image-post-link\".encode('utf-8'))\n hrefs = []\n for a in a_elems:\n hrefs.append(a[\"href\"])\n return hrefs", "def links(iati_import, activity, project, activities_globals):\n imported_links = []\n changes = []\n\n for website in activity.findall('activity-website'):\n url = get_text(website, activities_globals['version'])\n\n # Skip RSR links\n if url and 'rsr.akvo.org' in url:\n continue\n\n link, created = get_model('rsr', 'link').objects.get_or_create(\n project=project,\n url=url\n )\n\n if created:\n changes.append(u'added link (id: %s): %s' % (str(link.pk), link))\n\n imported_links.append(link)\n\n for doc_link in activity.findall(\"document-link[@format='application/http']\"):\n url = ''\n caption = ''\n\n if 'url' in doc_link.attrib.keys():\n url = doc_link.attrib['url']\n\n # Skip RSR links\n if url and 'rsr.akvo.org' in url:\n continue\n\n title_element = doc_link.find('title')\n if not title_element is None:\n caption = get_text(title_element, activities_globals['version'])\n if len(caption) > 50:\n add_log(iati_import, 'link_caption', 'caption is too long (50 characters allowed)',\n project, IatiImportLog.VALUE_PARTLY_SAVED)\n caption = caption[:50]\n\n link, created = get_model('rsr', 'link').objects.get_or_create(\n project=project,\n url=url,\n caption=caption\n )\n\n if created:\n changes.append(u'added link (id: %s): %s' % (str(link.pk), link))\n\n imported_links.append(link)\n\n for link in project.links.all():\n if not link in imported_links:\n changes.append(u'deleted link (id: %s): %s' %\n (str(link.pk),\n 
link.__unicode__()))\n link.delete()\n\n return changes", "def get_links_from_body(body):\n return [get_base(url) for url in extract_urls(body)]", "def get_links_to_historic_matches(wd):\n list_of_links = []\n content_blocks = wd.find_elements_by_id(\"js-mutual-table\")\n for block in content_blocks:\n elements = block.find_elements_by_tag_name(\"a\")\n for el in elements:\n one_link = el.get_attribute(\"href\")\n if one_link.count(\"/\") > 6:\n list_of_links.append(one_link)\n return list_of_links", "def get_guide_urls(self):\n # data structures for returns\n urls = []\n link_labels = []\n link_class = []\n # data structures for tracking classes for links\n cur_class = None\n dict_counter = {}\n for tag in self.post_div.find_all(\"a\"):\n url = tag[\"href\"]\n # update class for the links if boundary found\n if url in url_to_class:\n dict_count = min(dict_counter.get(url, 0), len(url_to_class[url]) - 1)\n cur_class = url_to_class[url][dict_count]\n dict_counter[url] = dict_counter.get(url, 0) + 1\n # record the data for the link\n if cur_class is not None:\n urls += [url]\n link_labels += [tag.text]\n link_class += [cur_class]\n return urls, link_labels, link_class", "def gen_links(text):\n return []", "def exactor_links(self, response: BeautifulSoup):\n raise NotImplementedError", "def links(cls, page):\r\n for match in cls.HREF_RE.finditer(page):\r\n yield cls.href_match_to_url(match)", "def get_links(self):\n soup = BeautifulSoup(requests.get(self.locations_url).text.strip(), features=\"lxml\")\n for region in soup.select('td[class=\"navbox-list navbox-odd\"]'):\n self.links.extend(region.div.find_all('a'))\n\n soup_prague = BeautifulSoup(requests.get(self.url_prague).text.strip(), features=\"lxml\")\n table_prague = soup_prague.findAll('table', {\"class\": \"wikitable\"})[3]\n for prague_parts in table_prague.select(\"tr > td:nth-child(3)\"):\n self.links.extend(prague_parts.find_all('a'))\n\n self.links = [self.url + i['href'] for i in self.links]\n self.links.append(self.url_prague)\n return None", "async def _find_links(self, res: aiohttp.ClientResponse) -> Iterator[str]:\n\n content = await res.text()\n soup = BeautifulSoup(content, 'html.parser')\n links = [self._format(res.url, a) for a in soup.find_all('a')]\n return filter(lambda l: l is not None, links)", "def getLinks(self):\n return self.pageLinks", "def get_links(corpus, page):\n res = []\n for p in corpus:\n if page in corpus[p]:\n res.append(p)\n return res", "def produce_links_search(self, value_list:list) -> list:\n return [\n [self.produce_link_google(f) for f in value_list],\n [self.produce_link_qwant(f) for f in value_list],\n [self.produce_link_bing(f) for f in value_list],\n [self.produce_link_duckduckgo(f) for f in value_list],\n [self.produce_link_yahoo(f) for f in value_list]\n ]", "def getlinks_to(markup, domain):\n links = []\n for link in markup.cssselect('a'):\n target = link.get('href')\n\n if indomain(target, domain):\n links.append(target)\n\n return links", "def get_links(self):\n return (link for link in self.links)", "def query_download_link_from_ebi(query):\n all_links = list()\n fl = get_accession(query)\n for r in fl:\n tmp = get_download_link(r)\n if len(tmp) > 0:\n all_links.extend(tmp)\n ret_links = [\"ftp://\"+link for link in all_links]\n return sorted(list(set(ret_links)))", "def get_image_links(data):\n painting_links = []\n\n print(data)\n\n for painting in data:\n painting_links.append(painting['image'])\n\n return painting_links", "def get_links_from_url(url):\n return [get_base(url)]", 
"def list(self):\n\t\treturn self.link_words", "def get_links(self):\r\n return self.__links", "def urls(self) -> list[str]:\r\n ...", "def _get_links(self):\n with open(self.source, \"r\", encoding=\"utf-8\") as link_doc:\n return link_doc.readlines()", "def getMyLinks(self, link_list, plant):\n my_links = []\n for links in link_list:\n if plant in links:\n my_links.append(links)\n return my_links", "def collectLinks(self, output):\n pass", "def get_recipe_links(pages):\n recipe_links = []\n for page in xrange(1, pages+1):\n sleep(SCRAPING_REQUEST_STAGGER)\n recipe_links.extend(get_recipe_links_by_page(page))\n cuisine_recipes = get_recipe_details(list(set(recipe_links)))\n return cuisine_recipes", "def extract_links():\n br = mechanize.Browser()\n br.open(BASE_URL)\n f = open('data/svodki/alllinks.csv', 'w')\n calurls = []\n # Collect all calendar urls with reports\n for year in range(2005, 2013):\n for month in range(1, 13):\n calurls.append([year, month, CALEND_URLPAT %(year, month)])\n\n # Update for current year (needs fixes later)\n for year in range(2013, 2014):\n for month in range(1, 3):\n calurls.append([year, month, CALEND_URLPAT %(year, month)])\n # Process calendar urls one by one\n for year, month, calurl in calurls:\n print calurl\n u = br.open(calurl)\n data = u.read()\n u.close()\n soup = BeautifulSoup(data)\n slist = soup.find('ul', attrs={'class': 'emergency_list'})\n urls = slist.findAll('a')\n for url in urls:\n s = '%s\\t%s\\t%s\\t%s\\t' % (unicode(year), unicode(month), url.text, urljoin(BASE_URL, url['href']))\n f.write((s + '\\n').encode('utf8'))\n print s\n f.close()", "def getExpandedLinks():", "def get_all_links(page):\n\tlinks = []\n\twhile True:\n\t\turl, end_pos = get_next_target(page)\n\t\tif url:\n\t\t\tlinks.append(url)\n\t\t\tpage = page[end_pos:]\n\t\telse:\n\t\t\tbreak\n\treturn links", "def links(self):\n return self._links", "def links(self):\n return self._links", "def links(self):\n return self._links", "def links(self):\n return self._links", "def links(self):\n return self._links", "def get_recipe_links(cuisine, pages, collection):\n recipe_links = []\n for page in xrange(0, pages):\n sleep(SCRAPING_REQUEST_STAGGER)\n recipe_links.extend(get_cuisine_search_pages(cuisine, page))\n if collection:\n recipe_links.extend(get_cuisine_collection_page(cuisine))\n cuisine_recipes = get_recipe_details(list(set(recipe_links)))\n return cuisine_recipes", "def find_references(doi):\n if doi is None:\n return None\n\n references = []\n if doi:\n response = requests.get(f\"https://opencitations.net/index/api/v1/references/{doi}\").json()\n if response:\n references = [{\"doi\": r['cited'].replace(\"coci =>\", \"\")} for r in response]\n\n if references:\n return references\n else:\n return None", "def __url_list(self, page):\n url_list = []\n for tag_a in page.find_all('a'):\n href = str(tag_a.get('href'))\n if self.__verify(href):\n url = parse.quote(self.__add_main_site(href), '/:#')\n url_list.append(url)\n return url_list", "def links(self):\n return self._links_tpl.expand(self._identity, self._record)", "def _getLinks(self, response, soup):\n links = []\n for anchor in soup.find_all('a'):\n href = anchor.get('href')\n # Convert relative href to full uri\n if href and href.startswith(\"/\"):\n href = response.urljoin(href)\n else:\n continue\n links.append(href)\n return links", "def links(self):\n return self.container['links']", "def _extract_intra_links(pajek):\n string_links = re.findall(r\"\\d+ \\d+ \\d+.*\", 
pajek.split(\"*Intra\")[1].split(\"*Inter\")[0])\n intra_links = [list(map(eval, link.split())) for link in string_links]\n G_arr_intra = defaultdict(lambda: nx.Graph)\n for l in intra_links:\n G_arr_intra[l[0]].add_edge(l[1], l[2])\n return G_arr_intra", "def hyperlinks(self):\n return self.container['hyperlinks']", "def iter_page_links(self) -> Iterable[str]:\n base_url = 'https://www.med.navy.mil'\n r = requests.get(self.starting_url, verify=CERTIFICATE_DIR + '/cat3.pem')\n soup = bs4.BeautifulSoup(r.content, features=\"html.parser\")\n\n # get target column of list items\n issuance_list = soup.find('div', attrs={'class': 'noindex ms-wpContentDivSpace'})\n matches = [\"Publications\", \"BUMEDNotes\", \"BUMEDInstructions\"]\n # extract links\n links = [link for link in issuance_list.find_all('a')]\n for link in links[2:-1]:\n if any(x in str(link) for x in matches):\n if not link['href'].startswith('http'):\n url = base_url + link['href']\n else:\n url = link['href']\n yield url", "def get_urls(json_dict):\n url_list = []\n count = 0\n for i in json_dict[\"items\"]:\n if i[\"is_answered\"]:\n url_list.append(i[\"link\"])\n count += 1\n if count == 3 or count == len(i):\n break\n \n for i in url_list:\n wb.open(i)", "def links(self):\n links = {}\n data = self.data['links']\n for key in data:\n links[key] = data[key]['url']\n return links", "def get_links(self):\n return self.__data['links']", "def grab_links(self):\n links = []\n link_char = []\n w_temp = [] #in template?\n par = [] #in parentheses?\n rtag = [] #in <ref> tag?\n dtag = [] #in <div> tag?\n\n skip_char = []\n\n for i, c in enumerate(self.article_xml):\n if i in skip_char: continue #eliminates double counting\n char = self.article_xml[i:i+2]\n tag = self.article_xml[i:i+4]\n \n #wiki template\n w_temp = self.inside_char(char, Article.w_marker, w_temp, i)\n if char in Article.w_marker: skip_char.append(i+1)\n if w_temp:\n continue #doesn't process if inside wiki template\n \n #parentheses\n par = self.inside_char(c, Article.par_marker, par, i)\n if par:\n continue\n \n #<ref> or <div>\n rtag = self.inside_char(tag, Article.rtag_marker, rtag, i)\n dtag = self.inside_char(tag, Article.dtag_marker, dtag, i)\n if rtag or dtag:\n continue\n \n #clear to add outer-most link\n if char == '[[':\n link_char.append(i)\n elif char == ']]' and len(link_char) == 1:\n links.append( self.article_xml[link_char[0]:i+2])\n link_char.pop()\n elif char == ']]' and len(link_char) > 1:\n link_char.pop()\n return links", "def get_links(self, response, domain, port, folder):\n\t\t# find link in tags: a, link, form, button\n\t\t# call to all function in file get_link\n\t\t# for method in get_link:\n\t\tlinks = get_link(response, domain, port, folder)\n\t\tlinks = filter(None, links.getResults())\n\t\treturn links", "def links(self):\n return self.dom.findall(\".//a\")", "def get_docs_urls(self):\n docs_urls = []\n link_labels = []\n for tag in self.post_div.find_all(\"a\"):\n url = tag[\"href\"]\n if url.startswith(\"https://docs.google.com\") or \\\n url.startswith(\"https://drive.google.com\"):\n docs_urls += [url]\n link_labels += [tag.text]\n return docs_urls, link_labels", "def get_urls():\r\n return []", "def getNewsIconURL(newsBrain):", "def get_all_links(html):\n links = []\n while True:\n url, endpos = get_next_target(html)\n if url:\n links.append(url)\n html = html[endpos:]\n else:\n break\n return links", "def enumerate_profiles(inhandle, page):\n html = inhandle.read()\n soup = BeautifulSoup(html, 'html.parser')\n \n urls = [ 
node.find('a')['href'] for node in soup.findAll('h1', {'class':'entry-title'})]\n return urls", "def find_active_links(lat, lon, place, name):\n\tWIKIPEDIA_BASE = 'https://wikipedia.org/wiki/Special:Search/'\n\tlinks = {}\n\tlinks[\"wikipediaUrl\"] = WIKIPEDIA_BASE + name\n\n\ttry:\n\t\tfsqReturn = find_foursquare_url(lat, lon, name)\n\t\tfoursquareVenueId = fsqReturn['venueId']\n\t\tfoursquareUrl = fsqReturn['4sqUrl']\n\t\twebsite = fsqReturn['url']\n\t\tdisplayMetadata = fsqReturn['metadata']\n\n\t\tif foursquareUrl is not None:\n\t\t\tlinks['foursquare'] = {\"foursquareUrl\" : foursquareUrl,\n\t\t\t\t\"foursquareVenueId\" : foursquareVenueId}\n\n\t\tif website is not None:\n\t\t\tlinks['url'] = website\n\n\t\tif displayMetadata is not None:\n\t\t\tlinks['displayMetadata'] = displayMetadata\n\n\texcept:\n\t\tprint \"foursquare failed\"\n\n\ttry:\n\t\topenTableUrl = find_open_table_url(place)\n\t\tif openTableUrl is not None:\n\t\t\tlinks['openTableUrl'] = openTableUrl\n\n\texcept: \n\t\tprint \"opentable failed\"\n\n\treturn links", "def get_links():\n # make a driver to create a section were we going to work, and get the source of page\n driver = webdriver.Chrome(executable_path=Params.path_crome)\n driver.get('https://www.foxsports.com./soccer/schedule?competition=4&season=2019&round=1&week=0&group=0&sequence=1')\n WebDriverWait(driver, 60).until(\n EC.presence_of_element_located((By.XPATH, '//*[@id=\"wisfoxbox\"]/section[3]/div[1]/div/div[1]/div[2]/a')))\n html = driver.page_source\n # read the page source with BeautifulSoup, and make a lista with all links game\n soup = BeautifulSoup(html, 'lxml')\n urls = soup.find_all('td', {'class': 'wisbb_gameInfo'})\n # with list comprehension select only the games have ended\n links = ['https://www.foxsports.com' + item.find('a')['href'] for item in urls if\n item.find('span', {'class': 'wisbb_status'}).text == 'FINAL']\n # end a driver and return a list with links\n driver.quit()\n return links", "def get_links(self) -> List[str]:\n return self.__links", "def _links_get(self, cr, uid, context=None):\n obj = self.pool.get('res.request.link')\n ids = obj.search(cr, uid, [])\n res = obj.read(cr, uid, ids, ['object', 'name'], context)\n return [(r['object'], r['name']) for r in res]", "def parse(html, url, bases): \n\n soup = BeautifulSoup(html, 'lxml')\n htmlBody = soup.find('body').get_text().strip()\n links = [urljoin(url, l.get('href')) for l in soup.findAll('a')]\n links = [l for l in links if urlparse(l).netloc in bases]\n return url, htmlBody, links", "def links_json(self, absolutize_url):\n return [\n {\n \"href\": absolutize_url(\"v2/{0}/images/{1}\"\n .format(self.tenant_id, self.image_id)),\n \"rel\": \"self\"\n },\n {\n \"href\": absolutize_url(\"{0}/images/{1}\"\n .format(self.tenant_id, self.image_id)),\n \"rel\": \"bookmark\"\n },\n {\n \"href\": absolutize_url(\"/images/{0}\"\n .format(self.image_id)),\n \"type\": \"application/vnd.openstack.image\",\n \"rel\": \"alternate\"\n }\n ]", "def links_json(self, absolutize_url):\n return [\n {\n \"href\": absolutize_url(\"v2/{0}/images/{1}\"\n .format(self.tenant_id, self.image_id)),\n \"rel\": \"self\"\n },\n {\n \"href\": absolutize_url(\"{0}/images/{1}\"\n .format(self.tenant_id, self.image_id)),\n \"rel\": \"bookmark\"\n },\n {\n \"href\": absolutize_url(\"/images/{0}\"\n .format(self.image_id)),\n \"type\": \"application/vnd.openstack.image\",\n \"rel\": \"alternate\"\n }\n ]", "def get_links_from_one_page(driver,site,URL_exclusions):\r\n while True:\r\n try:\r\n results = 
driver.find_elements_by_class_name(\"g\") #Find all elements with class=\"g\". This includes search results.\r\n break\r\n except:\r\n continue \r\n links = []\r\n for result in results:\r\n link = result.find_element_by_tag_name(\"a\") #Hyperlinks are contained under <a> tags\r\n link = link.get_attribute('href') #Retrive link as a string\r\n if link.find(site) != -1: #Some class=\"g\" elements are not search results. Only store links with urls containing \"site\".\r\n links.append(link)\r\n sig_links = [] #Create list of links for pages not from travel sections\r\n for url in links:\r\n find = np.zeros(len(URL_exclusions))\r\n for i in range(len(URL_exclusions)):\r\n find[i] = bool(url.find(URL_exclusions[i]) == -1)\r\n if all(find) == True: #If none of the exclusion words are in url\r\n sig_links.append(url)\r\n return sig_links", "def search_urls():\n r = req('GET', SUB_API + 'search/urls', params=apply_search_filters())\n urls = []\n for url in demisto.get(r.json(), 'data.items'):\n urls.append({\n 'Result': demisto.get(url, 'result'),\n 'Details': demisto.get(url, 'details')\n })\n demisto.results({\n 'Type': entryTypes['note'],\n 'EntryContext': {'ThreatGrid.URLs': urls},\n 'HumanReadable': tableToMarkdown('ThreatGrid - URL Search', urls, ['Result', 'Details']),\n 'ContentsFormat': formats['json'],\n 'Contents': r.json()\n })", "def get_links(self, node): # pragma: no cover\n\t\traise NotImplementedError", "def __getitem__(self, i):\n return self._links[i]", "def getLinks(content):\n soup = BeautifulSoup(content, 'lxml')\n links = set([link.get('href') for link in soup.find_all('a')])\n return links", "def get_links(html):\n # a regular expression to extract all links from the webpage\n # re.IGNORECASE 让正则表达式忽略大小写,如[A-Z]也可以匹配小写字母了。\n # link_list = re.findall(r\"(?<=href=\\\").+?(?=\\\")|(?<=href=\\').+?(?=\\')\", content)\n webpage_regex = re.compile('<a href=(.*?)</a>', re.IGNORECASE)\n # list of all links from the webpage\n url_list = webpage_regex.findall(html)\n with open('E:\\淘女郎首页图片list.txt','w') as f:\n for each in url_list:\n f.write(each+'\\n')\n return url_list", "def getLinks(self):\n refbrains = self.refcat._queryFor(relationship=self.relation,\n tid=self.suid, sid=None)\n if refbrains:\n uids = [brain.sourceUID for brain in refbrains]\n ## XXX non-orthogonal\n return self.resolver.queryUIDs(uids)\n return []", "def get_hyperlinks(url, header, empty_list):\n response = requests.get(url, headers=header)\n soup = BeautifulSoup(response.text, 'html.parser')\n \n# table holds the preview of every job posting\n job_table = soup.find_all('h2', {'mb4 fc-black-800 fs-body3'})\n \n for job in job_table:\n# retrieving every link\n job_link_html = job.find_all('a', href=True)\n \n for job_link in job_link_html:\n empty_list.append('https://stackoverflow.com'+ job_link['href'])\n \n return empty_list", "def iter_page_links(self) -> Iterable[str]:\n base_url = 'https://health.mil/About-MHS/OASDHA/Defense-Health-Agency/Resources-and-Management/DHA-Publications'\n yield base_url", "def scrape_links(base_url, data):\n soup = BeautifulSoup(data, from_encoding=\"gbk\")\n\n # Create mechanize links to be used\n # later by mechanize.Browser instance\n #soup = BeautifulSoup(data)\n print 'scrape_links before'\n links = []\n for anchor in soup.find_all('a'):\n url = anchor['href']\n text = anchor.string\n shtml = '.shtml'\n thisYear = '2013'\n isWant = ( anchor.has_attr('href')) \\\n and ( anchor.has_attr('target') ) \\\n and (BASE_URL in url) \\\n and (shtml in url) \\\n and (text != None) 
\\\n and (thisYear in url)\n if isWant==True:\n unicode_string = (unicode(anchor.string))\n print 'unicode_string:',unicode_string\n print 'type(text): ', type(text)\n print 'type(unicode_string): ', type(unicode_string)\n tag = anchor.name\n\n attrs = []\n for name in anchor.attrs:\n attrs.append(name)\n link = mechanize.Link(base_url, url, text, tag, attrs)\n print link\n links.append(link)\n if len(links) > 10:\n break;\n print 'scrape_links after'\n return links", "def links (self):\n return (link for src, dst, link in self.network.edges_iter(data=True) if\n link.type == Link.STATIC or link.type == Link.DYNAMIC)", "def extract_links(data):\n soup = BeautifulSoup(data)\n for link in soup.findAll(\"a\"):\n for pair in link.attrs:\n if pair[0] == u'href':\n yield pair[1]", "async def get_article_links(self):\n urls = []\n for page in range(self._start, self._end+1):\n urls.append(self._searchURL + str(page))\n result_list = await self._connect(urls)\n\n self._urls = []\n hares_links = []\n for result in result_list:\n soup = result[1]\n search_links = soup.find_all(class_='search-title')\n article_links = re.findall(r'url=(.*?)\\\"', str(search_links))\n for l in article_links:\n l = unquote(l)\n if 'hare48.pixnet.net' in l:\n hares_links.append(l)\n else:\n self._urls.append(l)\n self._urls.extend(await self._transform_hares(hares_links))", "def get_links(html, outformat):\n if outformat == FORMAT_BIBTEX:\n refre = re.compile(r'<a href=\"(/scholar\\.bib\\?[^\"]*)')\n elif outformat == FORMAT_ENDNOTE:\n refre = re.compile(r'<a href=\"(/scholar\\.enw\\?[^\"]*)\"')\n elif outformat == FORMAT_REFMAN:\n refre = re.compile(r'<a href=\"(/scholar\\.ris\\?[^\"]*)\"')\n elif outformat == FORMAT_WENXIANWANG:\n refre = re.compile(r'<a href=\"(/scholar\\.ral\\?[^\"]*)\"')\n reflist = refre.findall(html)\n # escape html entities\n reflist = [re.sub('&(%s);' % '|'.join(name2codepoint), lambda m:\n chr(name2codepoint[m.group(1)]), s) for s in reflist]\n return reflist", "def _extract_links(self, publication, feed_self_url):\n self._logger.debug(\n \"Started extracting links from {0}\".format(encode(publication.links))\n )\n\n links = []\n\n for link in publication.links:\n link_metadata = self._extract_link(link, feed_self_url)\n links.append(link_metadata)\n\n description_link = self._extract_description_link(publication)\n if description_link:\n links.append(description_link)\n\n image_links = self._extract_image_links(publication, feed_self_url)\n if image_links:\n links.extend(image_links)\n\n self._logger.debug(\n \"Finished extracting links from {0}: {1}\".format(\n encode(publication.links), encode(links)\n )\n )\n\n return links", "def htmlFindLinkUrls(page, attrs={}):\n htmlParsePage(page)\n bs = page['parsedHtml']\n elList = bs.findAll('a', attrs=attrs)\n urls = []\n for el in elList:\n if 'href' not in el:\n continue\n url = el['href']\n url = urlparse.urljoin(page['url'], url)\n urls.append(url)\n\n return urls", "def links(self):\n return self._link_reg", "def get_urls(links):\n\n temp_list=[]\n url_list=[]\n temp_list2=[]\n #Open the file where the url's are saved and copy the tuple values into an empty list\n z=open('dbdocs.txt','r')\n for line in z:\n temp_list.append(line)\n #print temp_list\n for x in temp_list:\n index=x.find(',')\n if index==-1:\n y=x.split(\" \",1)\n key=int(y[0])\n val=str(x[1]).replace('\\n','')\n url_list.append((key,val))\n else:\n #find the tab seperator between the key and the url, and\n #split them, in order to put in a list\n key=x[0:index-1]\n #print key\n 
value=str(x[index+3:len(x)-1])\n #print value\n temp_list2.append((int(key),value))\n #Find the url's of the links where the word was found\n for k in links:\n for i,j in temp_list2:\n #print j\n if i==k:\n url_list.append((i,j))\n break\n #print len(url_list)\n #print len(links)\n z.close()\n return url_list", "def getLinks(self):\n\n return self.links", "def fetch_urls(self, html):\n urls = []\n all_urls = set()\n dom = lh.fromstring(html)\n for href in dom.xpath('//a/@href'):\n url = urljoin(self.base_url, href)\n path = urlparse(url).path\n ext = os.path.splitext(path)[1]\n if bool(ext) and ext not in ['.html', '.htm']:\n continue\n if url not in self.visited_urls and url.startswith(self.base_url):\n urls.append(url)\n if url not in all_urls and url.startswith(self.base_url):\n all_urls.add(url)\n return urls, all_urls", "def connect_links(base_url, extensions, wikidir, body):\n if base_url.endswith(\"/\"):\n base_url = base_url[:-1]\n\n i = 0\n body2 = []\n\n for match in WIKILINK.finditer(body):\n body2.append(body[i:match.span(0)[0]])\n \n text = match.group(1)\n\n if \"|\" in text:\n topic, desc = text.split(\"|\")\n topic = topic.strip()\n else:\n topic, desc = (text, text)\n\n fn = os.path.join(wikidir, topic)\n\n ext = tools.what_ext(extensions, fn)\n if not ext:\n body2.append(match.group(0))\n i = match.span(0)[1]\n continue\n\n body2.append(\"<a href=\\\"%s/%s/%s\\\">%s</a>\" % \\\n (base_url, TRIGGER, topic, desc))\n i = match.span(0)[1]\n\n body2.append(body[i:])\n return \"\".join(body2)", "def getLinks(self, url, tag = \"a\", attr = \"href\"): \n try: \n response = open(self.filename(url)).read() #read from the file\n except IOError:\n raise IOError\n parsed_url = urlparse(url)\n domain = parsed_url[0] + '://' + parsed_url[1]\n \n try:\n soup = BeautifulSoup.BeautifulSoup(response)\n l = soup.findAll(tag, href = True)\n except Exception:\n raise Exception\n links = []\n \n for tag in l:\n link = str(tag[attr]) #convert the link to a string\n purl = urlparse(link)\n if purl[1] == '': #if the link is relative make it absolute\n link = domain+link\n #check if the extension is that of a document \n if splitext(link)[1] in self._invalidExt: \n self.docs_list.append(link)\n \n #append only the html link\n links.append(link)\n \n \n \n return list(set(links)) #returns only distinct links", "def getLinks(link):\n source = requests.get(link).text\n soup = BeautifulSoup(source, 'lxml')\n rows = soup.find_all(class_ = 'column-1') #select which column \n list_of_links = []\n \n for row in rows[1:]: #rows[1:] is used in case first row is a title row (ie there is no useful data here)\n name = row.find('a')\n link = name.attrs['href'] #the data I'm trying to extract\n list_of_links.append(link)\n return list_of_links", "def get_links(self):\n links = \"\"\n if self.title != \"\":\n links += html_link_to_tag(\n plain_to_html(self.title), self.title, self.proc\n )\n return links + \\\n html_unordered_list([x.get_links() for x in self.subsections])", "def get_links(query_terms):\n\n # the set of links all of which contains all the terms in the query string\n final_links = None\n for term in query_terms:\n # get all links containing the term and put in a set\n links = Set(index_data.get(term))\n #print(\"\\n\\nQuery Term: %s\" % term)\n #print(links)\n\n # special case for first iteration, because: empty & anything = empty\n if final_links == None:\n final_links = links\n\n # take intersection of links set\n final_links = final_links & links\n\n #print(final_links)\n\n # convert the Set to 
List and return\n return list(final_links)", "def get_links(file_src='index.html') -> List[Dict[str, str]]:\n with open(file_src) as file:\n soup = BS(file.read(), 'html.parser')\n\n vid_entries = soup.select('a.yt-simple-endpoint.style-scope.ytd-playlist-video-renderer')\n for vid_elem in vid_entries:\n song = vid_elem.select_one('span[title]')\n if song:\n title = song['title']\n href = vid_elem.select_one('a[href]')['href']\n yield {'title': title, 'href': href}", "def get_links(value):\n\ttry:\n\t\ttry:\n\t\t\tfrom BeautifulSoup import BeautifulSoup\n\t\texcept ImportError:\n\t\t\tfrom beautifulsoup import BeautifulSoup\n\t\tsoup = BeautifulSoup(value)\n\t\treturn soup.findAll('a')\n\texcept ImportError:\n\t\tif settings.DEBUG:\n\t\t\traise template.TemplateSyntaxError, \"Error in 'get_links' filter: BeautifulSoup isn't installed.\"\n\treturn value" ]
[ "0.66900754", "0.6568194", "0.6410215", "0.6353673", "0.6319181", "0.63110644", "0.6267211", "0.6240298", "0.62356913", "0.62124115", "0.611109", "0.60906845", "0.60674167", "0.6038508", "0.60245275", "0.60238755", "0.60237354", "0.6021248", "0.60163826", "0.6006266", "0.5980935", "0.5977729", "0.59717923", "0.5960885", "0.59112376", "0.59008735", "0.58878803", "0.5875843", "0.5873685", "0.5873463", "0.58713716", "0.5867797", "0.5860926", "0.5856926", "0.583983", "0.58374923", "0.583651", "0.5829921", "0.58226347", "0.5819459", "0.5817068", "0.58111525", "0.58111525", "0.58111525", "0.58111525", "0.58111525", "0.5810235", "0.5810182", "0.5807637", "0.580073", "0.57957315", "0.5795454", "0.5792638", "0.57925576", "0.5788128", "0.5783885", "0.577947", "0.5773586", "0.5767421", "0.57668006", "0.5766485", "0.5765104", "0.5757919", "0.5751834", "0.5748525", "0.57464385", "0.57384735", "0.57272077", "0.57191616", "0.5708265", "0.56884855", "0.5682947", "0.5682947", "0.56810445", "0.5674912", "0.56726533", "0.5670445", "0.56624526", "0.5657174", "0.5650244", "0.56496036", "0.56452", "0.56327426", "0.56269014", "0.56240207", "0.5621557", "0.5615541", "0.561194", "0.56076443", "0.55906767", "0.5585758", "0.55841756", "0.5575853", "0.55704117", "0.55663574", "0.55619925", "0.55572015", "0.55548185", "0.5546745", "0.554492" ]
0.5816899
41
Create a temp folder to download
def get_files(self):
    # self.folder= +str(int(time.time()))
    if not os.path.exists(self.folder):
        os.mkdir(self.folder)
    while len(self.url_queue):  # If we have URLs to crawl - we crawl
        href = self.url_queue.popleft()  # We grab a URL from the left of the list
        filename = href.rsplit('/', 1)[-1]
        print("Downloading %s to %s..." % (href, filename))
        fullname = os.path.join(self.folder, filename)
        urlretrieve(href, fullname)
        self.xlfnames.append(filename)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_temp_folder():\n path_join = os.path.join(tempfile.gettempdir(), id_generator(5))\n os.makedirs(path_join)\n return path_join", "def get_tmp():\n\n # tmp = os.getcwd()\n # if Settings.get_download_path() != \"\":\n # tmp = os.path.join(Settings.get_download_path(), \"tmp\")\n # else:\n # tmp = os.path.join(tmp, \"tmp\")\n # if not os.path.exists(str(tmp)):\n # os.mkdir(str(tmp))\n # return tmp\n download_path = Settings.get_download_path()\n if not os.path.exists(str(download_path)):\n os.mkdir(str(download_path))\n return download_path", "def generate_temp_folder(self):\n folder_name = str(time.time()).replace(\".\", \"\")\n folder_path = os.path.join(self.temp_folder, folder_name)\n os.makedirs(folder_path)\n self.logger.debug(f\"Created nested temp folder at {folder_path}\")\n return folder_path", "def make_tempdir(self):\n self.tempdir_path = self.dst_path + '_temp'\n if not os.path.exists(self.tempdir_path):\n os.makedirs(self.tempdir_path)\n return self.tempdir_path", "def create_temp_dir(self, *args, **kwargs):\n temp_dir = tempfile.mkdtemp(*args, **kwargs)\n try:\n yield decode_path(temp_dir)\n finally:\n remove_directory(temp_dir)", "def create_temp_directory(reason):\n current_dir = os.getcwd()\n # change path accordingly - dir=current_dir\n # check temp dir - C:\\Users\\uC264789\\AppData\\Local\\Temp\n return tempfile.mkdtemp(prefix='temp-{0}-'.format(reason))", "def tempdir():\n\n # Create a directory and return the path\n return tempfile.mkdtemp()", "def makeTempDir(self):\n try:\n os.mkdir(self.temp_dir)\n except FileExistsError:\n pass", "def temporary_folder():\r\n tempdir = mkdtemp()\r\n try:\r\n yield tempdir\r\n finally:\r\n rmtree(tempdir)", "def make_tempdir():\n return mkdtemp()", "def create_download_folder(path=None):\n local_path = path or DEFAULT_PATH\n folder_name = time.strftime(\"%Y%m%dT%H%M%S\")\n folder_path = os.path.join(local_path, folder_name)\n\n if not os.path.exists(local_path):\n os.mkdir(local_path)\n if not os.path.exists(folder_path):\n os.mkdir(folder_path)\n\n return folder_path", "def mktemp(self):\n newDir = tempfile.mkdtemp(dir=tempfile.gettempdir())\n self.addCleanup(shutil.rmtree, newDir)\n return newDir", "def create_download_path(params: DownloadCommandParameters) -> None:\n if not os.path.exists(params.output):\n os.makedirs(params.output)", "def create_directory():\n global dirName\n dirName = 'Downloaded Files'\n global folder_path\n if os.path.isdir(dirName) == True:\n print(\"This folder already exists, path:\", os.path.abspath(dirName))\n else:\n os.mkdir(dirName)\n global folder_path\n folder_path = os.path.abspath(dirName)\n print(\"Directory \" , dirName , \" Created \")", "def _make_tempdir(self):\n self._clean_tempdir()\n os.mkdir(self._get_tempdir())\n assert os.path.exists(self._get_tempdir())\n rospy.on_shutdown(self._clean_tempdir)\n rospy.on_shutdown(self._clear_cache)", "def createTmpFolder(tmpFolderName:str = 'tmp')->str:\n rp = getRootPath()\n if not os.path.isdir(rp+tmpFolderName):\n os.mkdir(rp + tmpFolderName)\n\n return rp + tmpFolderName", "def temp_dir(request):\n tmp = tempfile.mkdtemp()\n request.addfinalizer(lambda: shutil.rmtree(tmp))\n return tmp", "def create_temp_dir():\n\n try:\n temp_dir = os.getenv('TEMP_FILE_DIR')\n\n if not isinstance(temp_dir, type(None)):\n if os.path.exists(temp_dir):\n LOGGER.warning('Temp Directory Already Exists.')\n else:\n temp_dir = tempfile.mkdtemp()\n os.environ['TEMP_FILE_DIR'] = temp_dir\n else:\n temp_dir = tempfile.mkdtemp()\n os.environ['TEMP_FILE_DIR'] = 
temp_dir\n\n LOGGER.debug(f'Temp Dir: {temp_dir}')\n except Exception as ex:\n LOGGER.exception(ex)\n raise ex", "def _temp_dir(self):\n tmp_dir = os.path.join(self.output_dir, self.config.find_tune[\"run_dir\"])\n try:\n os.makedirs(tmp_dir)\n except OSError:\n pass\n os.chdir(tmp_dir)\n self.tmp_dir = \"./\"", "def make_tempdir():\n global _TEMPDIR\n if not _TEMPDIR:\n _TEMPDIR = tempfile.mkdtemp()\n return _TEMPDIR", "def _make_unique_temp_dir(base_temp_dir):\n return os.path.join(base_temp_dir, uuid.uuid4().hex)", "def download_and_extract_to_mkdtemp(\n bucket: str, key: str, session: Optional[boto3.Session] = None\n) -> str:\n filedes, temp_file = tempfile.mkstemp()\n os.close(filedes)\n download(bucket, key, temp_file, session)\n\n output_dir = tempfile.mkdtemp()\n with zipfile.ZipFile(temp_file, \"r\") as zip_ref:\n zip_ref.extractall(output_dir)\n os.remove(temp_file)\n LOGGER.verbose(\"extracted %s to %s\", temp_file, output_dir)\n return output_dir", "def create_temp_archive(case_dict):\n # ---------------------------------------------------------------------\n archive_temp_dir = \"{0}/archive_temp_dir\".format(case_dict[\"workdir\"])\n logger.debug(\"create_temp_archive %s\", archive_temp_dir)\n\n if not os.path.exists(archive_temp_dir):\n os.makedirs(archive_temp_dir)\n else:\n logger.info(\n \"ERROR archive_metadata archive_temp_dir already exists. exiting...\"\n )\n sys.exit(1)\n\n return archive_temp_dir", "def make_temp_file():\n global TEST_DATA_PATH\n TEST_DATA_PATH = tempfile.mkstemp()", "def tmp_directory(*args, **kwargs):\n path = mkdtemp(*args, **kwargs)\n try:\n yield path + '/'\n finally:\n #shutil.rmtree(path)\n pass", "def temp_dir(**kwargs):\n temp_dir = tempfile.mkdtemp(**kwargs)\n try:\n yield temp_dir\n finally:\n # Cleanup\n # ermm... this is breaking something (maybe bootstrapping replicates?), so leaving out for now\n #shutil.rmtree(temp_dir)\n pass", "def make_tempdir(delete=True):\n tdir = tempfile.mkdtemp(prefix='meta-mender-acceptance.')\n print('created dir', tdir)\n try:\n yield tdir\n finally:\n if delete:\n shutil.rmtree(tdir)", "def get_temp_dir():\n return tempfile.mkdtemp()", "def tempdir():\n return mkdtemp()", "def mk_work_dir():\n return tempfile.mkdtemp(prefix='pentaho-aws-', suffix='')", "def temporary_directory(request):\n path = tempfile.mkdtemp()\n\n def cleanup():\n \"\"\"Remove temporary directory.\"\"\"\n shutil.rmtree(path)\n\n request.addfinalizer(cleanup)\n\n return path", "def handle_drztmp_dir():\n global AXE_DRZTMP_LOC\n\n # get the path name of the tmp directory\n AXE_DRZTMP_LOC = os.path.join(AXE_DRIZZLE_PATH, AXE_DRZTMP_SUB)\n\n # delete an already existing directory\n if os.path.isdir(AXE_DRZTMP_LOC):\n shutil.rmtree(AXE_DRZTMP_LOC)\n\n # create a new, empty directory\n os.mkdir(AXE_DRZTMP_LOC)\n print('Creating temporary directory: ', AXE_DRZTMP_LOC)", "def create_temp_env_directory():\n return tempfile.mkdtemp(prefix=\"spack-\")", "def generate_temp_dir(path):\n exist = True\n while exist:\n # Keep trying random directory names if they already exist\n directory = str(hex(getrandbits(32)))[2:]\n full_path = os.path.join(path, directory)\n if not os.path.exists(full_path):\n exist = False\n try:\n os.makedirs(full_path)\n except PermissionError:\n raise PermissionError(\n \"The temporary directory cannot be created in {}. 
\"\n \"Make sure you have write permission.\".format(path)\n )\n return full_path", "def makeTempDir(self,perms=0o700,keep=False):\n\n os.mkdirs(self.temp,perms)\n if not keep:\n atexit.register(rmdirs,self.temp)\n return self.temp", "def _create_folders(tmp_folder: str = None):\n if not os.path.exists(tmp_folder):\n os.makedirs(tmp_folder)\n logging.info(\"Created folder: %s\", tmp_folder)\n\n tmp_user_data = tmp_folder + \"/user-data\"\n if not os.path.exists(tmp_user_data):\n os.makedirs(tmp_user_data)\n logging.info(\"Created folder: %s\", tmp_user_data)\n\n tmp_data_path = tmp_folder + \"/data-path\"\n if not os.path.exists(tmp_data_path):\n os.makedirs(tmp_data_path)\n logging.info(\"Created folder: %s\", tmp_data_path)\n\n tmp_cache_dir = tmp_folder + \"/cache-dir\"\n if not os.path.exists(tmp_cache_dir):\n os.makedirs(tmp_cache_dir)\n logging.info(\"Created folder: %s\", tmp_cache_dir)", "def fresh_directory():\n os.chdir(tempfile.mkdtemp())", "def mkdtemp(suffix='',prefix='tmp',dir=None):\n\tpass", "def tmp_data_directory(tmp_path_factory):\n return str(tmp_path_factory.mktemp(\"datathon-mlapp-starter\"))", "def load_temp_dir():\n\n temp_subname = 'GETURLS_TMP_{}'.format(int(time.time()))\n tmp_dir = tempfile.TemporaryDirectory(prefix=temp_subname)\n return tmp_dir", "def mktemp(self):\n if self.dryrun:\n return os.path.expandvars(\"$TEMP/build\")\n\n return tempfile.mkdtemp()", "def makeTempDir(name=None):\n makeTempDirParent()\n charSet = string.ascii_lowercase + '123456789'\n if name is None:\n while True:\n name = '%s_%s' % (''.join(random.choice(charSet) for x in xrange(4)),\n ''.join(random.choice(charSet) for x in xrange(4)))\n if not os.path.exists(os.path.join(os.curdir, '.tempTestDir', name)):\n break\n if not os.path.exists(os.path.join(os.curdir, '.tempTestDir', name)):\n os.mkdir(os.path.join(os.curdir, '.tempTestDir', name))\n return os.path.join(os.curdir, '.tempTestDir', name)", "def gen_temp_wishlist(self, identifier: str):\n tempdir_name = f\"{identifier}_{randint(1000,9999)}_repo\"\n # for when run from wish repo's home\n basedir = Path(__file__).parent.resolve()\n newdir = basedir / tempdir_name\n shutil.copytree(Path(basedir/\"fixture_repo\"), newdir)\n return newdir", "def temp_dir():\n global _temp_dir\n warnings.warn(\n \"Please use the :mod:`tempfile` module from the standard library\",\n DeprecationWarning\n )\n _create_temp_dir()\n return _temp_dir", "def _temporary_directory():\n name = tempfile.mkdtemp()\n try:\n yield name\n finally:\n shutil.rmtree(name)", "def get_tmp_dir():\n tmp_dir = os.path.join(get_mturk_dir(), 'tmp')\n if not os.path.exists(tmp_dir):\n os.mkdir(tmp_dir)\n return tmp_dir", "def _mkdtemp(*args, **kwargs):\n import tempfile\n dir_name = tempfile.mkdtemp(*args, **kwargs)\n import atexit, shutil\n ignore_errors = True\n atexit.register(shutil.rmtree, dir_name, ignore_errors)\n return dir_name", "def test_create_package_dir(self):\n tempdir = tempfile.mkdtemp()\n os.rmdir(tempdir)\n settings = {\n 'storage.dir': tempdir,\n }\n FileStorage.configure(settings)\n try:\n self.assertTrue(os.path.exists(tempdir))\n finally:\n os.rmdir(tempdir)", "def get_temporary_directory(path, ticket_id):\n return os.path.join(path, \"tmp\", ticket_id)", "def create_folders(folder_name):\n\n if os.path.exists(downloads_path + '\\\\' + folder_name):\n pass\n else:\n os.makedirs(folder_name)\n print(f'Folder: {folder_name} has been created in {downloads_path}')", "def create_temp_dir_with_constant_name(name):\n # type: (six.text_type) -> Path\n 
tmp_dir_path = Path(tempfile.gettempdir()) / name\n if tmp_dir_path.exists():\n shutil.rmtree(six.text_type(tmp_dir_path), ignore_errors=True)\n tmp_dir_path.mkdir(exist_ok=True)\n\n return tmp_dir_path", "def create_temp_output_paths() -> None:\n if not os.path.exists(TMP_PATH):\n os.makedirs(TMP_PATH)\n if not os.path.exists(TMP_MAP_PATH):\n os.makedirs(TMP_MAP_PATH)", "def test_mkdir(self, client, remote_temp_dir):\n\n dir_path = posixpath.join(remote_temp_dir, \"subdir\")\n assert not client.exists(dir_path)\n\n with HdfsHook() as hook:\n hook.mkdir(dir_path, mode=0o750)\n\n assert client.exists(dir_path)\n assert client.info(dir_path)[\"permissions\"] == 0o750", "def create(self, basedir, outdir, name, prefix=None):", "def create_temp_dir(config):\n xst_dir = os.path.join(config[\"build_dir\"], XST_DIR)\n temp_dir = os.path.join(xst_dir, XST_TEMP_DIR)\n temp_abs_dir = os.path.join(utils.get_project_base(), xst_dir, XST_TEMP_DIR)\n if not os.path.exists(temp_abs_dir):\n os.makedirs(temp_abs_dir)\n return temp_dir", "def create_cache_dir(self) -> None:\n try:\n os.makedirs(self.cache_folder)\n except FileExistsError:\n pass", "def download_folder(self) -> Path:\n path = (\n config.storage_vol\n / f\"{self.device_type}/{self.patient_id}/{self.device_id}\"\n )\n path.mkdir(parents=True, exist_ok=True)\n return path", "def secure_temp_dir(context):\n tmpd = tempfile.TemporaryDirectory()\n context.tempdir = tmpd", "def make_temp_file(dir, data):\n fd, path = tempfile.mkstemp(dir=dir)\n if PY3:\n with os.fdopen(fd, 'w', encoding='utf-8') as f:\n f.write(data)\n else:\n with os.fdopen(fd, 'w') as f:\n f.write(data)\n return path", "def temp_dir():\n temp = UNIT_TEST_DATA / 'temp'\n try:\n temp.mkdir(parents=True, exist_ok=True)\n yield temp\n finally:\n rmtree(temp)", "def get_temp_dir(root_dir, prefix=\"\", suffix=\"\"):\n temp_dir = tempfile.mkdtemp(prefix=prefix,\n suffix=suffix,\n dir=os.path.abspath(root_dir))\n return temp_dir", "def move_from_temp_directory(self):", "def random_directory():\n dirname = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(8))\n directory = tempfile.mkdtemp(suffix=dirname)\n if not os.path.exists(directory):\n os.makedirs(directory)\n return directory", "def create_output_dir(self):\n if self.output_dir is None:\n new_path = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')\n self.output_dir = os.path.expanduser(os.path.join(self.input_dir, new_path))\n try:\n os.makedirs(self.output_dir)\n except OSError:\n pass", "def tmp_dir():\n tmpdir = tempfile.mkdtemp()\n yield tmpdir\n shutil.rmtree(tmpdir)", "def a_temp_directory(base=None):\n directory = None\n try:\n directory = tempfile.mkdtemp(dir=base)\n yield directory\n finally:\n if directory and os.path.exists(directory):\n shutil.rmtree(directory)", "def create_output_folder(self):\n if not os.path.exists(self.current_path):\n os.mkdir(self.current_path)\n data_dir_by_date = datetime.datetime.now().strftime(\n \"data-%d-%b_%H-%M-%S\")\n self.date_path = os.path.join(self.current_path, data_dir_by_date)\n if not os.path.exists(self.date_path):\n os.mkdir(self.date_path)", "def temp_dir() -> pathlib.Path:\n with tempfile.TemporaryDirectory(prefix=\"phd_\") as d:\n yield pathlib.Path(d)", "def emptyTmpFolder(tmpFolderName:str = 'tmp'):\n removeTmpFolder(tmpFolderName)\n createTmpFolder(tmpFolderName)", "def TemporaryDirectory():\n name = tempfile.mkdtemp()\n try:\n yield name\n finally:\n shutil.rmtree(name)", "def create_base_temp_dir(cls):\n if 
cls._thread_local.state.temp_dirs:\n base_temp_dir = os.path.join(cls._thread_local.state.temp_dirs[-1],\n cls._TEMP_SUBDIR)\n else:\n raise ValueError(\n 'A tf.Transform function that required a temp dir was called but no '\n 'temp dir was set. To set a temp dir use the impl.Context context '\n 'manager.')\n tf.gfile.MakeDirs(base_temp_dir)\n return base_temp_dir", "def make_tempdir(parent=None):\n tmpdir = tempfile.mkdtemp(prefix='rbtools.',\n dir=parent)\n tempdirs.append(tmpdir)\n\n return tmpdir", "def open_tempfile(*tempdirs, **kwargs):\n for d in tempdirs:\n try:\n os.makedirs(d, mode=0o775, exist_ok=True)\n return tempfile.TemporaryFile(dir=d, **kwargs)\n except Exception:\n continue\n return tempfile.TemporaryFile(**kwargs)", "def mp_tmpdir():\n # shutil.rmtree(TEMP_DIR, ignore_errors=True)\n os.makedirs(TEMP_DIR)\n yield TEMP_DIR\n shutil.rmtree(TEMP_DIR, ignore_errors=True)", "def get_tmp_dir_for(self, required_digest):\n\t\ttry:\n\t\t\tif not os.path.isdir(self.dir):\n\t\t\t\tos.makedirs(self.dir)\n\t\t\tfrom tempfile import mkdtemp\n\t\t\ttmp = mkdtemp(dir = self.dir, prefix = 'tmp-')\n\t\t\tos.chmod(tmp, 0o755)\t# r-x for all; needed by 0store-helper\n\t\t\treturn tmp\n\t\texcept OSError as ex:\n\t\t\traise NonwritableStore(str(ex))", "def build_private_data_dir(self, instance):\n path = tempfile.mkdtemp(prefix=JOB_FOLDER_PREFIX % instance.pk, dir=settings.AWX_ISOLATION_BASE_PATH)\n os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)\n if settings.AWX_CLEANUP_PATHS:\n self.cleanup_paths.append(path)\n # We will write files in these folders later\n for subfolder in ('inventory', 'env'):\n runner_subfolder = os.path.join(path, subfolder)\n if not os.path.exists(runner_subfolder):\n os.mkdir(runner_subfolder)\n return path", "def mklocdir(self):\n dir = self._localpath+\"/%s.%s\" % (self._jobname, self._jobid)\n os.mkdir(dir)", "def makeTempDirParent():\n if not os.path.exists(os.path.join(os.curdir, '.tempTestDir')):\n os.mkdir(os.path.join(os.curdir, '.tempTestDir'))", "def _CreateTempDir(prefix, run_dir=None):\n temp_dir = tempfile.mkdtemp(prefix=prefix + '-', dir=run_dir)\n try:\n yield temp_dir\n finally:\n shutil.rmtree(temp_dir)", "def get_new_temp_dir(self):\n return self.useFixture(fixtures.TempDir())", "def mktempdir(delete_on_exit=True):\n tmpdir = tempfile.mkdtemp(\"idstools\")\n if delete_on_exit:\n atexit.register(shutil.rmtree, tmpdir, ignore_errors=True)\n return tmpdir", "def fixture_out_dir(tmpdir_factory) -> Path:\n my_tmpdir = Path(tmpdir_factory.mktemp(\"out\"))\n yield my_tmpdir\n shutil.rmtree(str(my_tmpdir))", "def prepDir(path=None):\n if path:\n if os.path.exists(path):\n return path\n else:\n os.makedirs(path)\n else:\n # Do something innocent when no path is provided\n path = tempfile.mkdtemp(prefix='XEPs_')\n print \"creating {} for output\".format(path)\n return path", "def _use_temp_directory(self):\n if not self._is_temp_dir:\n self._orig_base_data_dir = self._base_data_dir\n self._orig_base_logs_dir = self._base_logs_dir\n temp_dir = Path(tempfile.mkdtemp())\n self._base_data_dir = temp_dir / \"data\"\n self._base_logs_dir = temp_dir / \"logs\"\n self.db.change_path(\":memory:\")\n self.set_current(\"default\", update=False)\n self._is_temp_dir = True\n return temp_dir", "def create_tempdir(self, suffix=None, prefix=None, dir=None):\n if suffix is None:\n suffix = ''\n if prefix is None:\n prefix = 'tmp'\n if dir is None:\n dir = self.tempdir\n if dir is None and pyutilib_mngr is not None:\n dir = pyutilib_mngr.tempdir\n if dir is not 
None:\n deprecation_warning(\n \"The use of the PyUtilib TempfileManager.tempdir \"\n \"to specify the default location for Pyomo \"\n \"temporary directories has been deprecated. \"\n \"Please set TempfileManager.tempdir in \"\n \"pyomo.common.tempfiles\", version='5.7.2')\n\n dirname = tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=dir)\n if self._tempfiles[-1].ctr >= 0:\n new_dirname = os.path.join(\n os.path.dirname(dirname),\n prefix + str(self._tempfiles[-1].ctr) + suffix\n )\n # Delete any directory having the sequential name and then\n # rename\n if os.path.exists(new_dirname):\n shutil.rmtree(new_dirname)\n shutil.move(dirname, new_dirname)\n dirname = new_dirname\n self._tempfiles[-1].ctr += 1\n\n self._tempfiles[-1].append(dirname)\n return dirname", "def _generate_to_tempfile(self, generator):\r\n (output_fd, output_path) = tempfile.mkstemp()\r\n with os.fdopen(output_fd, 'w') as output:\r\n generator.write(output)\r\n return output_path", "def tempdir(*args, **kwargs):\r\n d = mkdtemp(*args, **kwargs)\r\n try:\r\n yield d\r\n finally:\r\n rmtree(d)", "def tmpfile(tmpdir_factory):\n\n def make(filename):\n fn = tmpdir_factory.mktemp(\"data\").join(filename)\n return fn\n\n # fn = tmpdir_factory.mktemp(\"data\").join(filename)\n return make", "def use_temp_dir():\n directory = tempfile.mkdtemp()\n try:\n yield directory\n finally:\n shutil.rmtree(directory)", "def create_temp_dirs(sim_dir, outer_dir_name, inner_dir_name=\"\"):\n outer_dir = os.path.join(sim_dir, outer_dir_name)\n utils.setup_dir(outer_dir)\n inner_dir = \"\"\n if inner_dir_name != \"\":\n inner_dir = os.path.join(sim_dir, outer_dir_name, inner_dir_name)\n utils.setup_dir(inner_dir)\n return outer_dir, inner_dir", "def create_directory(tracking_id):\n upload_path = os.path.join(app.config['DRS_UPLOADS'], '{0}'.format(tracking_id))\n if not os.path.isdir(upload_path):\n os.mkdir(upload_path)", "def create_temporary_file(path):\n # Save current working directory\n current_dir = os.getcwd()\n os.chdir('/')\n\n # Create list of path contents\n path_items = path.split('/')\n\n # Create directories for each string between '/' characters\n for item in path_items[1:-1]:\n if not os.path.exists(item):\n os.makedirs(item)\n os.chdir(item)\n\n # Create a new empty file with the last string in 'path_items'\n open(path_items[-1], 'a').close()\n\n # Change back to original dir\n os.chdir(current_dir)", "def tempdir(**kwargs):\n tmpdir = tempfile.mkdtemp(**kwargs)\n try:\n yield tmpdir\n finally:\n if os.path.isdir(tmpdir):\n shutil.rmtree(tmpdir, ignore_errors=True)", "def _make_output_directory(self):\n fs = self._filesystem\n output_filename = fs.join(self._root_output_dir, self._test_name)\n fs.maybe_make_directory(fs.dirname(output_filename))", "def create_required_dir():\n if not os.path.exists('utils_dfn/temp'):\n os.mkdir('utils_dfn/temp')\n if not os.path.exists('utils_dfn/img'):\n os.mkdir('utils_dfn/img')\n if not os.path.exists('utils_dfn/mask'):\n os.mkdir('utils_dfn/mask')\n if not os.path.exists('utils_dfn/output'):\n os.mkdir('utils_dfn/utils_dfn/output')\n # if not os.path.exists('compare'):\n # os.mkdir('compare')", "def tempdir():\n dirpath = tempfile.mkdtemp()\n try:\n yield dirpath\n finally:\n shutil.rmtree(dirpath)", "def test_makedirs(self, client, remote_temp_dir):\n\n dir_path = posixpath.join(remote_temp_dir, \"some\", \"nested\", \"dir\")\n\n with HdfsHook() as hook:\n hook.makedirs(dir_path, mode=0o750)\n\n assert client.exists(dir_path)\n assert client.info(dir_path)[\"permissions\"] == 0o750", 
"def createFolder(self):\n self.destination = self.getPath() #Find the destination to create the folder\n try:\n os.makedirs(self.destination) #Try and make a folder\n except FileExistsError:\n pass #Otherwise continue if an error is encountered because the file exists already", "def set_temp_file(self):\n\n index = self.filename.rfind('/') + 1\n self.temp_filename = self.filename[:index] + \"tmp_\" + self.filename[index:]", "def _create_tmp_folder_system(sitename):\n # Create site directories and files.\n p = tmpdir.mkdir(\"deployment_files\")\n config.set_site_repo(p.strpath)\n\n site_definition = copy.deepcopy(_SITE_DEFINITION)\n site_definition = site_definition % {'sitename': sitename}\n\n test_structure = copy.deepcopy(_SITE_TEST_STRUCTURE)\n test_structure['files']['site-definition.yaml'] = yaml.safe_load(\n site_definition)\n test_structure['files']['layering-definition.yaml'] = yaml.safe_load(\n _LAYERING_DEFINITION)\n test_structure['directories']['secrets']['directories']['passphrases'][\n 'files']['plaintext.yaml'] = yaml.safe_load(_PLAINTEXT_SECRET)\n test_structure['directories']['secrets']['directories']['passphrases'][\n 'files']['managed.yaml'] = yaml.safe_load(_MANAGED_SECRET)\n test_structure['directories']['secrets']['directories']['passphrases'][\n 'files']['encrypted.yaml'] = yaml.safe_load(_ENCRYPTED_SECRET)\n\n test_path = os.path.join(p.strpath, files._site_path(sitename))\n files._create_tree(test_path, tree=test_structure)\n\n return p.strpath", "def test_add3_dir(self):\n os.mkdir(tempdir + 'add3')\n TempfileManager.add_tempfile(tempdir + 'add3')" ]
[ "0.74962807", "0.73743665", "0.72294396", "0.7167176", "0.71236885", "0.7032736", "0.7019592", "0.6932763", "0.6831103", "0.6811089", "0.68001753", "0.6794936", "0.67714673", "0.6741706", "0.67387897", "0.67336327", "0.67199904", "0.6681555", "0.66798896", "0.66495675", "0.6613181", "0.6592235", "0.65898466", "0.6589807", "0.6571616", "0.65600616", "0.6558382", "0.6538919", "0.6536412", "0.6535619", "0.65216506", "0.6515663", "0.65111196", "0.6505632", "0.6497097", "0.64717305", "0.64624846", "0.64624435", "0.6434509", "0.64142776", "0.64041847", "0.63975304", "0.63855225", "0.63697577", "0.63684213", "0.63521177", "0.63412637", "0.63355464", "0.630538", "0.629487", "0.6293975", "0.6292916", "0.6289111", "0.6284879", "0.626788", "0.62649024", "0.62582076", "0.624955", "0.6236886", "0.6231817", "0.6230973", "0.62232906", "0.6223051", "0.6217807", "0.6216888", "0.62146705", "0.6210899", "0.62074333", "0.62050444", "0.61851156", "0.6184492", "0.6171033", "0.6170295", "0.61696845", "0.61633134", "0.615557", "0.6152986", "0.6144667", "0.61317587", "0.61253434", "0.6117821", "0.6116801", "0.61134994", "0.6111179", "0.6107791", "0.6101698", "0.6101387", "0.60980815", "0.60934263", "0.60863376", "0.6076862", "0.6076354", "0.6063886", "0.6061635", "0.605976", "0.6047802", "0.6038113", "0.6028193", "0.6021375", "0.6013225", "0.60114765" ]
0.0
-1
downloads the htmlpage and looks for the links with excel files
def run_downloader(self):
    """calls to the file downloader"""
    try:
        html = self.get_page(self.url)
        soup = self.get_soup(html)
        if soup is not None:  # If we have soup -
            self.get_links(soup)
            self.get_files()
        else:
            self.producer("THESS_ENV_CITYOFTHESS_DAILY_YEARLY_DATA_ERROR",
                          'data source format is not as expected', e)
            return False
    except Exception as e:
        self.producer("THESS_ENV_CITYOFTHESS_DAILY_YEARLY_DATA_ERROR",
                      'data source format is not as expected', e)
        return False
    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_pages():\n\n excel_filename = 'Result_' + datetime.datetime.now().strftime('%Y-%m-%d %H-%M-%S') + '.xlsx'\n workbook = xlsxwriter.Workbook(excel_filename)\n worksheet_all = workbook.add_worksheet()\n\n create_headers(worksheet_all, workbook)\n\n row = 1\n col = 0\n\n cell_format = workbook.add_format()\n cell__wrapped_format = workbook.add_format()\n cell__wrapped_format.set_text_wrap()\n site_url = 'http://medsalltheworld.com/'\n for full_filename in get_html_filenames():\n with open(full_filename, \"r\", encoding=\"utf-8\") as html_file:\n try:\n soup = BeautifulSoup(html_file.read(), \"lxml\")\n product_name_elements = soup.find_all(\"li\", class_=\"col-xs-6 col-md-4\")\n for elem in product_name_elements:\n name = elem.select('h3')[0].text.replace('®', '')\n elem_url = site_url + elem.select('h3')[0].find('a')['href']\n\n worksheet_all.write(row, col, name, cell_format)\n worksheet_all.write(row, col + 1, elem_url, cell_format)\n worksheet_all.write(row, col + 2, full_filename, cell_format)\n row += 1\n\n except AttributeError:\n print(full_filename)\n\n workbook.close()", "def download_excel(data_url, temp_file):\r\n if not os.path.isfile(temp_file):\r\n urllib.request.urlretrieve(data_url, temp_file)", "def extract_urls_from_file(f, all_abns, links_existed):\n content = open(CURL_OUTPUT + f).read()\n soup = BeautifulSoup(content)\n\n fh = open(ALL_LINKS + 'all_links.txt', 'a')\n\n cnt = 0\n all_rows = soup.find_all('tr', {'class': 'rgRow'})\n for row in all_rows:\n all_cells = row.find_all('td')\n abn = all_cells[0].text\n if (abn in all_abns):\n link = all_cells[1].findChildren('a')[0]['href']\n if not link in links_existed:\n print(link)\n download_page(link, f, cnt)\n fh.write(link + '\\n')\n cnt = cnt + 1\n\n fh.close()", "def fetch_html(url, file_name, path, attempts_limit=5):\n if not os.path.exists(path):\n os.makedirs(path)\n if os.path.isfile(path + file_name) is False:\n attempts = 0\n while attempts < attempts_limit:\n try:\n BROWSER.get(url)\n element = BROWSER.find_element_by_xpath(\"/html\")\n html_content = element.get_attribute(\"innerHTML\")\n with open(path + file_name, \"w\") as f:\n f.write(html_content)\n log.debug(f\"Downloaded: {file_name}\")\n return(True)\n except Exception as e:\n attempts += 1\n log.info(type(e).__name__ + str(e))\n log.warning(\"Try again\" + file_name)\n else:\n log.error(f\"Cannot download {file_name}\")\n return(False)\n else:\n log.debug(f\"Already downloaded {file_name}\")\n return(True)", "def get_links(self, soup):\n \"\"\" @param soup: BeautifulSoup object that cointains the targeted links \"\"\"\n \"\"\" @type soup: BeautifulSoup object \"\"\"\n for link in soup.select('a[href^=\"https://\"]'): # All links which have a href element\n href = link.get('href') # The actually href element of the link\n if not any(href.endswith(x) for x in ['.csv', '.xls', '.xlsx']):\n print(\"No excel\")\n continue\n if not href in self.url_queue:\n self.url_queue.append(href) # Add the URL to our queue", "def retrieve_html(self, input_url, domain_folder_name, data_type, file_name):\n print \"retrieve_html: RETRIEVING HTML CODE FOR PAGE:\", input_url\n try:\n from_path = \"%s%s%s%s\" % (self.main_path, domain_folder_name, data_type, file_name)\n print \"retrieve_html: HTML CODE RETRIEVED LOCALY\\npath:%s\" % from_path\n with io.open(from_path, \"r\", encoding='utf-8') as f:\n content = f.read()\n bs_object = BS(content, 'html.parser')\n return bs_object\n \n except IOError:\n print \"retrieve_html: RETRIEVING HTML CODE ONLINE\"\n\n # 
time_to_sleep = 2\n # print \"SLEEPING FOR %d s.................\" % time_to_sleep\n # time.sleep(time_to_sleep)\n try:\n response = urllib2.urlopen(input_url)\n content = response.read()\n except:\n print \"retrieve_html: FAILED TO RETRIEVE HTML ONLINE, INCREASING failed_retrieving_html_counter\"\n content = \"<HTML><Retrieval_Error>\"\n self.failed_retrieving_html_counter += 1\n \n\n # for always proper utf-8 encoding\n bs_object = BS(content, 'html.parser')\n bs_content = bs_object.prettify('utf-8')\n u_content = unicode(bs_content, 'utf-8')\n #/\n\n to_path = \"%s%s%s%s\" % (self.main_path, domain_folder_name, data_type, file_name)\n print \"retrieve_html: WRITING RETRIEVED HTML_CODE TO FILE\\npath:%s\" % to_path\n with io.open(to_path, \"w\", encoding='utf-8') as f:\n f.write(u_content)\n\n # print \"html WRITTEN to %s.txt\" % file_name\n return bs_object", "def _extract_download_link(self, response1):\n \n found = re.search('<ul class=\"dataset\">(.*)</ul>', response1.content, re.IGNORECASE)\n link = \"\"\n if found:\n filelist_HTML = found.group(0).strip()\n found_link = re.search('href=\"(.*)\">', found.group(0), re.IGNORECASE)\n if found_link:\n link = found_link.group(1).strip()\n \n self.assertTrue(link!=\"\",\"Could not find any list of files after rendering html '%s'\" % response1.content)\n return link", "def extract_links():\n br = mechanize.Browser()\n br.open(BASE_URL)\n f = open('data/svodki/alllinks.csv', 'w')\n calurls = []\n # Collect all calendar urls with reports\n for year in range(2005, 2013):\n for month in range(1, 13):\n calurls.append([year, month, CALEND_URLPAT %(year, month)])\n\n # Update for current year (needs fixes later)\n for year in range(2013, 2014):\n for month in range(1, 3):\n calurls.append([year, month, CALEND_URLPAT %(year, month)])\n # Process calendar urls one by one\n for year, month, calurl in calurls:\n print calurl\n u = br.open(calurl)\n data = u.read()\n u.close()\n soup = BeautifulSoup(data)\n slist = soup.find('ul', attrs={'class': 'emergency_list'})\n urls = slist.findAll('a')\n for url in urls:\n s = '%s\\t%s\\t%s\\t%s\\t' % (unicode(year), unicode(month), url.text, urljoin(BASE_URL, url['href']))\n f.write((s + '\\n').encode('utf8'))\n print s\n f.close()", "def download_websites(self,\n df_fn: str,\n savedir: str = \"../webpages\") -> None:\n df = pd.read_csv(df_fn,\n sep='\\t',\n quotechar=\"\\'\",\n quoting=csv.QUOTE_NONE)\n\n hdr = {\n 'User-Agent': 'Mozilla/5.0',\n 'Accept-Encoding': 'none',\n 'Accept-Language': 'en-US,en;q=0.8',\n 'Connection': 'keep-alive'\n }\n os.makedirs(savedir, exist_ok=True)\n for (index, webpage) in zip(df['index'], df['redirect']):\n fn = os.path.join(savedir, f\"{index}.html\")\n req = urllib.request.Request(webpage, headers=hdr)\n try:\n with urllib.request.urlopen(req) as url_handl:\n html_code = url_handl.read()\n with open(fn, 'wb') as f:\n f.write(html_code)\n except Exception as e:\n print(e)\n print(f\"\\t{index} Failed to extract {webpage}\")", "def findhtml(pathused,ticker,typ):\n\n allfiles = [] # initializing the return list\n pathused += \"/\"+ticker.upper()+\"/\"+typ # since SEC_edgar has a standard way to store files as its the Ticker and inside \n # sec-edgar-filings ==> AAPL ==> 10-K \n \n for r,d,f in os.walk(pathused): # os.walk will return all the files inside the directory (with absolute path)\n # r is the absolute path\n # f is list of files in the folders\n \n if 'filing-details.html' in f: # if filing.html (SEC-edgar convention to name html files) is in this folder \n pathfol = 
r.replace(\"\\\\\",\"/\") # we modify it \n allfiles.append(pathfol+'/filing-details.html') # we append the absolute path\n else:\n continue\n return allfiles #and return it", "def download_files(self):", "def _download(self, request_dict={}):\n html = super()._download(request_dict)\n self.extract_cases_from_subpages(html)\n return html", "def download(self, url_list):\n for url in url_list:\n suitable_found = False\n for ie_var in self._ies:\n # Go to next InfoExtractor if not suitable\n if not ie_var.suitable(url):\n continue\n # Suitable InfoExtractor found\n suitable_found = True\n # Extract information from URL and process it\n ie_var.extract(url)\n # Suitable InfoExtractor had been found; go to next URL\n break\n if not suitable_found:\n self.trouble(u'ERROR: no suitable InfoExtractor: %s' % url)\n return self._download_retcode", "def download_all(self):\r\n # Fetch website list\r\n self.fetch_website_list()\r\n\r\n for website in self.website_list:\r\n self.download(website['id'])", "def extract_california(download_directory, date_after = '1-1-2001'):\n \n try:\n site = 'https://www.sos.ca.gov'\n with urllib.request.urlopen(site + '/elections/report-registration/') as resp:\n soup = BeautifulSoup(resp,'lxml')\n except HTTPError as e:\n print('Error Code: ', e.code)\n except URLError as e:\n print('Reason: ', e.reason)\n \n #Make a dictionary of all pages on the site which lead to voter files.\n #Only include those dates after the date_after parameter.\n pages = {}\n for link in soup.find_all('a', href=True):\n if '/elections/report-registration/' in link['href']:\n date = link.string[link.string.rfind('-') + 2:]\n if datetime.strptime(date, '%B %d, %Y') \\\n > datetime.strptime(date_after, '%m-%d-%Y'):\n pages[date] = site + link['href']\n \n #Make a dictionary of all excel files contained on each page\n files = {}\n for page_key, page_item in pages.items():\n try:\n with urllib.request.urlopen(page_item) as resp:\n soup = BeautifulSoup(resp, 'lxml')\n except HTTPError as e:\n print('Error Code: ', e.code)\n except URLError as e:\n print('Reason: ', e.reason)\n for link in soup.find_all('a', href=True):\n if 'xls' in link['href']:\n if page_key in files:\n files[page_key].append(link['href'])\n else:\n files[page_key] = []\n files[page_key].append(link['href'])\n \n #Download all files into chosen directory\n for path_key, path in files.items():\n for file in path:\n last_slash = file.rfind('/') \n folder = datetime.strptime(path_key, '%B %d, %Y').strftime('%Y-%m-%d')\n filename = download_directory + '\\\\' + folder + '\\\\' + file[last_slash + 1:]\n \n try:\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n except OSError:\n pass\n try:\n urllib.request.urlretrieve(file, filename)\n except HTTPError as e:\n print('Error Code: ', e.code)\n except URLError as e:\n print('Reason: ', e.reason)", "def get_files_to_download(self):\n\n self.logger.logMsg(\"Getting Files to Download\")\n\n download_links = []\n try:\n with open(self.main_xml) as xml_file:\n data_dict = xmltodict.parse(xml_file.read())\n\n xml_file.close()\n\n for docs in data_dict.get('response').get('result').get('doc'):\n for doc in docs.get('str'):\n\n if doc.get('@name') == 'download_link':\n link = doc.get('#text', None)\n if link is not None:\n download_links.append(link)\n except Exception as e:\n self.logger.logMsg(\"Error Getting Files to Download {}\".format(str(e)))\n raise Exception('Error in Getting Files For Download')\n\n self.logger.logMsg(\"Finished Getting Files to Download\")\n\n return 
download_links", "def browse(self, web_resource):\n url = web_resource.url\n\n # We don't need destination anchors\n current_full_url = url.split(\"#\")[0]\n # Url without query string\n current = current_full_url.split(\"?\")[0]\n # Get the dirname of the file\n currentdir = \"/\".join(current.split(\"/\")[:-1]) + \"/\"\n\n # Timeout must not be too long to block big documents\n # (for exemple a download script)\n # and not too short to give good results\n socket.setdefaulttimeout(self.timeout)\n\n headers = {}\n headers[\"user-agent\"] = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'\n try:\n resp = self.h.send(web_resource, headers=headers)\n except socket.timeout:\n self.excluded.append(url)\n return False\n except requests.exceptions.Timeout:\n self.excluded.append(url)\n return False\n except socket.error, msg:\n if msg.errno == 111:\n print(_(\"Connection refused!\"))\n self.excluded.append(url)\n return False\n except Exception, e:\n print(_(\"Exception in lswww.browse: {0}\").format(e))\n self.excluded.append(url)\n return False\n\n info = resp.getHeaders()\n code = resp.getCode()\n info[\"status_code\"] = code\n\n if not url in self.link_encoding:\n self.link_encoding[url] = \"\"\n\n proto = url.split(\"://\")[0]\n if proto == \"http\" or proto == \"https\":\n if not isinstance(proto, unicode):\n proto = unicode(proto)\n # Check the content-type first\n # if not info.has_key(\"content-type\"):\n # Sometimes there's no content-type...\n #so we rely on the document extension\n # if (current.split(\".\")[-1] not in self.allowed)\n # and current[-1] != \"/\":\n # return info\n # elif info[\"content-type\"].find(\"text\") == -1:\n # return info\n\n # No files more than 2MB\n if \"content-length\" in info:\n if int(info[\"content-length\"]) > 2097152:\n return False\n\n page_encoding = None\n resp_encoding = resp.getEncoding()\n content_type = resp.getHeaders().get('content-type', '')\n mime_type = content_type.split(';')[0].strip()\n swf_links = []\n js_links = []\n\n # Requests says it found an encoding... the content must be some HTML\n if resp_encoding and any(mime_type.startswith(t) for t in self.allowed_types):\n # But Requests doesn't take a deep look at the webpage,\n # so check it with BeautifulSoup\n page_encoding = BeautifulSoup.BeautifulSoup(resp.getRawPage()).originalEncoding\n if page_encoding and page_encoding.upper() != resp_encoding:\n # Mismatch ! Convert the response text to the encoding detected by BeautifulSoup\n resp.setEncoding(page_encoding)\n data = resp.getPage()\n else:\n # Can't find an encoding... 
beware of non-html content\n data = resp.getRawPage()\n if \"application/x-shockwave-flash\" in mime_type or web_resource.file_ext == \"swf\":\n try:\n flash_parser = swf_parser.swf_parser(data)\n swf_links = flash_parser.getLinks()\n except Exception, err_data:\n swf_links = err_data[1]\n data = \"\"\n elif \"/x-javascript\" in mime_type or \"/x-js\" in mime_type or \"/javascript\" in mime_type:\n js_links = lamejs.lamejs(data).getLinks()\n data = \"\"\n\n # Manage redirections\n if \"location\" in info:\n redir = self.correctlink(info[\"location\"], current, current_full_url, currentdir, proto, None)\n if redir is not None:\n if self.__inzone(redir) == 0:\n self.link_encoding[redir] = self.link_encoding[url]\n redir = HTTP.HTTPResource(redir)\n # Is the document already visited of forbidden ?\n if (redir in self.browsed) or (redir in self.tobrowse) or \\\n self.isExcluded(redir):\n pass\n else:\n # No -> Will browse it soon\n self.tobrowse.append(redir)\n\n htmlSource = data\n if page_encoding:\n bs = BeautifulSoup.BeautifulSoup(htmlSource)\n # Look for a base tag with an href attribute\n if bs.head:\n baseTags = bs.head.findAll(\"base\")\n for base in baseTags:\n # BeautifulSoup doesn't work as excepted with the \"in\" statement, keep this:\n if base.has_key(\"href\"):\n # Found a base url, now set it as the current url\n current = base[\"href\"].split(\"#\")[0]\n # We don't need destination anchors\n current = current.split(\"?\")[0]\n # Get the dirname of the file\n currentdir = \"/\".join(current.split(\"/\")[:-1]) + \"/\"\n break\n\n #if page_encoding != None:\n # htmlSource = unicode(data, page_encoding, \"ignore\")\n #else:\n # htmlSource = data\n\n p = linkParser(url)\n try:\n p.feed(htmlSource)\n except HTMLParser.HTMLParseError:\n htmlSource = BeautifulSoup.BeautifulSoup(htmlSource).prettify()\n if not isinstance(htmlSource, unicode) and page_encoding is not None:\n htmlSource = unicode(htmlSource, page_encoding, \"ignore\")\n try:\n p.reset()\n p.feed(htmlSource)\n except HTMLParser.HTMLParseError:\n p = linkParser2(url, self.verbose)\n p.feed(htmlSource)\n\n # Sometimes the page is badcoded but the parser doesn't see the error\n # So if we got no links we can force a correction of the page\n if len(p.liens) == 0:\n if page_encoding is not None:\n try:\n htmlSource = BeautifulSoup.BeautifulSoup(htmlSource).prettify(page_encoding)\n p.reset()\n p.feed(htmlSource)\n except UnicodeEncodeError:\n # The resource is not a valid webpage (for example an image)\n htmlSource = \"\"\n except HTMLParser.HTMLParseError:\n p = linkParser2(url, self.verbose)\n p.feed(htmlSource)\n\n found_links = p.liens + swf_links + js_links\n for lien in found_links:\n if (lien is not None) and (page_encoding is not None) and isinstance(lien, unicode):\n lien = lien.encode(page_encoding, \"ignore\")\n lien = self.correctlink(lien, current, current_full_url, currentdir, proto, page_encoding)\n if lien is not None:\n if self.__inzone(lien) == 0:\n # Is the document already visited of forbidden ?\n lien = HTTP.HTTPResource(lien, encoding=page_encoding, referer=url)\n if ((lien in self.browsed) or\n (lien in self.tobrowse) or\n self.isExcluded(lien) or\n self.__inzone(lien.url) != 0):\n pass\n elif self.nice > 0:\n if self.__countMatches(lien) >= self.nice:\n # don't waste time next time we found it\n self.excluded.append(lien.url)\n return False\n else:\n self.tobrowse.append(lien)\n else:\n # No -> Will browse it soon\n self.tobrowse.append(lien)\n # Keep the encoding of the current webpage for the future 
requests to the link\n # so we can encode the query string parameters just as a browser would do.\n # Of course websites encoding may be broken :(\n self.link_encoding[lien] = page_encoding\n\n for form in p.forms:\n action = self.correctlink(form[0], current, current_full_url, currentdir, proto, page_encoding)\n if action is None:\n action = current\n if self.__inzone(action) != 0:\n continue\n\n # urlencode the POST parameters here\n params = form[1]\n post_params = []\n files = []\n for kv in params:\n if isinstance(kv[0], unicode):\n kv[0] = kv[0].encode(page_encoding, \"ignore\")\n\n if isinstance(kv[1], list):\n fname = kv[1][0]\n if isinstance(fname, unicode):\n fname = fname.encode(page_encoding, \"ignore\")\n files.append([kv[0], [fname, kv[1][1]]])\n else:\n if isinstance(kv[1], unicode):\n kv[1] = kv[1].encode(page_encoding, \"ignore\")\n post_params.append([kv[0], kv[1]])\n\n form_rsrc = HTTP.HTTPResource(action,\n method=\"POST\",\n post_params=post_params,\n file_params=files,\n encoding=page_encoding,\n referer=url)\n if form_rsrc not in self.forms:\n self.forms.append(form_rsrc)\n if not (form_rsrc in self.browsed or form_rsrc in self.tobrowse):\n self.tobrowse.append(form_rsrc)\n if files:\n if form_rsrc not in self.uploads:\n self.uploads.append(form_rsrc)\n # We automaticaly exclude 404 urls\n if code == \"404\":\n self.excluded.append(url)\n #return {} # exclude from scan but can be useful for some modules maybe\n\n return True", "def fetch_pdf(url, browser):\n\tpass\n\n\t# grab link page\n\n\t# search soup for pdf file\n\n\t# grab pdf file and return it", "def download_file():\n for lines in urls:\n try:\n req.urlretrieve(lines, '{0}/{1}'.format(folder_path, lines.split('/')[-1]))\n time.sleep(1)\n print ('File - {} - downloaded successfully'.format(lines.split('/')[-1]))\n except urllib.error.HTTPError:\n print('File is missing or not reachable')\n print('Download Complete & Successful!')", "def get_test_pdf(self,ls_href):\n # test for length \n for url in ls_href:\n is_pdf = self.test_content_type(url)\n if is_pdf:\n filename = self.download_pdf_url(url)\n print \"downloaded file: \" + filename\n break \n else: \n print url + \" is not a pdf and was not downloaded\"\n filename = False\n next\n #if filename is False:\n # filename = ls_href\n # print \"returning filelist of since no identified pdfs\"\n return(filename)", "def process_page(html,dest):\n html0 = html[:]\n to_root = os.path.relpath(export_path,dest)\n to_root = to_root[1:]# Change '../' or '..' to '.' or './'\n \n # Fix links to directories first since that is easier to find\n html,N1 = re_dirlinks.subn(r'\\1=\"/\\2/index.html\"',html)\n \n # all pages links\n html,N2 = re_all.subn(r'\\1=\"/_all/\\2/index.html\"',html)\n \n # Add index.html for any other internal links. 
NOTE: by preprocessing\n # all internal links from the main content will already end in .html so this\n # is just special pages.\n for match in re_intlinks.finditer(html):\n dest = match.groups()[-1]\n ext = os.path.splitext(dest)[-1]\n if ext == '':\n old = r'{}=\"/{}\"'.format(*match.groups())\n new = r'{}=\"/{}\"'.format(match.groups()[0], os.path.join(match.groups()[1],'index.html') )\n html = html.replace(old,new)\n \n # Now make all links to the root\n html,N3 = re_intlinks.subn(r'\\1=\"{}/\\2\"'.format(to_root),html)\n \n # Remove the search stuff\n out = []\n ff = False\n for line in html.split('\\n'):\n if not ff and '<!-- search -->' not in line:\n out.append(line)\n continue\n \n if '<!-- search -->' in line:\n ff = True\n \n if ff and '<!-- /search -->' in line:\n ff = False\n\n html = '\\n'.join(out)\n return html", "def download_data(self):\n content = requests.get(self.TOP_250_LIST)\n soup = BeautifulSoup(content.content, 'lxml')\n movies = soup.select('tbody.lister-list tr')\n for m in movies:\n title_column = m.select('td.titleColumn')\n link = self.format_link(title_column[0].a['href'])\n title = self.format_title(title_column[0].a.string.encode('utf-8'))\n path = 'pages/{}.html'.format(title)\n if os.path.isfile(path):\n continue\n response = requests.get(link)\n with open(path, 'wr') as f:\n f.write(response.content)", "def get_pdf_links(self, driver):\n ls_urls = driver.find_elements_by_xpath(\"//a[contains(translate(text(),'PDF','pdf'),'pdf')]\")\n len_urls = len(ls_urls)\n print \"We have \" + str(len_urls) + \" urls with PDF in title\"\n if len_urls != 0:\n ls_href = [url.get_attribute('href') for url in ls_urls]\n elif len_urls == 0: \n ls_urls = driver.find_elements_by_xpath(\"//a[contains(@href,'pdf')]\")\n len_urls = len(ls_urls)\n print \"We have \" + str(len_urls) + \" urls with pdf in the link\"\n ls_href = [url.get_attribute('href') for url in ls_urls]\n\n len_href = len(ls_href)\n\n if len_href == 0: \n #check for frameset\n ls_urls = driver.find_elements_by_xpath(\"//frame[contains(@src,'pdf')]\")\n len_urls = len(ls_urls)\n print \"We have \" + str(len_urls) + \" urls in a frame \"\n ls_href = [url.get_attribute('src') for url in ls_urls]\n #print \"print one frame link: \" + ls_href[0]\n\n len_href = len(ls_href)\n\n if len_href == 0:\n #check for iframe\n ls_urls = driver.find_elements_by_xpath(\"//iframe[contains(@src,'pdf')]\")\n len_urls = len(ls_urls)\n print \"We have \" + str(len_urls) + \" in an Iframe\"\n ls_href = [url.get_attribute('src') for url in ls_urls]\n #print \"print one Iframe link: \" + ls_href[0]\n len_href = len(ls_href)\n\n if len_href == 0:\n return(False)\n else:\n return(ls_href)", "def download_file():\r\n global title_dict\r\n title=ResultsListbox.get(ResultsListbox.curselection())\r\n link=title_dict[title]\r\n file_dl=urllib.URLopener()\r\n file_dl.retrieve(link,str(title)+\".pdf\")", "def getHTML():\n for url in urls: #Because there might be multipe URLs to scrape, iterate through the list \n r = requests.get(url)\n r.raise_for_status()\n webpage_html = str(bs4.BeautifulSoup(r.text, \"html.parser\"))\n filenumber = str(urls.index(url)) #Create a variable called filenumber using the index of the url in the list of urls\n filename = \"output_\" + filenumber + \".html\" #This and above line avoid the loop rewriting the file name of the previous file.\n with open(filename, 'w') as file_object: #open a new (or existing) file to be written (or overwritten)\n file_object.write(webpage_html) #write the scraped HTML into the file\n 
file_object.close #close the file", "def scan(currentURL):\n\ttry:\n\t\tarchives_hDl = getContentDirectURL_GET(currentURL,'')\n\texcept IOError:\n\t\tlog <= (\"IOError @ %s\" % currentURL)\n\ttry:\n\t\thtmlContent= archives_hDl.read()\n\t\t#print archives_hDl.info()\n\texcept IOError, e:\n\t\tprint \"Cannot open the file,\",(e.strerror)\n\t\treturn\n\texcept AttributeError:\n\t\tprint (\"Grabber cannot retrieve the given url: %s\" % currentURL)\n\t\treturn\n \tprint currentURL\n\tparseHtmlLinks(currentURL,htmlContent)\n\tparseHtmlParams(currentURL,htmlContent)", "def download(self, url_match):\n pass", "def download_files(self) -> None:\n\n for name, url in self.files.items():\n print(f\"Download {name.split('/')[-1]}\")\n wget.download(url, os.path.join(\"data\", name))", "def webscrap_links(source_list):\n # Read URLs into list\n urls = []\n with open(source_list, \"r\") as file:\n urls = [line for line in file]\n\n # Create Pandas DataFrame to store and save CSV data\n dl_files = pd.DataFrame(columns=[\"filename\", \"url\"])\n\n # Retrieve source HTML per URL and process\n for url in urls:\n source = str(urlopen(url).read())\n\n # Raise an error if there are two tables\n assert source.count(\"<tbody>\") == 1\n\n # Select subset of source\n source = source[int(source.find(\"<tbody>\")):int(source.find(\"</tbody>\"))]\n\n # Split table per item (discard first bit)\n source = source.split(\"<tr>\")[1:]\n\n # For each entry, strip and extract download url and name\n prefix = \"https://filer.net\"\n for item in source:\n # Extract download url\n dl_url = item[item.find(\"\\\"\")+1:]\n dl_url = dl_url[:dl_url.find(\"\\\"\")]\n dl_url = prefix + dl_url\n\n # Extract download file name\n name = item[item.find(\"\\\">\")+2:]\n name = name[:name.find(\"</a>\")]\n\n # Add link to Dat\n dl_files = dl_files.append({\"filename\": name, \"url\": dl_url}, ignore_index=True)\n\n # Save DataFrame to CSV file\n dl_files.to_csv(\"../data/file_links.csv\", index=False)\n \n return", "def download_html_for_page(page_number, category_name, skip_if_downloaded):\n\toutput_file_name = output_dir + '/' + category_name + '_page_' + str(page_number) + '.html'\n\tif skip_if_downloaded and os.path.isfile(output_file_name):\n\t\treturn\n\n\tif not os.path.isdir(output_dir):\n\t\tos.mkdir(output_dir)\n\n\turl = 'http://toiletfinder.com/' + category_name + '?page=' + str(page_number)\n\tresponse = urllib2.urlopen(url)\n\thtml = response.read()\n\twith open(output_file_name, 'w') as html_file:\n \t\thtml_file.write(html)", "def get_files(self):\n # self.folder= +str(int(time.time()))\n if not os.path.exists(self.folder):\n os.mkdir(self.folder)\n while len(self.url_queue): # If we have URLs to crawl - we crawl\n href = self.url_queue.popleft() # We grab a URL from the left of the list\n filename = href.rsplit('/', 1)[-1]\n print(\"Downloading %s to %s...\" % (href, filename))\n fullname = os.path.join(self.folder, filename)\n urlretrieve(href, fullname)\n self.xlfnames.append(filename)", "def download_html_command():\n # 1. Get input scan id from Demisto\n scanid = demisto.args().get('scanid')\n # 2. Get the forensic webpage HTML from SlashNext API\n response = download_html(scanid=scanid)\n if response.get('errorNo') != 0:\n return\n # 3. 
Parse and format the response\n html_base64 = response.get('htmlData').get('htmlBase64')\n html_data = base64.b64decode(html_base64)\n\n html_file = fileResult('slashnext_{}.html'.format(scanid), html_data, entryTypes['file'])\n\n demisto.results({\n 'Type': entryTypes['file'],\n 'ContentsFormat': formats['text'],\n 'Contents': 'Forensics: Webpage HTML for URL Scan ID = {}'.format(scanid),\n 'File': html_file.get('File'),\n 'FileID': html_file.get('FileID')\n })", "def download_index(gaia_index):\n # Create regex to extract URL and file name\n reFile = re.compile(r'<a href=\"(.*(GaiaSource.*gz))\"\\>')\n # Open Gaia HTML index file\n response = urllib.request.urlopen(gaia_index)\n # Read content\n files = []\n page = response.readlines()\n # Extract URLs from the page\n for line in page:\n line = line.decode('utf-8')\n # Extract URLs\n f = reFile.findall(line)\n if (f):\n f = f[0]\n if (f[0].startswith('http')):\n # Absolute path\n files.append((f[0], f[1]))\n else:\n # Relative path\n files.append((urljoin(gaia_index, f[0]), f[1]))\n if len(files) == 0:\n print(f\"Couldn't extract file names from the index page.\\nCheck URL: {gaia_index}\")\n exit(1)\n return files", "def download_files(valid_links: list) -> list:\n print('Starting process...')\n print('')\n\n year_month_filepath = []\n\n for link_info in valid_links:\n\n # Get file extension\n extension = link_info[0].split('.')[-1]\n\n # Link to download\n link_to_download = link_info[0]\n\n # Get month\n month = link_info[1]\n\n # Get year\n year = link_info[2]\n\n # Create a standard filename to save\n file_name = f'{year}-{month}.{extension}'\n\n print(f'Downloading... {link_to_download} Saving... {file_name}')\n\n # Create a link to save into ./file directory\n link_to_save = f'./file/{file_name}'\n\n # Download file and save it\n wget.download(link_to_download, out=link_to_save)\n\n\n # Special treatment to zip and xlsx file\n if extension == 'zip':\n\n # Get right link to save (.csv) from zip function\n link_to_save = get_file_into_zip(link_to_save)\n\n elif extension == 'xlsx':\n # Get right link to save (.csv) from xlsx function\n link_to_save = excel2csv(link_to_save)\n\n # Include the tuple into a list\n year_month_filepath.append((year, month, link_to_save))\n\n print('Finishing process...')\n\n return year_month_filepath", "def main():\n for file_name in os.listdir(CONTENT_FOLDER):\n if file_name.endswith('.html'):\n try_generate_page(file_name)", "def __download_web(self):\n page = requests.get(self.url)\n\n if page.status_code == 200:\n return BeautifulSoup(page.content, \"html.parser\")", "def rxnorm_crawler():\n # Target webpage\n weburls=[\n 'https://www.nlm.nih.gov/research/umls/rxnorm/docs/rxnormfiles.html',\n 'https://www.nlm.nih.gov/research/umls/rxnorm/docs/rxnormarchive.html'\n ]\n for weburl in weburls:\n # Get contents of webpage\n conn = urllib2.urlopen(weburl)\n html = conn.read()\n # Find urls of all RxNorm files\n pattern = '<a\\s*href=[\\'|\"](.*?/kss/rxnorm/RxNorm_full_\\d+.zip)[\\'|\"]>'\n rxnorm_urls = re.findall(pattern, html)\n for url in rxnorm_urls:\n r = requests.get(url)\n if r.status_code == 200:\n #upload the file\n file_name = re.findall('.*?(\\d+.zip)', url)[0]\n k = Key(bucket)\n k.key = 'rxnorm/' + file_name\n k.content_type = r.headers['content-type']\n k.set_contents_from_string(r.content)\n # Need to add cookies information", "def search_page_download(self):\n print('Downloading:', self.url)\n driver = webdriver.Chrome()\n driver.get(self.url)\n\n ve_code_opts = 
driver.find_element_by_name(\"ve_code\").find_elements_by_tag_name(\"option\")\n ve_code_opts = ve_code_opts[1:]\n for option in ve_code_opts:\n value = option.get_attribute(\"value\")\n location = option.text\n\n url = \"http://www.fishbase.se/trophiceco/EcosysRef.php?ve_code=\" + value + '&sp='\n\n self.total_urls.append({\n \"value\": value,\n \"location\": location,\n \"url\": url\n })\n\n driver.close()", "def download(url, path=\"./downloads\", save_copy=False, filename=None, headers=None, jar=None, redirects=0, redirect_limit=1):\n if os.path.exists(url):\n return url\n if url == \"\":\n return \"\"\n if url[0] == '/':#likely a library link\n return \"\"\n if headers == None:\n headers = {'User-Agent':'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:68.0) Gecko/20100101 Firefox/68.0'}\n if jar == None:\n jar = requests.cookies.RequestsCookieJar()\n if not os.path.exists(path):\n os.makedirs(path)\n\n try:\n res = requests.head(url, headers=headers, cookies=jar, allow_redirects=True)\n if 'Content-Type' in res.headers:\n content_type, encoding = filetype(res.headers['Content-Type'])\n else:\n print(\"No content-type specified\",res.headers.keys())\n return \"\"\n #if link is html crawl page for pdf link\n if content_type == \"html\" and redirects < redirect_limit:\n res = requests.get(url, headers=headers, cookies=jar)\n if not encoding:#assume ascii and strip non ascii bits\n html =res.content.decode('ascii', 'ignore')\n else:\n html = res.content.decode(encoding, 'ignore')\n pdfurl = findpdflink(html, url)\n return download(pdfurl, headers=headers, jar=jar, path=path, filename=filename, save_copy=save_copy, redirects=redirects+1, redirect_limit=redirect_limit)\n #guess a file type\n if filename == None:\n #get filename from request header\n if 'Content-Disposition' in res.headers:\n header = res.headers['Content-Disposition']\n filename = cgi.parse_header(header)[1]['filename']\n #if none avaliable take the url end\n else:\n filename = url.rsplit('/',1)[-1]\n file_path = f\"{path}/{filename}\"\n\n exists = os.path.isfile(file_path)\n if exists and not save_copy:\n print(\"Exists:\", file_path)\n return file_path\n res = requests.get(url, headers=headers, cookies=jar)\n if exists and save_copy:#give new name\n i=0\n while os.path.exists(f\"{path}/{filename}.{i}\"):\n i += 1\n file_path = f\"{path}/{filename}.{i}\"\n print(\"Downloaded to:\",file_path)\n with open(file_path, \"wb\") as fp:\n fp.write(res.content)\n return file_path\n except HTTPError as err:\n print(\"Error\", err.code)\n except (ConnectionError, requests.exceptions.TooManyRedirects) as err:\n print(err)\n except:\n print(\"Unexpected error:\", sys.exc_info()[0])\n\n return \"\"", "def download_page(link, f, cnt):\n try:\n page = ur.urlopen(link).read().decode()\n fh = open(ALL_PAGES + f + str(cnt) + '.htm', 'w')\n\n fh.write(page)\n fh.close()\n except Exception:\n print('Something wrong with link ' + link)", "def scrape(self):\n try:\n self.result = urlfetch.fetch(self.url)\n except DownloadError:\n self.result = urlfetch.fetch(self.url) \n if ((self.result.status_code == 200) and\n (self.result.content_was_truncated == 0)):\n self.soup = BeautifulSoup(self.result.content)\n else:\n logging.critical(\"Bad Status Code: \", self.result.status_code, self.url)\n sys.exit(1)", "def exactor_links(self, response: BeautifulSoup):\n raise NotImplementedError", "def __handle_one_bite(one_bite, parsed_html, url, path):\n if one_bite == 'yes':\n print 'Downloading Contents Of Page {}'.format(url)\n log_bite = parsed_html\n try:\n 
Crawler.__write_file(path, url, log_bite)\n except IOError as excep:\n logging.error('Error In URL %s, Reason: %s', url, excep.message)\n return True\n return False", "def download(all):\n print(\"Downloading\")", "def dod():\n file = requests.get(\"https://www.bewakoof.com/design-of-the-day\")\n soup = bs4.BeautifulSoup(file.text, \"lxml\")\n # print(soup)\n\n linkList = soup.select(\"a[class='col-sm-4 col-xs-6'] > div > div > div > img:nth-of-type(2)]\")\n # soup.select(\"div[id=foo] > div > div > div[class=fee] > span > span > a\")\n for i in linkList:\n if \"t-shirt-men\" in str(i):\n # print(i.get('src'))\n webbrowser.open(i.get('src'))", "def download_resume(self, links):\n\t\tbot = self.bot\n\n\t\tfor link in links:\n\t\t\tbot.get(link)\n\t\t\ttime.sleep(5)\n\t\t\tmore = bot.find_element_by_class_name(\"pv-s-profile-actions__overflow-toggle.artdeco-button\").click()\n\t\t\ttime.sleep(2)\n\t\t\tsave_pdf = bot.find_element_by_class_name(\"pv-s-profile-actions--save-to-pdf\").click()\n\t\t\ttime.sleep(5)", "def download_url(url):\n # use url_checker to verify URL is using the full address\n url_name = url_checker(url)\n if url_name:\n print(f'Requesting page {url_name}')\n tstamp = get_tstamp()\n # set the headers like we are a browser\n headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko)'\n ' Chrome/72.0.3626.109 Safari/537.36'}\n # download the page\n response = requests.get(url, headers=headers)\n\n # create directory for saving file\n URL_DIR_NAME = os.path.join(OUTPUT_DIR, str(url_name))\n URL_TM_DIR_NAME = os.path.join(URL_DIR_NAME, str(tstamp))\n # create directory using url name and timestamp for directories\n ensure_dir(URL_TM_DIR_NAME)\n # save downloaded page as a .txt file\n with open(f'{URL_TM_DIR_NAME}{slash}response.html', 'w') as f:\n print(response.text, file=f)\n # use beautiful soup to extract links\n links = []\n soup = BeautifulSoup(response.text, 'html.parser')\n tags = soup.find_all('a')\n # append links to links list\n for tag in tags:\n links.append(tag.get('href'))\n # get only unique values and sort\n my_set = set(links)\n u_links = list(my_set)\n u_links.sort()\n # save links as a .txt file\n with open(f'{URL_TM_DIR_NAME}{slash}links.txt', 'w') as f:\n for list_item in u_links:\n f.write(f'{list_item}\\n')", "def getDownload(self, html, episode_number):\n soup = BeautifulSoup(html, \"html.parser\")\n download = soup.find_all('source')\n if download:\n self.downloads[\"Episode %s.mp4\" % str(episode_number)] = download[0]['src']\n return\n\n print(\"[!] 
Download link not found for episode %s\" % str(episode_number))", "def download(self,fn):\n\t\treturn False #TODO: implement meme download", "def get_urls(base_url):\n res = requests.get(base_url, headers=HEADERS)\n res = BeautifulSoup(res.text, 'html.parser')\n res = res.find_all(href=re.compile('pdf'))\n return res", "def download_files(self, inpDate):\n # construct day of year from date\n inpDoY = inpDate.timetuple().tm_yday\n strDoY = str(inpDoY)\n if inpDoY < 10:\n strDoY = \"00\" + str(inpDoY)\n if ( inpDoY > 10) & (inpDoY < 100):\n strDoY = \"0\" + str(inpDoY)\n\n dwnldUrl = self.baseUrl +\\\n \"data_fetch_l1c_imaging_v013?y=\"+\\\n str(inpDate.year) + \"&d=\"+strDoY\n driver = webdriver.Chrome()\n driver.get(dwnldUrl)\n\n try:\n element = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.ID, 'output')))\n filesDiv = driver.find_element_by_id(\"output\")\n fileLinks = filesDiv.find_elements_by_css_selector('a')\n for uEl in fileLinks:\n fUrl = uEl.get_attribute('href')\n if \"L1C-2-disk\" not in fUrl:\n continue\n print \"currently downloading-->\", fUrl\n rf = requests.get( fUrl, verify=False )\n currFName = rf.url.split(\"/\")[-1]\n outDir = self.outBaseDir + inpDate.strftime( \"%Y%m%d\" ) + \"/\"\n if not os.path.exists(outDir):\n os.makedirs(outDir)\n with open( outDir + currFName, \"wb\" ) as ssusiData:\n ssusiData.write( rf.content )\n finally:\n driver.quit()", "def extract_details( session_requests, job_id ):\n \n url_prefix = CONFIG[\"url_prefix\"]\n \n #Extract html from web\n url = CONFIG[\"url_jobno\"] + str(job_id)\n tree = scrape_html(session_requests, url)\n \n #Extact description\n title = \"; \".join(tree.xpath(\"//p[@class='listheader']/text()\"))\n description = \"; \".join(tree.xpath(\"//p//text()\")) #more than one element\n \n #Extract files\n num_file = int(tree.xpath(\"count(//p[contains(text(),'Job Description Document :')]//a)\"))\n loop_range = min(num_file, (MAX_NUM_OF_FILES - 1))\n \n file_link = [\"NA\"] * MAX_NUM_OF_FILES\n file_name = [\"NA\"] * MAX_NUM_OF_FILES\n down_file_name = [\"NA\"] * MAX_NUM_OF_FILES\n \n if (num_file > (MAX_NUM_OF_FILES - 1)):\n file_link[(MAX_NUM_OF_FILES - 1)] = \"More than 9 files\"\n file_name[(MAX_NUM_OF_FILES - 1)] = \"More than 9 files\"\n \n for i in range(loop_range):\n file_link[i] = url_prefix + tree.xpath(\"//p[contains(text(),'Job Description Document :')]//a/@href\")[i]\n file_name[i] = tree.xpath(\"//p[contains(text(),'Job Description Document :')]//a/text()\")[i]\n \n ext = find_file_extention(file_name[i])\n down_file_name[i] = download_file(session_requests, file_link[i], job_id, i, ext)\n \n # dataframe\n row_names_link = init_file_dataframe()[1]\n row_names_name = init_file_dataframe()[2]\n row_names_down = init_file_dataframe()[3]\n \n df_link = np.transpose(pd.DataFrame(file_link, row_names_link))\n df_name = np.transpose(pd.DataFrame(file_name, row_names_name))\n df_down = np.transpose(pd.DataFrame(down_file_name, row_names_down))\n \n df_file = pd.DataFrame(data = {\"job_title\": [title], \"description\": [description], \"num_of_file\": [loop_range]})\n df_file = pd.concat([df_file.reset_index(drop=True), df_link], axis=1, sort=False)\n df_file = pd.concat([df_file.reset_index(drop=True), df_name], axis=1, sort=False)\n df_file = pd.concat([df_file.reset_index(drop=True), df_down], axis=1, sort=False)\n \n return df_file", "def download_data(self):\n headers = {'User-Agent': 'Mozilla/5.0',}\n\n #Request for html data of url page\n r = requests.get(self.url, headers = headers, 
allow_redirects=True)\n soup = BeautifulSoup(r.text, \"html.parser\")\n\n #Checking if folder path exists, if not, creats it\n i=0\n while i<len(self.folder)-1:\n if self.folder[i] == '/':\n if not os.path.isdir(self.folder[:i]):\n os.mkdir(self.folder[:i])\n i+=1\n if i==len(self.folder)-1:\n if not os.path.isdir(self.folder):\n os.mkdir(self.folder)\n\n # if not os.path.isdir(self.folder):\n # os.mkdir(self.folder)\n\n #Gets every href to zip file with data\n entries = []\n for link in soup.find_all('a'):\n if re.search(\"^data/.*.zip\", link.get('href')):\n entries.append(link.get('href'))\n\n #Gets the newest dataset\n self.getCurrentData(entries)\n\n i=0\n #Saves each file in dataset\n for list in self.ListOfZipFiles:\n if not os.path.isfile(self.folder+list[4:]):\n r = requests.get(self.url+list)\n open(self.folder+list[4:], 'wb').write(r.content)\n #deletes prefix \"data/\"\n self.ListOfZipFiles[i] = list[4:]\n i+=1", "def downloadPdfs(soup, full_path, pattern, subdir):\n # Create subdir, exams or solutions, if not already exists\n path_to_pdfs = os.path.join(full_path, subdir)\n if not os.path.exists(path_to_pdfs):\n os.makedirs(path_to_pdfs)\n\n # Download all the pdfz!\n for x in soup.find_all('a', text=re.compile(pattern)):\n url_to_exam = x['href']\n if url_to_exam.endswith('.pdf'):\n print download_file(url_to_exam, path_to_pdfs), ' downloaded'", "def download(self, url, directory):\n while True:\n try:\n urlretrieve(url, directory) # this fails if no internet\n break\n except IOError:\n if not ask(\"Notes\", \"Error: No internet connection\", self):\n raise", "def download_link(self): # pragma: no cover\n\n if PyFunceble.Check(self.file).is_url():\n # We get the destination.\n destination = self.file.split(\"/\")[-1]\n\n if self.file and self.autocontinue.is_empty():\n # The given file is an URL.\n\n if (\n not PyFunceble.path.isfile(destination)\n or PyFunceble.INTERN[\"counter\"][\"number\"][\"tested\"] == 0\n ):\n # The filename does not exist in the current directory\n # or the currently number of tested is equal to 0.\n\n # We download the content of the link.\n Download(self.file, destination).text()\n\n # We update the global file with the destination.\n self.file = destination", "def __get_files(self):\r\n \r\n files = []\r\n with requests.Session() as s:\r\n headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'}\r\n respons = s.get(self.__url, headers=headers).text\r\n soup = BeautifulSoup(respons, 'html.parser')\r\n data_files = [link.get('href') for link in soup.find_all('a', class_=\"btn-primary\")]\r\n for year in soup.find_all('td', class_=\"align-middle\"):\r\n regex = re.compile(r\"data/data-?gis({year}|\\-rok\\-{year})\\.zip\".format(year=year.text))\r\n if any((match := regex.match(link)) for link in data_files):\r\n files.append(match.group(0))\r\n else:\r\n files.append(data_files[-1])\r\n return files", "def parse_docs_from_page(self, page_url: str, page_text: str) -> Iterable[Document]:\n # get the data\n data = requests.get(page_url)\n\n # load data into bs4\n soup = BeautifulSoup(data.text, 'html.parser')\n # links = []\n pdf_dis = []\n dates = []\n table = []\n version_hash_fields = []\n\n for tr in soup.find_all('tr'):\n date_col = soup.find_all('td', attrs={'class': 'fd-col2'})\n hyperlink_col = soup.find_all('td', attrs={'class': 'fd-col1'})\n values = [td.text for td in tr.find_all('td')]\n table.append(values)\n for link in hyperlink_col:\n pdf_url = 'https://www.health.mil/' + link.find('a')['href']\n pdf_di = 
DownloadableItem(doc_type='pdf',\n web_url=pdf_url)\n pdf_dis.append(pdf_di)\n for date in date_col:\n dates.append(date.text)\n\n doc_nums = []\n doc_titles = []\n doc_names = []\n for row in table[1:]:\n doc_data = row[0].split(':')\n\n if len(doc_data) == 1: # if no colon then no doc number\n if doc_data[0] == \"(DTM)-19-004 -Military Service by Transgender Persons and Persons with Gender Dysphoria (Change 1)\":\n doc_nums.append(\"19-004\")\n doc_names.append(\"DTM\")\n doc_titles.append(doc_data[0][14:])\n version_hash_fields.append({\"doc_name\": 'DTM', \"doc_title\": doc_data[0][14:]})\n else:\n doc_nums.append(\" \")\n doc_titles.append(doc_data[0])\n doc_names.append(doc_data[0])\n version_hash_fields.append({\"doc_name\": doc_data[0], \"doc_title\": doc_data[0]})\n else:\n\n tmptitle = doc_data[1][1:].replace(\"\\u201cClinical\",\"Clinical\").replace(\"System,\\u201d\",\"System\").replace(\"BUILDER\\u2122 \", \"Builder\").replace(\"\\u2013\",\"\")\n\n if \"Volume\" in tmptitle:\n doc_nums.append(doc_data[0][7:]+\" Volume \"+tmptitle.split()[-1])\n else:\n doc_nums.append(doc_data[0][7:])\n doc_titles.append(doc_data[1][1:].replace(\"\\u201cClinical\",\"Clinical\").replace(\"System,\\u201d\",\"System\").replace(\"BUILDER\\u2122 \", \"Builder\").replace(\"\\u2013\",\"\"))\n doc_names.append(doc_data[0][:6])\n\n version_hash_fields.append({\"doc_name\": doc_data[0][:7], \"doc_title\": doc_data[1]})\n\n parsed_docs = []\n page_url = 'https://www.health.mil/About-MHS/OASDHA/Defense-Health-Agency/Resources-and-Management/DHA-Publications'\n num_docs = len(doc_nums)\n for i in range(num_docs):\n # put all the relevant info into dictionaries\n doc = Document(doc_type=doc_names[i].replace(\" \",\"-\"),\n doc_title=doc_titles[i],\n doc_num=doc_nums[i],\n doc_name=doc_names[i].replace(\" \",\"-\")+\" \"+doc_nums[i],\n publication_date=dates[i],\n cac_login_required=False,\n crawler_used='dha_pubs',\n source_page_url=page_url,\n downloadable_items=[pdf_dis[i]],\n version_hash_raw_data=version_hash_fields[i])\n parsed_docs.append(doc)\n\n return parsed_docs", "def run(self):\n urls_to_download = self._get_links()\n results = ThreadPool(8).imap_unordered(self._download_url, urls_to_download)\n for path in results:\n print(path)", "def download_pdfs_from_site(url: str, verbose=True):\n site_url = get_site_url(url)\n html = requests.get(url).text\n\n\n all_links = get_links(html)\n pdf_links = [link for link in all_links if link.endswith('pdf')]\n pdf_links = maybe_add_full_links(pdf_links, site_url)\n \n if verbose:\n print('Found the following pdf links')\n print(pdf_links)\n pdf_links = tqdm.tqdm(pdf_links)\n for link in pdf_links:\n download_from_link(link)", "def get_table_download_link(df, file_name):\n if 'embedding_average' in df.columns:\n df = df.drop(columns='embedding_average')\n # df = results_output.drop(columns='embedding_average')\n # csv = df.to_csv(index=False)\n # b64 = base64.b64encode(csv.encode()).decode() # some strings <-> bytes conversions necessary here\n # href = f'<a href=\"data:file/csv;base64,{encoded}\">Download Excel File</a> (right-click and save as &lt;some_name&gt;.csv)'\n # href = f'<a href=\"data:file/csv;base64,{b64}\">Download CSV File</a> (right-click and save as &lt;some_name&gt;.csv)'\n towrite = io.BytesIO()\n df.to_excel(towrite,index = False, encoding = 'UTF-8') # write to BytesIO buffer\n towrite.seek(0) # reset pointer\n encoded = base64.b64encode(towrite.read()).decode() # encoded object\n href = f'<a href=\"data:file/csv;base64,{encoded}\" 
download =\"{file_name}\">Download Excel File</a> (right-click and save as &lt;some_name&gt;.csv)'\n st.markdown(href, unsafe_allow_html=True)", "def get_table_download_link(df):\r\n\tval = to_excel(df)\r\n\tb64 = base64.b64encode(val).decode() # val looks like b'...'\r\n\thref=f'<a href=\"data:application/octet-stream;base64,{b64}\" download=\"captura.xlsx\" target=\"_blank\">Descargar: Haga clic derecho y guardar enlace como...</a>' # decode b'abc' => abc\t\r\n\treturn href", "def download_presentation(epObject, uc):\r\n fileDict = make_file_dict()\r\n fileDict = populate_file_dict(epObject, uc, fileDict)\r\n now = str(datetime.datetime.now().hour) + \\\r\n str(datetime.datetime.now().minute) + \\\r\n str(datetime.datetime.now().second)\r\n directoryName = epObject.Name.replace(\" \", \"\") + \"_presentation_\" + now\r\n os.mkdir(directoryName)\r\n os.chdir(directoryName)\r\n temp = tempfile.TemporaryFile()\r\n temp.write(urllib.request.urlopen(fileDict['pageUrls'][0]).read())\r\n temp.seek(0)\r\n update_page(temp, fileDict, \"index.html\", index=True)\r\n temp.close()\r\n os.mkdir(\"Pages\")\r\n os.chdir(\"Pages\")\r\n for (pageUrl, pageFileName) in zip(fileDict['pageUrls'][1:], \r\n fileDict['pageFileNames'][1:]):\r\n temp = tempfile.TemporaryFile()\r\n temp.write(urllib.request.urlopen(pageUrl).read())\r\n update_page(temp, fileDict, pageFileName)\r\n temp.close()\r\n os.chdir(\"../\")\r\n os.mkdir(\"Content\")\r\n os.chdir(\"Content\")\r\n for (fileUrl, fileId) in zip(fileDict['fileUrls'], fileDict['fileIds']):\r\n fileName = eportfolio.get_ep_object_properties(uc, fileId).\\\r\n FileName.strip()\r\n urllib.request.urlretrieve(fileUrl, fileName)\r\n os.chdir(\"../\")\r\n os.mkdir(\"Formatting\")\r\n os.chdir(\"Formatting\")\r\n for (cssUrl, cssFileName) in zip(fileDict['cssUrls'],\r\n fileDict['cssFileNames']):\r\n temp = tempfile.TemporaryFile()\r\n temp.write(urllib.request.urlopen(cssUrl).read())\r\n temp.seek(0)\r\n update_css_file(cssUrl, temp, cssFileName)\r\n temp.close()\r\n for imgUrl in fileDict['imgUrls']:\r\n fileName = imgUrl[imgUrl.rfind(\"/\"): ]\r\n if fileName.find(\"?\") > 0:\r\n fileName = fileName[: fileName.find(\"?\")]\r\n urllib.request.urlretrieve(imgUrl, fileName)\r\n os.chdir(\"../\")\r\n print(str(fileDict))\r\n return fileDict", "def extract_image(page_html, family_url, folder):\n image_extractor = Extractor(page_html, family_url)\n for url in image_extractor.get_image_table():\n image_page_url = urljoin(family_url, url)\n # print(image_page_url)\n imres = requests.get(image_page_url)\n image_page_extractor = Extractor(imres.text, image_page_url)\n image_src, image_name = image_page_extractor.get_image_link()\n\n image_link = urljoin(image_page_url, image_src)\n\n print(image_link, image_name)\n # Download image\n fetch(image_link, image_name, folder)", "def requestFromSite(self, link_s):\n \n with open('dividend_history.csv', 'wb') as handle:\n request = requests.get(link_s, stream=True)\n \n for block in request.iter_content(1024):\n if not block:\n break\n \n handle.write(block)", "def find_download_url(self):\n devpage = requests.get(DEVPAGE_URL)\n soup = BeautifulSoup(devpage.text, 'html.parser')\n rt = soup.find(id='rightcolumn')\n anchors = rt.findAll('a')\n for anchor in anchors:\n href = anchor.attrs['href']\n if href.endswith('.zip'):\n return href\n\n # if got this far, no GTFS download link found\n return None", "def get_table_download_link(df):\r\n\tval = to_excel(df)\r\n\tb64 = base64.b64encode(val) # val looks like b'...'\r\n\treturn f'<a 
href=\"data:application/octet-stream;base64,{b64.decode()}\" download=\"extract.xlsx\">Download xlsx file</a>' # decode b'abc' => abc\r", "def test_get_indexhtml(self):\n url = self.baseurl + \"/main\"\n req = urllib2.urlopen(url, None, 3)\n self.assertTrue( req.getcode() == 200 , \"200 OK Not FOUND!\")", "def download_data(self):\r\n \r\n for file in self.__files:\r\n file_to_download = os.path.join(self.__folder, os.path.basename(file))\r\n if not os.path.isfile(file_to_download):\r\n self.__download_file(file)", "def download(self, url):\n try:\n webFile = urllib.urlopen(url)\n localFile = open(self.workdir + \"/\" + url.split('/')[-1], 'w')\n localFile.write(webFile.read())\n webFile.close()\n localFile.close()\n except IOError:\n print(\"could not get url \" + url)", "def download_pdf(url):\n # Extracts the last part of the URL to be used as the name of the file\n local_filename = url.split('/')[-1].replace('%','')\n \n if local_filename not in REPORTS:\n with urllib.request.urlopen(url) as r:\n with open(f'reports/{local_filename}', 'wb') as f:\n f.write(r.read())\n \n # updates report files in the directory\n return f'reports/{local_filename}'\n else:\n print(f'Already in the database - {local_filename}')\n return False", "def ferry_data_download(URL):\n explanation = 'File exists'\n file_downloaded = True\n # Request if the thredds server is working, add .html to URL\n req = requests.get(URL + '.html')\n if req.status_code == 200:\n \"\"\"File exists and is good for download, so write file\"\"\"\n print('File is ok')\n explanation = 'Good URL, File downloaded'\n file_downloaded = True\n ferry = xr.open_dataset(URL)\n else:\n print('File not found or unavailable')\n explanation = ' File not found or unavailable'\n file_downloaded = False\n ferry = np.nan\n return (ferry, file_downloaded, explanation)", "def collect_html(args):\n url_list = args.url_list\n output_dir = args.output_dir\n\n print(url_list)\n\n # do some checks\n try: \n assert os.path.exists(url_list), 'url_list must exist'\n assert os.path.exists(output_dir), 'output_dir must exist'\n except AssertionError as err: \n logger.error('Failed check: {}'.format(err)) \n return \n\n urls = common.read_file(url_list)\n \n for url in urls: \n logger.debug(url) \n\n html = spy_tools.collect_html(url)\n out = url.split('/')\n output = os.path.join(output_dir, out[-1] + '.html')\n common.write_file(html, output)", "def fetch_pages(folder=pages_folder):\r\n if not (folder.endswith('/') or folder.endswith('\\\\')):\r\n folder += '/'\r\n _classes = classes\r\n if not _classes:\r\n _classes = extract_classes(getsoup())\r\n for classnum in _classes.keys():\r\n with open(folder + str(classnum) + '.html', 'x') as f:\r\n f.write(getsoup(classnum).prettify())", "def html_link(filings_list):\n html_links = {}\n for lista in filings_list:\n \n for e in lista: \n if 'html' in e:\n url = 'https://www.sec.gov/Archives/' + e \n table = pd.read_html(url)[0]\n table = table['Document'][0].split(' ')\n\n url_formatted = url.replace('-', '').replace('index.html', '')\n html_links[lista[3]] = url_formatted+'/'+table[0] \n\n return html_links", "def download(url, folder, prefix):\n if url.startswith(\"static:\"):\n url = url.lstrip(\"static:\")\n\n changes = url != url.format(*([0] * 100)) # this is ugly\n\n if not changes:\n download_with_handler(download_url, url, folder, prefix)\n else:\n i = 1\n failed_in_a_row = 0\n while True:\n success = download_with_handler(download_url, url.format(i), folder, prefix)\n\n if not success:\n failed_in_a_row 
+=1\n\n if failed_in_a_row > 3:\n break\n else:\n failed_in_a_row = 0\n\n i += 1", "def download_linked_files(name=None, links=None, pattern=None):\n if name and links and pattern:\n record_of_download = ['Tab-delimited record of information for each file. item 0: link; item 1: st_size; item 2: mtime converted to human-readable']\n timestamp = construct_date()\n for link in links:\n extension = link.split('.')[-1]\n if extension not in ['html', 'pdf']:\n print('Unknown extension:', extension, ' in link:', link)\n sys.exit()\n # Isolate the file-number of the linked-to file within its title.\n file_no = re.sub(pattern, r'\\1', link)\n filename = name + '_' + file_no + '_' + timestamp + '.' + extension\n # Download file and convert to text. \n # (Only wget succeeds with some servers; urllib.request is blocked.)\n os.system('wget ' + link + ' -O ' + \n os.path.join('..', extension, filename))\n os.system('pdftotext ' + os.path.join('..', extension, filename))\n # Find original size and modification time and save this info.\n (_, _, _, _, _, _, st_size, _, mtime, _) = os.stat(\n os.path.join('..', extension, filename))\n filename = filename.replace('.' + extension, '.txt')\n print(' new filename:', filename)\n os.system('mv ' + os.path.join('..', extension, filename) + ' ' +\n os.path.join('..', 'txt/'))\n record_of_download.append(\n link + '\\t' + str(st_size) + '\\t' + \n convert_from_unixtime(mtime))\n print('record:', record_of_download[-1])\n print('done, {} {}'.format(name, timestamp))\n record_of_download = '\\n'.join(record_of_download)\n with open(os.path.join(\n '..', 'indexes', 'download_data_' + name + '_' + timestamp + \n '.txt'), 'w') as f:\n f.write(record_of_download)", "def wget_content(url):\n\n try:\n\n for i in range(len(url)):\n url[i].replace(' ', \"%20\") if i > url.find('?') else url[i]\n\n with TemporaryDirectory() as dirname:\n retval = ''\n retcode = subprocess.Popen([\"wget\", \"--tries=5\", '--timeout=10', url, \"-O\", os.path.join(dirname, \"1.txt\")])\n retcode.wait()\n file_name = os.path.join(dirname, \"1.txt\")\n handle = open(file_name)\n if handle:\n retval = handle.read()\n\n\n except Exception as ex:\n if url.startswith(\"https://\") and \"handshake failure\" in retval:\n return wget_content(url.replace(\"https://\", \"http://\"))\n else:\n wxpush(\"Crawler module failure\", traceback.extract_stack(), True)\n\n return retval or \"\"", "def download_engine(fcsd): #fcsd = first comic strip date\n\n url_list = get_comic_strip_url(fcsd)\n\n for url in url_list:\n session = requests.Session()\n response = session.get(url)\n download_url = get_image_comic_url(session, response)\n# download_dilbert(session, download_url)\n return download_url", "def download_files(path):\n return edgar.download_index(path,2019,skip_all_present_except_last=False)", "def GetUrlSecond(self):\n counter = 0\n file_handler = open(self.base_dir_url+\"url_first.html\", \"r\")\n for line in file_handler.readlines():\n url_name = line.strip().split()\n \n self.url = 'http:'+url_name[0]\n if re.search('www.taobao.*', url_name[0]):\n self.host = \"www.taobao.com\"\n else:\n getHost = re.findall('//(.*?.com)', url_name[0])\n self.host = getHost[0]\n print url_name[1].decode('utf-8').encode('gbk') +':'+ getHost[0]+','+self.url\n self.referer = \"https://www.taobao.com/\"\n match_txt = open(self.base_dir_url+url_name[1].decode('utf-8').encode('gbk')+'.txt', \"w\")\n content = self.GetContent()\n print >> match_txt, content\n match_txt.close()\n if (counter < 4):\n match = 
open(self.base_dir_url+url_name[1].decode('utf-8').encode('gbk'), \"w\")\n self.DealUrlSecond14(match, content)\n match.close()\n elif (counter > 3 and counter < 8) or (counter > 11 and counter < 16) or (counter > 19 and counter < 24):\n match = open(self.base_dir_url+url_name[1].decode('utf-8').encode('gbk'), \"w\")\n self.DealUrlSecond58(match, content)\n match.close()\n counter += 1\n file_handler.close()", "def download_extracted_files(a1000):\n hash_value = demisto.getArg('hash')\n try:\n response = a1000.download_extracted_files(hash_value)\n except Exception as e:\n return_error(str(e))\n\n filename = hash_value + '.zip'\n command_results = CommandResults(\n readable_output=f\"## ReversingLabs A1000 download extraced files \\nExtracted files are available for download \"\n f\"under the name {filename}\"\n )\n\n file_result = fileResult(filename, response.content, file_type=EntryType.FILE)\n\n return [command_results, file_result]", "def check_if_downloaded( url, debug_print = True ):\n\t# Get pdf filename\n\tfilename = basename( url )\n\tfileno, ext_pdf = splitext( filename )\n\tfor file in listdir( getcwd() ):\n\t\tif fileno in file:\n\t\t\tif debug_print:\n\t\t\t\tprint 'Skipping %s' % ( filename )\n\t\t\treturn True\n\treturn False", "def download():\n \n browser.find_element_by_xpath('//*[@id=\"ctl00_contentPlaceHolder_divAllVariablesPerYear2012\"]/div[2]/div[2]/div[1]/a').click()", "def mk_htmls(self):\n\n domain_link_list = self.scrape_domain_int_links()\n html_fail_rate = float(self.failed_retrieving_html_counter / len(domain_link_list))\n print \"mk_htmls SUCCES RATE %r\" % html_fail_rate\n if html_fail_rate < (float(0.1)):\n # driving links, to collect htmls from web and create htmls with removed img elements\n for ind in range(len(domain_link_list)):\n page_file_name = \"page_\" + str(ind + 1) + \".html\" # gen file names for html file of page on site\n active_page_link = domain_link_list[ind]\n if \";;;\" in active_page_link: # separator problems hedging\n self.links_contain_seprator.append([active_page_link, page_file_name])\n print \"LINK CONTAINS SEPARATOR, CAN NOT ADD TO PAGE LIST, WILL RUIN DATASYSTEM, WILL BE ADDED TO SEPARATE FILE\"\n else:\n BS_object = self.retrieve_html(active_page_link, self.domain_folder_name, self.html_folder_name, page_file_name)\n self.page_list.append([active_page_link, page_file_name])\n self.bs_object_dict[active_page_link] = BS_object\n # if no \";;;\" separator found in link, then write as usual\n if len(self.links_contain_seprator) > 0:\n c_m.l_of_l_write(self.links_contain_seprator, self.main_path, self.domain_folder_name, self.link_contains_separator)\n else:\n c_m.l_of_l_write(self.page_list, self.main_path, self.domain_folder_name, self.page_list_f_name)\n return self.bs_object_dict\n else:\n return False", "def _DownloadPageContent(self, download_url):\n if not download_url:\n return None, None\n\n try:\n url_object = urlopen(download_url)\n except urllib_error.URLError as exception:\n logging.warning(\n u'Unable to download URL: {0:s} with error: {1:s}'.format(\n download_url, exception))\n return None, None\n\n if url_object.code != 200:\n return None, None\n\n return url_object.read(), url_object.info()", "def extract_web_archive(url, apath, ffilter=[]):\n\n download(url, apath)\n output_files = extract(apath, ffilter=ffilter)\n\n return output_files", "def get_PDF_links(soup):\n pdf_links = soup.find(\"div\", class_=\"calendar_panel time_table\").find_all(\"a\", class_=\"lnk\")\n pdf_links_parsed = []\n for p in pdf_links:\n 
if re.search(\"http.+\\.pdf\", p[\"onclick\"]):\n pdf_links_parsed.append(re.search(\"http.+\\.pdf\", p[\"onclick\"]).group(0))\n\n return pdf_links_parsed", "def download(site=None,\n sdate=None,\n ndays=1,\n edate=None,\n f_df=None,\n force=False,\n verbose=True):\n\n # get file names\n if f_df is None: \n f_df = list_files(site, sdate, ndays=ndays, edate=edate) \n # download files\n for di, row in f_df.iterrows():\n # get file name and check\n # if it exists\n fn = os.path.join(row['dir'], row['fname'])\n if not os.path.exists(fn) or force:\n # if forcing download and file\n #exists remove file before\n #redownloading\n if os.path.exists(fn):\n os.remove(fn)\n try: \n wget.download(row['hdir']+row['fname'],out=row['dir'])\n except:\n print('HTTP file not found {0}'.format(row['fname']))\n elif verbose:\n print('File {0} exists use force=True to download'.format(row['fname']))", "def fetch_all_snapshots(archive_dir, wayback_filename, target_url):\n # Read the list of snapshots.\n with open(wayback_filename) as f:\n data = f.read()\n\n url_template = \"http://web.archive.org/web/{timestamp}/{target_url}\"\n snapshots = data.split(\"\\n\")\n pages_downloaded = 0\n pages_failed = 0\n pages_skipped = 0\n for snapshot in snapshots:\n fields = snapshot.split()\n if len(fields) < 1:\n print(\"Bad fields. End of data?\")\n break\n date_string = fields[1]\n assert 14 == len(date_string)\n ymd = date_string[:8]\n year = int(date_string[:4])\n month = int(date_string[4:6])\n day = int(date_string[6:8])\n assert 1900 < year < 2100 and 1 <= month <= 12 and 1 <= day <=31\n date_of_fire = datetime.date(year,month, day)\n filename = F\"firedata_{year}_{month:02}_{day:02}.html\"\n path = os.path.join(archive_dir, filename)\n if os.path.exists(path):\n print(\"Not replacing \", path)\n pages_skipped += 1\n continue\n else:\n print(\"Downloading for \", path)\n url = url_template.format(timestamp=date_string, target_url=target_url)\n print(url)\n\n page = fetch(url)\n if page is None:\n print(\"Fetching above url failed.\")\n pages_failed +=1\n continue\n\n pages_downloaded += 1\n with open(path, \"wb\") as f:\n f.write(page)\n print(\"Page saved\")\n sleep(2)\n return pages_downloaded, pages_failed, pages_skipped", "def _get_download_urls(self):\n path = \"//path/to/text/text()\"\n return list(self.html.xpath(path))", "def pywget_inside_crawler(url):\n\n # open and read the url\n content = ''\n try:\n request = urllib.request.urlopen(url)\n content = request.read().decode(\"utf-8\")\n except:\n pass\n\n # find all contents we need which are links and srcs using regex\n match = re.findall(r'<a href=\"(.*?)\"', content) + \\\n re.findall(r'<img src=\"(.*?)\"', content) + \\\n re.findall(r'<a href = \"(.*?)\"', content) + \\\n re.findall(r'<img src = \"(.*?)\"', content)\n\n domain_name = url[0 : url.rfind('/')]\n\n all_item_list = []\n\n # if it's an absolute link, add it to all_item_list\n # if it's a relative link, add prefix in the front and add it to the list\n if match:\n for item in match:\n if item.startswith(\"http://\") or item.startswith(\"https://\") or item.startswith(\"//\"):\n if item.startswith(domain_name):\n all_item_list.append(item)\n else:\n all_item_list.append(domain_name + \"/\" + item)\n\n # apply pywget_download_inside\n for item in all_item_list:\n pywget(item, first_time=False)", "def do_GET(self):\n try:\n is_digi = False #sys.platform.startswith('digi')\n page = self.server.get_page()\n\n if self.path.endswith(page) or \\\n not is_digi and self.path == '/':\n 
self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(web_files.html % {\"page\": page,\n \"title\":self.server.get_title()})\n elif not is_digi and self.path.endswith(\"stylesheet.css\"):\n self.send_response(200)\n self.send_header('Content-type', 'text/css')\n self.end_headers()\n self.wfile.write(stylesheet_css)\n elif not is_digi and self.path.find('?') > 0:\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n self.wfile.write(self.server.get_table(\n self.path[self.path.find('?')+1:]))\n return\n except IOError:\n self.send_error(404,'File Not Found: %s' % self.path)", "def doLink(self):\n self.log.info('Starting TabLinker for all sheets in workbook')\n \n for n in range(self.rb.nsheets) :\n self.log.info('Starting with sheet {0}'.format(n))\n self.r_sheet = self.rb.sheet_by_index(n)\n self.w_sheet = self.wb.get_sheet(n)\n \n self.rowns, self.colns = self.getValidRowsCols()\n \n self.sheet_qname = urllib.quote(re.sub('\\s','_',self.r_sheet.name))\n self.log.info('Base for QName generator set to: {0}'.format(self.sheet_qname))\n \n self.log.debug('Starting parser')\n self.parseSheet()", "def run(self):\n\n try:\n # Get the content from this page\n if self.verbose:\n print \"Getting page content for '%s'\" % self.url.strip()\n \n content = getPageContent(self.url)\n\n # Verify that this is not binary data\n if content is not None and isHTML(content):\n\n\n # Extract basic data about this result\n content = content.lower()\n title, keywords, description = parseMetaDataFromContent(content)\n headers = parseHeaderInformationFromContent(content)\n\n # Add this result data\n self.resultDictionary['title'] = title\n self.resultDictionary['keywords'] = keywords\n self.resultDictionary['description'] = description\n self.resultDictionary['content'] = content\n self.resultDictionary['headers'] = headers\n\n # Run the extensions\n for extension in self.extensions:\n extension.run(self.resultDictionary)\n\n\n except URLError:\n\n # Skip this URL, and register it as an error on the cache\n if self.verbose:\n print(\"Error accessing '%s', %s\" % (self.url.strip(), str(sys.exc_info()[1]).strip()))", "def download_data(files: page_iterator.HTTPIterator, folder: str) -> None:\n logging.info('File download Started... 
Wait for the job to complete.')\n\n # create folder locally if not exists\n if not os.path.exists(folder): os.makedirs(folder)\n\n for file in files:\n logging.info('GCS File: {}'.format(file.name))\n destination_uri = '{}/{}'.format(folder, file.name.split('/')[-1])\n file.download_to_filename(destination_uri if destination_uri.endswith('.csv') else destination_uri + '.csv')\n logging.info('Exported {} to {}'.format(file.name, destination_uri))\n\n return None", "def download_report(self, response):\n \n if self.is_visited(response.url) == True:\n return None\n \n def get_filename_from_url(url):\n #http://www.gtja.com/f//lotus/201510/20151023%20Company%20Report%2001816%20HK_addStamper_addEncrypt.pdf\n import re\n pattern = re.compile(\"http://www.gtja.com/f//lotus/(\\d+)/(.*)\")\n result = pattern.match(url)\n if result is None:\n return str(datetime.date.today()), hashlib.md5(url).hexdigest() + \".pdf\"\n else:\n #return str(datetime.date.today()), hashlib.md5(url).hexdigest() + \".pdf\"\n return result.group(1), unquote(result.group(2))\n \n date, name = get_filename_from_url(response.url) #TODO Create date directory.\n\n file_path = settings[\"FILES_STORE_PATH\"] + date + \"/\"\n if os.path.exists(file_path) != True:\n os.mkdir(file_path)\n\n filename = file_path + name\n with open(filename.decode(\"utf-8\"), \"wb\") as f: #TODO what is the diffenrence between \"w+\" and \"wb\"\n f.write(response.body)\n \n item = ReportFileItem()\n item[\"url\"] = unquote(response.url)\n item[\"date\"] = date\n item[\"path\"] = \"/\" + date + \"/\" + name #Relative path\n item[\"link\"] = response.meta[\"link_url\"]\n item[\"create_date\"] = datetime.datetime.now()\n \n self.visit(response.url)\n \n return item", "def download(self, url, filename):\n print(\"url\", url)\n print(\"filename\", filename)\n # open in binary mode\n with open(filename, \"wb\") as file:\n # get request\n try:\n r = requests.get(url)\n if r.status_code == 404:\n raise NotFoundException(\n \"URL: \", url, \" is not working. 
Status code 404\")\n # write to file\n file.write(r.content)\n print(\"file downloaded\")\n except ConnectionError as ex:\n print(ex)\n except NotFoundException as ex:\n print(ex)\n except Exception as ex:\n print(ex)", "def link_scraping(final_links, driver):\n\n for final_link in final_links:\n tags = extract_all_tags(final_link, driver)\n if len(tags) != 0:\n final_tags = find_usefull_tags(tags, tagmodel, tag_count_vect)\n if len(final_tags) != 0:\n print('Extracting(classname): ', final_link)\n scrape_data(final_link, final_tags, driver)\n else:\n print('Extracting(tag): ', final_link)\n scrape_data_tag(final_link, driver)\n else:\n print('Extracting(tag): ', final_link)\n scrape_data_tag(final_link, driver)", "def url_to_file(url):\n \n try:\n r = get(url)\n print(r.status_code)\n if r.status_code == 200:\n try:\n with open(f'print-{date}.html', 'w') as f:\n f.write(r.text)\n except UnicodeEncodeError as e:\n print(\"Unicode error :using encodeing utf-8\")\n with open(f'print-{date}.html', 'w', encoding=\"utf-8\") as f:\n f.write(r.text)\n else:\n print(\"passing headers\")\n headers = {\"user-agent\":\"Edg/87.0.664.66\"}\n r = get(url, headers=headers)\n print(r.status_code)\n if r.status_code == 200:\n try:\n with open(f'print-{date}.html', 'w') as f:\n f.write(r.text)\n except UnicodeEncodeError as e:\n print(\"Unicode error: using encodeing utf-8\")\n with open(f'print-{date}.html', 'w', encoding=\"utf-8\") as f:\n f.write(r.text)\n else:\n print(f\"Unable to send requests {r.status_code}\")\n return r\n except Exception as e:\n print(\"Error occured\",e)" ]
[ "0.6434533", "0.6253483", "0.613989", "0.61118245", "0.6107879", "0.605488", "0.60413843", "0.60022914", "0.5989048", "0.59720886", "0.5906197", "0.5898894", "0.5878793", "0.58744365", "0.5851572", "0.58209777", "0.57681555", "0.5762606", "0.57579845", "0.5752446", "0.56934166", "0.5686021", "0.56830597", "0.5673555", "0.56515247", "0.56415015", "0.5628601", "0.5617447", "0.5611391", "0.56098527", "0.56069845", "0.56054276", "0.56034064", "0.55969983", "0.55946916", "0.5592611", "0.55728096", "0.5569726", "0.5569318", "0.5563395", "0.55566895", "0.55289066", "0.5522486", "0.5522297", "0.551817", "0.5514915", "0.5493979", "0.5481553", "0.5445912", "0.54453206", "0.54389954", "0.5431788", "0.5395023", "0.5393851", "0.53931946", "0.5388897", "0.5386989", "0.53804135", "0.5378832", "0.537836", "0.5371364", "0.5368258", "0.5368209", "0.5362959", "0.5353183", "0.5342754", "0.53343254", "0.5332082", "0.5323662", "0.5320736", "0.53195775", "0.52993464", "0.5297915", "0.5290514", "0.5284045", "0.52713394", "0.5271189", "0.5265775", "0.5262386", "0.5256744", "0.5256151", "0.5256129", "0.525597", "0.5252528", "0.52499807", "0.5246079", "0.5240861", "0.52405167", "0.52319586", "0.52256244", "0.5220185", "0.52191967", "0.52188677", "0.52117133", "0.5211217", "0.5209346", "0.52069885", "0.52061075", "0.5202901", "0.5196222" ]
0.60851127
5
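
The bracketed floats, the standalone score, and the final integer above read like per-negative similarity scores, a score for the paired document, and that document's rank among the candidates; this interpretation is an assumption, not something the dump itself states. Below is a minimal sketch under that assumption only: cosine similarity over pre-computed embedding vectors. The function name `cosine_scores`, the 128-dimension size, and the random vectors are all hypothetical stand-ins for real embeddings.

```python
# Hedged sketch: how score arrays and a rank like the ones above *could* be
# produced, assuming they are cosine similarities over embeddings. All names,
# sizes, and the random vectors here are illustrative, not from the dataset.
import numpy as np

def cosine_scores(query_vec: np.ndarray, candidate_vecs: np.ndarray) -> np.ndarray:
    """Cosine similarity between one query vector and each candidate row."""
    q = query_vec / np.linalg.norm(query_vec)
    c = candidate_vecs / np.linalg.norm(candidate_vecs, axis=1, keepdims=True)
    return c @ q  # shape: (num_candidates,)

# Toy usage with random vectors standing in for real embeddings.
rng = np.random.default_rng(0)
query = rng.normal(size=128)              # query embedding (hypothetical)
positive = rng.normal(size=128)           # embedding of the paired document
negatives = rng.normal(size=(101, 128))   # embeddings of the negative snippets

negative_scores = cosine_scores(query, negatives)
document_score = float(cosine_scores(query, positive[None, :])[0])
# Rank of the paired document among all candidates (1 = best).
document_rank = int(1 + np.sum(negative_scores > document_score))
print(document_score, document_rank)
```

Normalizing both sides keeps every score in [-1, 1], so the rank is simply the count of negatives scoring above the paired document, plus one.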