query stringlengths 9–9.05k | document stringlengths 10–222k | negatives sequencelengths 19–20 | metadata dict |
---|---|---|---|
set_loop_bandwidth(self, float bw) Set the loop bandwidth. Set the loop filter's bandwidth to bw. This should be between 2pi/200 and 2pi/100 (in rads/samp). It must also be a positive number. When a new loop bandwidth is set, the gains, alpha and beta, of the loop are recalculated by a call to update_gains(). | def set_loop_bandwidth(self, *args, **kwargs):
return _digital_swig.digital_pfb_clock_sync_ccf_sptr_set_loop_bandwidth(self, *args, **kwargs) | [
"def set_loop_bandwidth(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_set_loop_bandwidth(self, *args, **kwargs)",
"def set_bandwidth(self, bandwidth):\r\n self.obs.bandwidthHz = float(bandwidth)\r\n self.ave.bandwidthHz = float(bandwidth)\r\n self.hot.bandwidthHz = float(bandwidth)\r\n self.cold.bandwidthHz = float(bandwidth)\r\n self.ref.bandwidthHz = float(bandwidth)\r\n deltaNu = self.obs.bandwidthHz/float(self.vlen)\r\n n0 = self.obs.centerFreqHz - (self.obs.bandwidthHz/2.)\r\n nu = n0\r\n if len(self.ave.xdata) != self.vlen:\r\n self.update_len(self.ave)\r\n if len(self.hot.xdata) != self.vlen:\r\n self.update_len(self.hot)\r\n if len(self.cold.xdata) != self.vlen:\r\n self.update_len(self.cold)\r\n if len(self.ref.xdata) != self.vlen:\r\n self.update_len(self.ref)\r\n print(\"Setting Bandwidth: %10.0f Hz\" % (self.obs.bandwidthHz))\r\n for iii in range(self.vlen):\r\n self.obs.xdata[iii] = nu\r\n self.ave.xdata[iii] = nu\r\n self.hot.xdata[iii] = nu\r\n self.cold.xdata[iii] = nu\r\n self.ref.xdata[iii] = nu\r\n nu = nu + deltaNu",
"def set_wifi_bandwidth(self, bandwidth):\n if int(bandwidth) == 20:\n cmd = \"channel width 20\"\n elif int(bandwidth) == 40:\n cmd = \"channel width 40-Above\"\n else:\n raise Exception(-5,\n \"Unsupported wifi bandwidth '%s'.\" % str(bandwidth))\n for radio in self.WIFI_RADIOS:\n self._send_cmd(\"interface dot11radio \" + str(radio))\n self._send_cmd(cmd)\n self._send_cmd(\"exit\")",
"def get_loop_bandwidth(self):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_loop_bandwidth(self)",
"def get_loop_bandwidth(self):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_loop_bandwidth(self)",
"def set_bandwidth(self, out_bw, in_bw):\n self.m_outbound_bw = out_bw\n self.m_inbound_bw = in_bw",
"def set_bw(self, bw):\n return _radio_astro_swig.detect_sptr_set_bw(self, bw)",
"def set_bandwidth_limit(self, value='BWFULL'):\n #CMD$=“BWL C1,ON”\n print debug_msg.TBD_MSG",
"def set_bandwidths_and_cutoffs(self, bandwidths):\n self.logger.info(\"set_bandwidths_and_cutoffs with bandwidths=%s\", str(bandwidths))\n if not hasattr(bandwidths, '__iter__'):\n bandwidths = [bandwidths] * self.ndim\n\n if len(bandwidths) != self.ndim:\n raise AttributeError(\"Number of supplied bandwidths does not match the dimensionality of the data\")\n\n self.bandwidths = bandwidths\n # compute cutoffs directly from the kernel class method\n # if tol is None, only non-arbitrary cutoffs are applied\n self.cutoffs = self.ktype.compute_cutoffs(bandwidths, tol=self.cutoff_tol)\n # if kernels have already been set, update them\n if len(self.kernels):\n for k in self.kernels:\n k.update_bandwidths(self.bandwidths, time_cutoff=self.cutoffs[0])",
"def set_freq_damping(self, freq, damping):\n self.k = freq * freq * 4 * math.pi * math.pi\n self.b = 2 * math.sqrt(self.k) * damping\n return",
"def set_bw(self, bw):\n return _radio_astro_swig.detect_set_bw(self, bw)",
"def setWavelength(self, wl, block=False):\r\n self['WAVELENGTH'] = int(wl)\r\n if block:\r\n while True:\r\n if not self.isTuning():\r\n break\r\n time.sleep(0.1)\r\n print(\"still tuning\")",
"def SetPassBand(self, *args) -> \"void\":\n return _itkFrequencyBandImageFilterPython.itkFrequencyBandImageFilterICF2_SetPassBand(self, *args)",
"def set_if_bandwidth(instrument, if_bandwidth, window_num=1, channel_num=1):\n command = ':SENSe%s:BANDwidth:RESolution %G HZ' % (window_num, if_bandwidth)\n instrument.write(command)",
"def keyTcsBandwidth(self, key, data):\n self.sb_bandwidth = float(data[key])",
"def compute_bandwidth(self):\n self._bw, self._covariance = kde_methods.compute_bandwidth(self)",
"def SetPassBand(self, *args) -> \"void\":\n return _itkFrequencyBandImageFilterPython.itkFrequencyBandImageFilterICF3_SetPassBand(self, *args)",
"def blockJobSetSpeed(self, disk, bandwidth, flags=0):\n ret = libvirtmod.virDomainBlockJobSetSpeed(self._o, disk, bandwidth, flags)\n if ret == -1: raise libvirtError ('virDomainBlockJobSetSpeed() failed', dom=self)\n return ret",
"def setThrottleChannel(self, channel: int):\n self.axes[self.Axis.kThrottle] = channel"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
set_max_rate_deviation(self, float m) Set the maximum deviation from 0 that d_rate can have. | def set_max_rate_deviation(self, *args, **kwargs):
return _digital_swig.digital_pfb_clock_sync_ccf_sptr_set_max_rate_deviation(self, *args, **kwargs) | [
"def set_max_rate_deviation(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_set_max_rate_deviation(self, *args, **kwargs)",
"def error_rate_deviation(self, error_rate_deviation):\n\n self._error_rate_deviation = error_rate_deviation",
"def property_max_rate(self, property_max_rate):\n\n self._property_max_rate = property_max_rate",
"def setMaxPeriod(self, maxPeriod):\n hal.setCounterMaxPeriod(self.counter, float(maxPeriod))",
"def maxRate(rate):\n if rate == 'None' or rate is None:\n rate = 0\n else:\n try:\n rate = int(rate)\n except:\n return getRate()\n\n if rate < 0:\n rate = 0\n \n info('Resetting MAX_RATE to: ' + str(rate) + 'KB/s')\n \n rate = rate * 1024\n \n restartCheckRead = False\n if rate == 0:\n if Hellanzb.ht.unthrottleReadsID is not None and \\\n not Hellanzb.ht.unthrottleReadsID.cancelled and \\\n not Hellanzb.ht.unthrottleReadsID.called:\n Hellanzb.ht.unthrottleReadsID.cancel()\n\n if Hellanzb.ht.checkReadBandwidthID is not None and \\\n not Hellanzb.ht.checkReadBandwidthID.cancelled:\n Hellanzb.ht.checkReadBandwidthID.cancel()\n Hellanzb.ht.unthrottleReads()\n elif Hellanzb.ht.readLimit == 0 and rate > 0:\n restartCheckRead = True\n \n Hellanzb.ht.readLimit = rate\n\n if restartCheckRead:\n Hellanzb.ht.readThisSecond = 0 # nobody's been resetting this value\n reactor.callLater(1, Hellanzb.ht.checkReadBandwidth)\n return getRate()",
"def decay_rate_SM(mp, ml): \n \n drate= mp* ml*ml*(1-ml*ml/(mp*mp))*(1-ml*ml/(mp*mp))/(1*8*np.pi);#2*8*3.1415\n\n return drate",
"def max_rate(self) -> float:\n type_info = cast(\n EventableStateVariableTypeInfo, self._state_variable_info.type_info\n )\n return type_info.max_rate or 0.0",
"def get_sample_clock_max_rate (self):\n d = float64(0)\n CALL ('GetSampClkMaxRate', self, ctypes.byref(d))\n return d.value",
"def error_rate100_deviation(self, error_rate100_deviation):\n\n self._error_rate100_deviation = error_rate100_deviation",
"def setStdDev(self, sigma) -> None:\n ...",
"def fulltext_max_rate(self, fulltext_max_rate):\n\n self._fulltext_max_rate = fulltext_max_rate",
"def set_amax(self, value):\n assert 0 <= value <= 1, 'Invalid scale factor value'\n self._amax = value",
"def error_rate50_deviation(self, error_rate50_deviation):\n\n self._error_rate50_deviation = error_rate50_deviation",
"def density_deviation(self, density_deviation):\n\n self._density_deviation = density_deviation",
"def max_sample_value(self, max_sample_value):\n self._max_sample_value = max_sample_value",
"def get_convert_max_rate(self):\n d = float64(0)\n CALL ('GetAIConvMaxRate', self, ctypes.byref(d))\n return d.value",
"def mag_rate(self):\n return self._mag_rate",
"def setMaxPeriod(self, maxPeriod):\n hal.setEncoderMaxPeriod(self.encoder, maxPeriod)",
"def set_yaw_limit(self, max_yaw_rate):\n for m in self._mux:\n m.max_yaw_rate = max_yaw_rate"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
get_loop_bandwidth(self) -> float Returns the loop bandwidth. | def get_loop_bandwidth(self):
return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_loop_bandwidth(self) | [
"def get_loop_bandwidth(self):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_loop_bandwidth(self)",
"def set_loop_bandwidth(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_set_loop_bandwidth(self, *args, **kwargs)",
"def set_loop_bandwidth(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_set_loop_bandwidth(self, *args, **kwargs)",
"def _get_bandwidth(self):\n return self.__getter('get_bandwidth')",
"def get_maxbandwidth(self):\n return self.options['maxbandwidth']",
"def compute_bandwidth(self):\n self._bw, self._covariance = kde_methods.compute_bandwidth(self)",
"def get_bandwidth(self, fingerprint):\n\n try:\n desc = self.control.get_server_descriptor(fingerprint)\n return desc.observed_bandwidth / 1000\n except stem.ControllerError:\n return 0",
"def compute_bw_efficiency(self):\n comm_len = timedelta(microseconds=0)\n for txop in self.txops:\n comm_len += txop.stop_usec - txop.start_usec\n\n bw_eff = comm_len / timedelta(microseconds=100000)\n return bw_eff",
"def shaping_peak_bandwidth(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"shaping_peak_bandwidth\")",
"def bandwidthAvg(self):\n raise NotImplemented # TODO",
"def shaping_peak_bandwidth(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"shaping_peak_bandwidth\")",
"def bandwidth_control(self):\n ret = self._get_attr(\"bandwidthControl\")\n return IBandwidthControl(ret)",
"def time_bandwidth_product(self):\r\n return (lib.standard_deviation(self.t, self.intensity) *\r\n lib.standard_deviation(self.w, self.spectral_intensity))",
"def calc_bandwidth(self):\n if self.stream_data[-1][2]!= 0:\n download_time = self.stream_data[-1][0]-self.stream_data[-2][0]\n estimated_bandwidth = self.stream_data[-1][2]/download_time\n self.stream_data[-1].append(estimated_bandwidth)\n elif self.req==1:\n for i in range(1,len(self.stream_data)):\n if self.stream_data[-i][2] !=0:\n download_time = self.stream_data[-i][0]-self.stream_data[-i-1][0]\n estimated_bandwidth = self.stream_data[-i][2]/download_time\n self.stream_data[-1].append(estimated_bandwidth)\n break\n elif self.stream_data[-1][2] == -1:\n self.stream_data[-1].append(-1)\n else:\n self.stream_data[-1].append(self.stream_data[-2][3])\n self.stream_data[-1].append(self.quali_req)",
"def shaping_average_bandwidth(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"shaping_average_bandwidth\")",
"def getSampleRate(self) -> \"int\":\n return _coin.SoVRMLAudioClip_getSampleRate(self)",
"def _get_channel_speed(self):\n return self.__channel_speed",
"def get_speed(self):\n if self.speed and self.period:\n return self.speed / 1024\n else:\n return 0",
"def bitrate(self):\n b = 0\n if 'bit_rate' in self.__dict__:\n try:\n b = int(self.__dict__['bit_rate'])\n except Exception as e:\n pass\n return b"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
get_damping_factor(self) -> float Returns the loop damping factor. | def get_damping_factor(self):
return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_damping_factor(self) | [
"def get_damping_factor(self):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_damping_factor(self)",
"def velocity_damping(self, kpar):\n return (1.0 + (kpar * self.sigma_v(self.ps_redshift))**2.)**-1.",
"def set_damping_factor(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_set_damping_factor(self, *args, **kwargs)",
"def stiffness(self):\n return self.force()*self.b",
"def getDopplerVelocity(self) -> \"float\":\n return _coin.SoVRMLSound_getDopplerVelocity(self)",
"def set_freq_damping(self, freq, damping):\n self.k = freq * freq * 4 * math.pi * math.pi\n self.b = 2 * math.sqrt(self.k) * damping\n return",
"def getDopplerFactor(self) -> \"float\":\n return _coin.SoVRMLSound_getDopplerFactor(self)",
"def speedMultiplier(self) -> float:\n return self._getMultiplier('speed')",
"def damping_coefficient(self, damping_coefficient):\n if (self.local_vars_configuration.client_side_validation and\n damping_coefficient is not None and damping_coefficient > 10000000): # noqa: E501\n raise ValueError(\"Invalid value for `damping_coefficient`, must be a value less than or equal to `10000000`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n damping_coefficient is not None and damping_coefficient < 0): # noqa: E501\n raise ValueError(\"Invalid value for `damping_coefficient`, must be a value greater than or equal to `0`\") # noqa: E501\n\n self._damping_coefficient = damping_coefficient",
"def acceleration(self):\n if self.state.lightning:\n return self.character.acceleration / 2\n else:\n return self.character.acceleration",
"def pollutionMultiplier(self) -> float:\n return self._getMultiplier('pollution')",
"def calc_blade_friction_force():\r\n # return c_a * d * w\r\n return 0",
"def getDamageMultiplier(self):\n return 1.0",
"def initial_dose(self) -> float:\n return self.__Initial_dose",
"def accel(self):\n return self.force()/self.mass",
"def angular_velocity(self):\n return 0.0",
"def _partial_penalty_factor(self) -> float:\n penalty = ((self.time_penalty_function()) ** self.alpha) * self.n_of_objectives\n if penalty < 0:\n penalty = 0\n if penalty > 1:\n penalty = 1\n return penalty",
"def elo_k_decay_val(self) -> float:\n return self._elo_k_decay_var",
"def dv(self):\n return self.accel()*self.__dt"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
get_clock_rate(self) -> float Returns the current clock rate. | def get_clock_rate(self):
return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_clock_rate(self) | [
"def get_clock_rate(self):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_clock_rate(self)",
"def get_sample_clock_rate(self):\n d = float64(0)\n CALL ('GetSampClkRate', self, ctypes.byref(d))\n return d.value",
"def get_convert_clock_rate(self):\n d = float64(0)\n CALL ('GetAIConvRate', self, ctypes.byref(d))\n return d.value",
"def getRate(self):\n return self.distancePerPulse / self.getPeriod()",
"def get_rate(self) -> float:\n return self._count / (time.time() - self._start_time)",
"def sample_rate(self) -> float:\n return self._rate",
"def rate(self):\n return self._rate",
"def get_sample_clock_max_rate (self):\n d = float64(0)\n CALL ('GetSampClkMaxRate', self, ctypes.byref(d))\n return d.value",
"def getScheduleRate(self):\n schedule_rate = DPxGetDoutSchedRate()\n return schedule_rate[0]",
"def relative_rate(self):\n return _raw_util.raw_pnc_frequency_modulator_fc_sptr_relative_rate(self)",
"def frame_rate(self):\n return self._frame_rate",
"def getRate(self):\n return hal.getEncoderRate(self.encoder)",
"def flow_rate_flow(self):\n flow_rate = (math.pi * self.diameter**2 * self.length_per_minute()) / 4\n return flow_rate",
"def sweep_rate(self):\n return float(self.query('R9')[1:])",
"def Rate(self):\n freq = float(rospy.get_param('/publish_freq', None))\n\n if freq is None:\n raise RuntimeError(\"No Frequency has been set by the driving node..\")\n elif freq == 0.0:\n rospy.logwarn(\"Simulator running as fast as possible. Returning rate of 1000\")\n else:\n return rospy.Rate(freq)",
"def sample_rate(self) -> int:\n logger.debug(\"'self._sample_rate' is set to %.1f [Hz].\", self._sample_rate)\n return self._sample_rate",
"def get_speed(self):\n if self.speed and self.period:\n return self.speed / 1024\n else:\n return 0",
"def RxFilterRate(self):\n if self.force_auto_sync:\n self.get('RxFilterRate')\n return self._RxFilterRate",
"def get_rate(self):\r\n command = \":scan:rate?\\n\"\r\n self._log_write(command, mode=\"write\")\r\n self.ser.write(command)\r\n answer = self.ser.read(3)\r\n self._log_write(answer, mode=\"read\")\r\n rlvalue = float(answer[:-2])\r\n self.Stat = self.Stat._replace(rate=rlvalue)\r\n return rlvalue"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
pfb_clock_sync_ccf(double sps, float loop_bw, __dummy_4__ taps, unsigned int filter_size = 32, float init_phase = 0, float max_rate_deviation = 1.5, int osps = 1) -> digital_pfb_clock_sync_ccf_sptr Timing synchronizer using polyphase filterbanks. This block performs timing synchronization for PAM signals by minimizing the derivative of the filtered signal, which in turn maximizes the SNR and minimizes ISI. This approach works by setting up two filterbanks; one filterbank contains the signal's pulse shaping matched filter (such as a root raised cosine filter), where each branch of the filterbank contains a different phase of the filter. The second filterbank contains the derivatives of the filters in the first filterbank. Thinking of this in the time domain, the first filterbank contains filters that have a sinc shape to them. We want to align the output signal to be sampled at exactly the peak of the sinc shape. The derivative of the sinc contains a zero at the maximum point of the sinc (sinc(0) = 1, sinc'(0) = 0). Furthermore, the region around the zero point is relatively linear. We make use of this fact to generate the error signal. | def pfb_clock_sync_ccf(*args, **kwargs):
return _digital_swig.pfb_clock_sync_ccf(*args, **kwargs) | [
"def mfccInitFilterBanks(fs, nfft):\n\n # filter bank params:\n lowfreq = 133.33\n linsc = 200/3.\n logsc = 1.0711703\n numLinfilterTotal = 13\n numLogFilt = 27\n\n if fs < 8000:\n nlogfil = 5\n\n # Total number of filters\n nFiltTotal = numLinfilterTotal + numLogFilt\n\n # Compute frequency points of the triangle:\n freqs = np.zeros(nFiltTotal+2)\n freqs[:numLinfilterTotal] = lowfreq + np.arange(numLinfilterTotal) * linsc\n freqs[numLinfilterTotal:] = freqs[numLinfilterTotal-1] * logsc ** np.arange(1, numLogFilt + 3)\n heights = 2./(freqs[2:] - freqs[0:-2])\n\n # Compute filterbank coeff (in fft domain, in bins)\n fbank = np.zeros((nFiltTotal, nfft))\n nfreqs = np.arange(nfft) / (1. * nfft) * fs\n\n for i in range(nFiltTotal):\n lowTrFreq = freqs[i]\n cenTrFreq = freqs[i+1]\n highTrFreq = freqs[i+2]\n\n lid = np.arange(np.floor(lowTrFreq * nfft / fs) + 1, np.floor(cenTrFreq * nfft / fs) + 1, dtype=np.int)\n lslope = heights[i] / (cenTrFreq - lowTrFreq)\n rid = np.arange(np.floor(cenTrFreq * nfft / fs) + 1, np.floor(highTrFreq * nfft / fs) + 1, dtype=np.int)\n rslope = heights[i] / (highTrFreq - cenTrFreq)\n fbank[i][lid] = lslope * (nfreqs[lid] - lowTrFreq)\n fbank[i][rid] = rslope * (highTrFreq - nfreqs[rid])\n\n return fbank, freqs",
"def mfcc(signal, width, step, fs, Ntfd=512):\n b, a = [1, -0.97], [1]\n signal = sgl.lfilter(b, a, signal)\n frames = split(signal, width, step, fs)\n P = []\n\n for frame in frames:\n win_frame = sgl.windows.hamming(frame.size) * frame\n p = (np.abs(sgl.freqz(win_frame, worN=Ntfd)[1]) ** 2) / Ntfd\n P.append(p)\n\n P = np.array(P)\n filtered_P = filter_banks(P, fs, NFFT=2*Ntfd-1)\n res = scipy.fft.dct(filtered_P, type=2, axis=1, norm='ortho')\n return res[:, :13]",
"def pfb_clock_sync_fff(*args, **kwargs):\n return _digital_swig.pfb_clock_sync_fff(*args, **kwargs)",
"def main():\n # Create a new instance of a SavitzkyGolayFilter filter, setting the number of left hand and right hand points to 15\n sgf = GRT.SavitzkyGolayFilter(15, 15)\n\n # Create some variables to help generate the signal data\n num_seconds = 6 # The number of seconds of data we want to generate\n t = 0 # This keeps track of the time\n t_step = 1.0 / 1000.0 # This is how much the time will be updated at each iteration in the for loop\n\n # Add the freq rates\n # The first value is the time in seconds and the second value is the frequency that should be set at that time\n freq_rates = {0: 0.1, 1: 0.5, 2: 1, 3: 2, 4: 4, 5: 8, 6: 16}\n\n # Generate the signal and filter the data\n for i in range(num_seconds * 1000):\n # Check to see if we should update the freq rate to the next value\n # Set the new frequency value\n freq = [v for (k, v) in freq_rates.items() if k > (i / 1000)][0]\n\n # Generate the signal\n signal = math.sin(t * math.tau * freq)\n\n # Filter the signal\n filtered_value = sgf.filter(signal)\n\n # Print the signal and the filtered data\n print(\"%.3f %.3f %.3f\" % (freq, signal, filtered_value))\n\n # Update the t\n t += t_step\n\n # Save the HighPassFilter settings to a file\n sgf.save(\"SavitzkyGolayFilterSettings.grt\")\n\n # We can then load the settings later if needed\n sgf.load(\"SavitzkyGolayFilterSettings.grt\")",
"def __init__(self, *args):\n this = _digital_swig.new_digital_pfb_clock_sync_ccf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this",
"def get_channel_taps(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_channel_taps(self, *args, **kwargs)",
"def sccs_bit_sync(y, ns):\n # decimated symbol sequence for SEP\n rx_symb_d = np.zeros(int(np.fix(len(y) / ns)))\n track = np.zeros(int(np.fix(len(y) / ns)))\n bit_count = -1\n y_abs = np.zeros(len(y))\n clk = np.zeros(len(y))\n k = ns + 1 # initial 1-of-Ns symbol synch clock phase\n # Sample-by-sample processing required\n for i in range(len(y)):\n # y_abs(i) = abs(round(real(y(i))))\n if i >= ns: # do not process first Ns samples\n # Collect timing decision unit (TDU) samples\n y_abs[i] = np.abs(np.sum(y[i - ns + 1:i + 1]))\n # Update sampling instant and take a sample\n # For causality reason the early sample is 'i',\n # the on-time or prompt sample is 'i-1', and \n # the late sample is 'i-2'.\n if (k == 0):\n # Load the samples into the 3x1 TDU register w_hat.\n # w_hat[1] = late, w_hat[2] = on-time; w_hat[3] = early.\n w_hat = y_abs[i - 2:i + 1]\n bit_count += 1\n if w_hat[1] != 0:\n if w_hat[0] < w_hat[2]:\n k = ns - 1\n clk[i - 2] = 1\n rx_symb_d[bit_count] = y[i - 2 - int(np.round(ns / 2)) - 1]\n elif w_hat[0] > w_hat[2]:\n k = ns + 1\n clk[i] = 1\n rx_symb_d[bit_count] = y[i - int(np.round(ns / 2)) - 1]\n else:\n k = ns\n clk[i - 1] = 1\n rx_symb_d[bit_count] = y[i - 1 - int(np.round(ns / 2)) - 1]\n else:\n k = ns\n clk[i - 1] = 1\n rx_symb_d[bit_count] = y[i - 1 - int(np.round(ns / 2))]\n track[bit_count] = np.mod(i, ns)\n k -= 1\n # Trim the final output to bit_count\n rx_symb_d = rx_symb_d[:bit_count]\n return rx_symb_d, clk, track",
"def MBfilter_CF(st, frequencies,\n CN_HP, CN_LP,\n filter_norm, filter_npoles=2,\n var_w=True,\n CF_type='envelope', CF_decay_win=1.0,\n hos_order=4,\n rosenberger_decay_win=1.0,\n rosenberger_filter_power=1.0,\n rosenberger_filter_threshold=None,\n rosenberger_normalize_each=False,\n wave_type='P',\n hos_sigma=None,\n rec_memory=None,\n full_output=False):\n delta = st[0].stats.delta\n Tn = 1. / frequencies\n Nb = len(frequencies)\n CF_decay_nsmps = CF_decay_win / delta\n rosenberger_decay_nsmps = rosenberger_decay_win / delta\n\n if hos_sigma is None:\n hos_sigma = -1.\n\n # Single component analysis\n if len(st) < 2:\n # Use just the first trace in stream\n tr = st[0]\n y = tr.data\n\n YN1 = np.zeros((Nb, len(y)), float)\n CF1 = np.zeros((Nb, len(y)), float)\n\n for n in range(Nb):\n if rec_memory is not None:\n rmem = rec_memory[(tr.id, wave_type)][n]\n else:\n rmem = None\n\n YN1[n] = recursive_filter(y, CN_HP[n], CN_LP[n],\n filter_npoles, rmem)\n YN1[n] /= filter_norm[n]\n\n if var_w and CF_type == 'envelope':\n CF_decay_nsmps_mb = (Tn[n]/delta) * CF_decay_nsmps\n else:\n CF_decay_nsmps_mb = CF_decay_nsmps\n\n # Define the decay constant\n CF_decay_constant = 1 / CF_decay_nsmps_mb\n\n # Calculates CF for each MBF signal\n if CF_type == 'envelope':\n CF1[n] = recursive_rms(YN1[n], CF_decay_constant, rmem)\n\n if CF_type == 'kurtosis':\n CF1[n] = recursive_hos(YN1[n], CF_decay_constant,\n hos_order, hos_sigma, rmem)\n\n # 2 (horizontal) components analysis\n elif len(st) == 2:\n # Assumes that 2 horizontal components are used\n tr1 = st.select(channel='*[E,W,1]')[0]\n tr2 = st.select(channel='*[N,S,2]')[0]\n\n y1 = tr1.data\n y2 = tr2.data\n\n # Initializing arrays\n YN_E = np.zeros((Nb, len(y1)), float)\n YN_N = np.zeros((Nb, len(y1)), float)\n YN1 = np.zeros((Nb, len(y1)), float)\n CF1 = np.zeros((Nb, len(y1)), float)\n\n for n in range(Nb):\n if rec_memory is not None:\n rmem1 = rec_memory[(tr1.id, wave_type)][n]\n rmem2 = rec_memory[(tr2.id, wave_type)][n]\n else:\n rmem1 = None\n rmem2 = None\n\n YN_E[n] = recursive_filter(y1, CN_HP[n], CN_LP[n],\n filter_npoles, rmem1)\n YN_E[n] /= filter_norm[n]\n YN_N[n] = recursive_filter(y2, CN_HP[n], CN_LP[n],\n filter_npoles, rmem2)\n YN_N[n] /= filter_norm[n]\n # Combining horizontal components\n YN1[n] = np.sqrt(np.power(YN_E[n], 2) + np.power(YN_N[n], 2))\n\n if var_w and CF_type == 'envelope':\n CF_decay_nsmps_mb = (Tn[n] / delta) * CF_decay_nsmps\n else:\n CF_decay_nsmps_mb = CF_decay_nsmps\n\n # Define the decay constant\n CF_decay_constant = 1 / CF_decay_nsmps_mb\n\n # Calculates CF for each MBF signal\n if CF_type == 'envelope':\n CF1[n] = recursive_rms(YN1[n], CF_decay_constant, rmem1)\n\n if CF_type == 'kurtosis':\n CF1[n] = recursive_hos(YN1[n], CF_decay_constant,\n hos_order, hos_sigma, rmem1)\n\n # 3 components analysis, includes polarization P and S decomposition\n else:\n # Vertical\n tr1 = st.select(channel='*[Z,U,D]')[0]\n # Horizontals\n tr2 = st.select(channel='*[E,W,1]')[0]\n tr3 = st.select(channel='*[N,S,2]')[0]\n\n y1 = tr1.data\n y2 = tr2.data\n y3 = tr3.data\n\n # Initializing arrays\n YN1 = np.zeros((Nb, len(y1)), float)\n YN2 = np.zeros((Nb, len(y1)), float)\n YN3 = np.zeros((Nb, len(y1)), float)\n CF1 = np.zeros((Nb, len(y1)), float)\n filteredDataP = np.zeros((Nb, len(y1)), float)\n filteredDataS = np.zeros((Nb, len(y1)), float)\n if full_output:\n CF2 = np.zeros((Nb, len(y1)), float)\n\n for n in range(Nb):\n if rec_memory is not None:\n rmem1 = rec_memory[(tr1.id, wave_type)][n]\n rmem2 = rec_memory[(tr2.id, 
wave_type)][n]\n rmem3 = rec_memory[(tr3.id, wave_type)][n]\n else:\n rmem1 = None\n rmem2 = None\n rmem3 = None\n\n YN1[n] = recursive_filter(y1, CN_HP[n], CN_LP[n],\n filter_npoles, rmem1)\n YN1[n] /= filter_norm[n]\n YN2[n] = recursive_filter(y2, CN_HP[n], CN_LP[n],\n filter_npoles, rmem2)\n YN2[n] /= filter_norm[n]\n YN3[n] = recursive_filter(y3, CN_HP[n], CN_LP[n],\n filter_npoles, rmem3)\n YN3[n] /= filter_norm[n]\n\n # Define the decay constant\n rosenberger_decay_constant = 1 / rosenberger_decay_nsmps\n\n # print('Rosenberger in process {}/{}\\r'.format(n+1, Nb),\n # sys.stdout.flush())\n\n # third value returned by rosenberger() is the polarizaion filter,\n # which we do not use here\n filt_dataP, filt_dataS, _ =\\\n rosenberger(YN2[n], YN3[n], YN1[n],\n rosenberger_decay_constant,\n pol_filter_power=rosenberger_filter_power,\n pol_filter_threshold=rosenberger_filter_threshold,\n normalize_each=rosenberger_normalize_each)\n\n # Use vertical component for P data\n filteredDataP[n] = filt_dataP[0, :]\n # Use vector composition of the two horizontal component for S data\n filteredDataS[n] = np.sqrt(np.power(filt_dataS[1, :], 2) +\n np.power(filt_dataS[2, :], 2))\n\n if var_w and CF_type == 'envelope':\n CF_decay_nsmps_mb = (Tn[n]/delta) * CF_decay_nsmps\n else:\n CF_decay_nsmps_mb = CF_decay_nsmps\n\n # Define the decay constant\n CF_decay_constant = 1 / CF_decay_nsmps_mb\n\n if CF_type == 'envelope':\n if wave_type == 'P':\n CF1[n] = recursive_rms(filteredDataP[n],\n CF_decay_constant, rmem1)\n if full_output:\n CF2[n] = recursive_rms(filteredDataS[n],\n CF_decay_constant, rmem2)\n else:\n CF1[n] = recursive_rms(filteredDataS[n],\n CF_decay_constant, rmem1)\n if full_output:\n CF2[n] = recursive_rms(filteredDataP[n],\n CF_decay_constant, rmem2)\n\n if CF_type == 'kurtosis':\n if wave_type == 'P':\n CF1[n] = recursive_hos(filteredDataP[n],\n CF_decay_constant,\n hos_order, hos_sigma, rmem1)\n if full_output:\n CF2[n] = recursive_hos(filteredDataS[n],\n CF_decay_constant,\n hos_order, hos_sigma, rmem2)\n else:\n CF1[n] = recursive_hos(filteredDataS[n],\n CF_decay_constant,\n hos_order, hos_sigma, rmem1)\n if full_output:\n CF2[n] = recursive_hos(filteredDataP[n],\n CF_decay_constant,\n hos_order, hos_sigma, rmem2)\n\n if full_output:\n return YN1, CF1, CF2, Tn, Nb, filteredDataP, filteredDataS\n else:\n return YN1, CF1, Tn, Nb",
"def makeConvolutionKernel(xobs, yobs, detector, psf):\n\n half=detector.nPix/2\n xx,yy=np.meshgrid((np.arange(detector.nPix)-half)*detector.pixScale,(np.arange(detector.nPix)-half)*detector.pixScale)\n if(psf.atmosFWHM > 0):\n atmos_sigma=psf.atmosFWHM/(2.*np.sqrt(2.*np.log(2.)))\n if(detector.vSampConvolve): # PSF and Fiber convolution\n psfArr=np.exp(-(xx**2 + yy**2)/(2.*atmos_sigma**2))\n fibArrs=np.zeros((detector.nVSamp,detector.nPix,detector.nPix))\n if(detector.vSampShape==\"circle\"):\n sel=np.array([((xx-pos[0])**2 + (yy-pos[1])**2 < detector.vSampSize**2) for pos in zip(xobs,yobs)])\n elif(detector.vSampShape==\"square\"):\n PArad=np.deg2rad(detector.vSampPA)\n sel=np.array([((np.abs((xx-pos[0])*np.cos(PArad) - (yy-pos[1])*np.sin(PArad)) < 0.5*detector.vSampSize) & (np.abs((xx-pos[0])*np.sin(PArad) + (yy-pos[1])*np.cos(PArad)) < 0.5*detector.vSampSize)) for pos in zip(xobs,yobs)])\n fibArrs[sel]=1.\n kernel=np.array([scipy.signal.fftconvolve(psfArr,fibArrs[ii],mode=\"same\") for ii in range(detector.nVSamp)])\n else:\n # this is basically the psf convolved with a delta function at the center of each fiber\n kernel=np.array([np.exp(-((xx-pos[0])**2 + (yy-pos[1])**2)/(2.*atmos_sigma**2)) for pos in zip(xobs,yobs)])\n else:\n # Fiber only\n kernel=np.zeros((detector.nVSamp,detector.nPix,detector.nPix))\n if(detector.vSampShape==\"circle\"):\n sel=np.array([((xx-pos[0])**2 + (yy-pos[1])**2 < detector.vSampSize**2) for pos in zip(xobs,yobs)])\n elif(detector.vSampShape==\"square\"):\n PArad=np.deg2rad(detector.vSampPA)\n sel=np.array([((np.abs((xx-pos[0])*np.cos(PArad) - (yy-pos[1])*np.sin(PArad)) < 0.5*detector.vSampSize) & (np.abs((xx-pos[0])*np.sin(PArad) + (yy-pos[1])*np.cos(PArad)) < 0.5*detector.vSampSize)) for pos in zip(xobs,yobs)])\n kernel[sel]=1.\n \n return kernel",
"def fn_buildFilters(params, fs):\n bandPassRange = params.bpRanges\n params.filtType = 'bandpass'\n params.filterSignal = True\n \n # Handle different filter cases:\n # 1) low pass\n if params.bpRanges[0] == 0:\n # they only specified a top freqency cutoff, so we need a low pass\n # filter\n bandPassRange = params.bpRanges[1]\n params.filtType = 'low'\n if bandpassRange == fs/2:\n # they didn't specify any cutoffs, so we need no filter\n params.filterSignal = False\n \n # 2) High passs\n if params.bpRanges[1] == fs/2 and params.filterSignal:\n # they only specified a lower freqency cutoff, so we need a high pass\n # filter\n bandPassRange = params.bpRanges[0]\n params.filtType = 'high'\n \n if params.filterSignal:\n params.fB, params.fA = signal.butter(params.filterOrder, bandPassRange/(fs/2),btype=params.filtType)\n \n # filtTaps = length(fB)\n previousFs = fs\n \n params.fftSize = int(math.ceil(fs * params.frameLengthUs / 10**6))\n if params.fftSize % 2 == 1:\n params.fftSize = params.fftSize - 1 # Avoid odd length of fft\n\n params.fftWindow = signal.windows.hann(params.fftSize)\n\n lowSpecIdx = int(params.bpRanges[0]/fs*params.fftSize)\n highSpecIdx = int(params.bpRanges[1]/fs*params.fftSize)\n\n params.specRange = np.arange(lowSpecIdx, highSpecIdx+1)\n params.binWidth_Hz = fs / params.fftSize\n params.binWidth_kHz = params.binWidth_Hz / 1000\n params.freq_kHz = params.specRange*params.binWidth_kHz # calculate frequency axis\n return previousFs, params",
"def filter_psr(psr, bw=1.1, dt=7, filter_dict=None, min_toas=10,\n frequency_filter=True, fmax=3000, verbose=True, plot=False,\n low_freq_cut=False, legacy_cut=False):\n psr.deleted[:] = 1\n print('Working on PSR {}'.format(psr.name))\n\n # Flag filtering\n flag_keep = []\n if filter_dict:\n for key, val in filter_dict.items():\n if verbose: print('Keeping TOAs corresponding to {} {}'\n .format(key, val))\n if type(val) is not list:\n val = [val]\n if legacy_cut:\n print('Cutting legacy data!')\n flag_conds = [psr.flagvals(key)==v for v in val]\n else:\n flag_conds = [psr.flagvals(key)==psr.flagvals(key)]\n # if TOA has ANY acceptable value for this flag\n flag_keep.append(np.any(flag_conds, axis=0))\n\n # if TOA satisfies all flags\n idx_flag = np.flatnonzero(np.alltrue(flag_keep, axis=0))\n\n # filter low frequency observations\n if low_freq_cut:\n if verbose: print(\"Cutting data with frequency < 1000MHz\")\n idx_freq = []\n idx_freq = list(np.argwhere(psr.freqs < 1000))\n # check for empty list (i.e. there is no multi-frequency data)\n if not idx_freq:\n print(\"No low-frequency data, returning original psr\")\n return psr\n # delete\n idx = np.unique(np.concatenate(idx_freq))\n else:\n idx = idx_flag\n\n psr.deleted[idx] = 0 # mark filtered TOAs as \"deleted\"\n\n # filter for frequency coverage\n if frequency_filter:\n if verbose: print(\"Running multi-frequency filter\")\n bins = get_dm_bins(psr.toas()*86400, dt=dt)\n idx_freq = []\n for bn in bins:\n if sum(bn) > 1:\n ix = list(filter(lambda x: x in idx_flag, np.flatnonzero(bn)))\n if len(ix) > 0:\n if psr.freqs[ix].max() / psr.freqs[ix].min() >= bw:\n idx_freq.append(ix)\n elif psr.freqs[ix].max() >= fmax:\n idx_freq.append(ix)\n\n # check for empty list (i.e. there is no multi-frequency data)\n if not idx_freq:\n print(\"No multi-frequency data, returning original psr\")\n return psr\n\n # delete\n idx = np.unique(np.concatenate(idx_freq))\n else:\n idx = idx_flag\n psr.deleted[idx] = 0 # mark filtered TOAs as \"deleted\"\n\n # check for \"orphan\" backends (less than min_toas obsv.)\n orphans = []\n for gr in np.unique(psr.flagvals('group')):\n in_group = [gr == b for b in psr.flagvals('group')]\n mask = np.logical_and(in_group, ~psr.deletedmask())\n N = np.sum(mask)\n if N>0 and N<min_toas:\n psr.deleted[mask] = True\n orphans.append([gr, N])\n if verbose and len(orphans): print(\"backends marked as 'orphan': {}\".format(orphans))\n\n # filter design matrix\n mask = np.logical_not(psr.deleted)\n if not sum(mask):\n print(\"all TOAs cut, returning original psr\")\n return psr\n\n M = psr.designmatrix()[mask, :]\n dpars = []\n for ct, (par, val) in enumerate(zip(psr.pars(), M.sum(axis=0)[1:])):\n if val == 0:\n dpars.append(par)\n psr[par].fit = False\n psr[par].val = 0.0\n\n if verbose:\n print('Cutting {} TOAs'.format(np.sum(~mask)))\n if len(dpars): print('Turning off fit for {}'.format(dpars))\n\n if np.sum(~mask) == psr.nobs:\n print(\"Cutting all TOAs, so returning None\")\n else:\n fix_jumps(psr)\n\n if plot:\n plt.figure(figsize=(8,3))\n for pta in np.unique(psr.flagvals('pta')):\n nix = psr.flagvals('pta') == pta\n plt.plot(psr.toas()[nix], psr.freqs[nix], '.', label=pta)\n plt.plot(psr.toas()[~psr.deletedmask()], psr.freqs[~psr.deletedmask()], '.',\n color='C3', alpha=0.3, label='filtered')\n plt.legend(loc='best', frameon=False)\n plt.title(psr.name)\n\n return psr",
"def get_clock_rate(self):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_clock_rate(self)",
"def pilot_pll(xr, fq, fs, loop_type, bn, zeta):\n T = 1 / float(fs)\n # Set the VCO gain in Hz/V \n Kv = 1.0\n # Design a lowpass filter to remove the double freq term\n Norder = 5\n b_lp, a_lp = signal.butter(Norder, 2 * (fq / 2.) / float(fs))\n fstate = np.zeros(Norder) # LPF state vector\n\n Kv = 2 * np.pi * Kv # convert Kv in Hz/v to rad/s/v\n\n if loop_type == 1:\n # First-order loop parameters\n fn = bn\n Kt = 2 * np.pi * fn # loop natural frequency in rad/s\n elif loop_type == 2:\n # Second-order loop parameters\n fn = 1 / (2 * np.pi) * 2 * bn / (zeta + 1 / (4 * zeta)) # given Bn in Hz\n Kt = 4 * np.pi * zeta * fn # loop natural frequency in rad/s\n a = np.pi * fn / zeta\n else:\n print('Loop type must be 1 or 2')\n\n # Initialize integration approximation filters\n filt_in_last = 0\n filt_out_last = 0\n vco_in_last = 0\n vco_out = 0\n vco_out_last = 0\n\n # Initialize working and final output vectors\n n = np.arange(0, len(xr))\n theta = np.zeros(len(xr))\n ev = np.zeros(len(xr))\n phi_error = np.zeros(len(xr))\n # Normalize total power in an attemp to make the 19kHz sinusoid\n # component have amplitude ~1.\n # xr = xr/(2/3*std(xr));\n # Begin the simulation loop\n for kk in range(len(n)):\n # Sinusoidal phase detector (simple multiplier)\n phi_error[kk] = 2 * xr[kk] * np.sin(vco_out)\n # LPF to remove double frequency term\n phi_error[kk], fstate = signal.lfilter(b_lp, a_lp, np.array([phi_error[kk]]), zi=fstate)\n pd_out = phi_error[kk]\n # pd_out = 0\n # Loop gain\n gain_out = Kt / Kv * pd_out # apply VCO gain at VCO\n # Loop filter\n if loop_type == 2:\n filt_in = a * gain_out\n filt_out = filt_out_last + T / 2. * (filt_in + filt_in_last)\n filt_in_last = filt_in\n filt_out_last = filt_out\n filt_out = filt_out + gain_out\n else:\n filt_out = gain_out\n # VCO\n vco_in = filt_out + fq / (Kv / (2 * np.pi)) # bias to quiescent freq.\n vco_out = vco_out_last + T / 2. * (vco_in + vco_in_last)\n vco_in_last = vco_in\n vco_out_last = vco_out\n vco_out = Kv * vco_out # apply Kv\n # Measured loop signals\n ev[kk] = filt_out\n theta[kk] = np.mod(vco_out, 2 * np.pi); # The vco phase mod 2pi\n return theta, phi_error",
"def get_taps(self):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_taps(self)",
"def set_loop_bandwidth(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_set_loop_bandwidth(self, *args, **kwargs)",
"def get_filter(self,Nfft,spSym,maskSize,nCycles=0.5):\n\n wavePhase = np.linspace(1/spSym,1,spSym)*np.pi*2*nCycles\n\n symbols = self._get_xcorrMasks(maskSize)\n\n filtersPh = np.empty((len(symbols),len(symbols[0])*spSym))\n for i,p in enumerate(symbols):\n p = p*2-1\n filtersPh[i,0:spSym] = p[0] * wavePhase + -1*p[0]*np.pi/2 \n for j in range(1,len(p)):\n filtersPh[i,j*spSym:(j+1)*spSym] = filtersPh[i,j*spSym-1] + p[j]*wavePhase\n \n filters = [np.exp(1j*f) for f in filtersPh]\n \n filtersPadded = np.empty((len(filters), Nfft),dtype=np.complex64)\n for k in range(len(filters)):\n filtersPadded[k] = np.conj(np.fft.fft(filters[k], Nfft)).astype(np.complex64)\n \n \n return filtersPadded.shape[0], filtersPadded",
"def bootstrap_spectral(data, fs, nperseg, fwin, nit=1000, ci=95,\n trim=0.2, calc_coherence=True):\n print(r\"Spectral estimates from {:.1f} to {:.1f} Hz\".format(*fwin))\n nchans = data.shape[0]\n # get the indices for the confidence intervals\n ci_idx = np.array([\n int((0.5 - ci/200.)*(nit-1)), # lower CI\n (nit-1)//2, # mean\n int(np.ceil((0.5 + ci/200.)*(nit-1))) # upper CI\n ])\n # get the frequencies\n f = np.fft.rfftfreq(nperseg, d=1./fs)\n f_keep = np.all([\n f >= fwin[0],\n f <= fwin[1]],\n axis = 0)\n print('Number of Fourier coefficients: %d' % f_keep.sum())\n f = f[f_keep]\n psd_segs = scipy.signal.spectral._spectral_helper(data, data, axis=-1,\n nperseg = nperseg, fs=fs, mode='psd',\n scaling='density')[2][:,f_keep,:]\n # get the indices with replacement of the array for the bootstrap\n bootstrap_indices = np.random.random_integers(\n low = 0, high = psd_segs.shape[-1] - 1,\n size = (nit, psd_segs.shape[-1]))\n # perform the bootstrap for the psd\n psd_bootstrap = np.array(\n [scipy.stats.trim_mean(psd_segs[...,idx], trim, axis=-1)\n for idx in bootstrap_indices])\n if calc_coherence:\n # perform the bootstrap for coh and icoh\n coh = []\n icoh = []\n phs = []\n for i in range(nchans):\n for j in range(i):\n print('Channel %d vs. %d.' % (i + 1, j + 1))\n csd_segs = scipy.signal.spectral._spectral_helper(\n data[i], data[j], axis=-1, nperseg = nperseg, fs=fs,\n mode='psd', scaling='density')[2][f_keep]\n # perform the bootstrap\n csd_bootstrap = np.array([\n (scipy.stats.trim_mean(\n np.real(csd_segs[...,idx]), trim, axis=-1) + \n 1j*scipy.stats.trim_mean(\n np.imag(csd_segs[...,idx]), trim, axis=-1))\n for idx in bootstrap_indices])\n # get the phase spectrum confidence intervals\n phs.append(np.sort(np.angle(csd_bootstrap,\n deg=True), axis=0)[ci_idx])\n # normalize the csd bootstrap with the product of the psds\n # for the coherence estimates\n csd_bootstrap /= np.sqrt(psd_bootstrap[:,i]*psd_bootstrap[:,j])\n # get the confidence interval for coherence and icoh\n coh.append(np.sort(np.abs(csd_bootstrap), axis=0)[ci_idx])\n icoh.append(np.sort(np.imag(csd_bootstrap), axis=0)[ci_idx])\n # get the CI of the psd\n psd = np.swapaxes(np.sort(psd_bootstrap, axis=0)[ci_idx], 0, 1)\n if calc_coherence:\n return f, psd, np.array(coh), np.array(icoh), np.array(phs)\n else:\n return f, psd",
"def kcdetect(data, sf, proba_thr, amp_thr, hypno, nrem_only, tmin, tmax,\n kc_min_amp, kc_max_amp, fmin=.5, fmax=4., delta_thr=.75,\n smoothing_s=20, spindles_thresh=2., range_spin_sec=20,\n min_distance_ms=500.):\n # Find if hypnogram is loaded :\n hyploaded = True if np.unique(hypno).size > 1 and nrem_only else False\n\n # PRE DETECTION\n # Compute delta band power using wavelet\n freqs = np.array([0.1, 4., 8., 12., 16., 30.])\n delta_npow = morlet_power(data, freqs, sf, norm=True)[0]\n delta_nfpow = smoothing(delta_npow, smoothing_s * sf)\n idx_no_delta = np.where(delta_nfpow < delta_thr)[0]\n idx_loc_delta = np.where(delta_npow > np.median(delta_npow))[0]\n\n # MAIN DETECTION\n # Bandpass filtering\n sig_filt = filt(sf, np.array([fmin, fmax]), data)\n # Taiger-Keaser energy operator\n sig_tkeo = tkeo(sig_filt)\n # Define hard and soft thresholds\n hard_thr = np.nanmean(sig_tkeo) + amp_thr * np.nanstd(sig_tkeo)\n soft_thr = 0.8 * hard_thr\n\n with np.errstate(divide='ignore', invalid='ignore'):\n idx_hard = np.where(sig_tkeo > hard_thr)[0]\n idx_soft = np.where(sig_tkeo > soft_thr)[0]\n\n # Find threshold-crossing indices of soft threshold\n idx_zc_soft = _events_to_index(idx_soft).flatten()\n\n if idx_hard.size == 0:\n return np.array([], dtype=int)\n\n # Initialize K-complexes index vector\n idx_kc = np.array([], dtype=int)\n # Fill gap between events separated by less than min_distance_ms\n idx_hard = _events_distance_fill(idx_hard, min_distance_ms, sf)\n # Get where K-complex start / end :\n idx_start, idx_stop = _events_to_index(idx_hard).T\n\n # Find true beginning / end using soft threshold\n for s in idx_start:\n d = s - idx_zc_soft\n soft_beg = d[d > 0].min()\n soft_end = np.abs(d[d < 0]).min()\n idx_kc = np.append(idx_kc, np.arange(s - soft_beg, s + soft_end))\n\n # Check if spindles are present in range_spin_sec\n idx_spin = spindlesdetect(data, sf, spindles_thresh, hypno, False)[0]\n idx_start, idx_stop = _events_to_index(idx_kc).T\n spin_bool = np.array([], dtype=np.bool)\n\n for idx, val in enumerate(idx_start):\n step = 0.5 * range_spin_sec * sf\n is_spin = np.in1d(np.arange(val - step, val + step, 1),\n idx_spin, assume_unique=True)\n spin_bool = np.append(spin_bool, any(is_spin))\n\n kc_spin = np.where(spin_bool)[0]\n idx_kc_spin = _index_to_events(np.c_[idx_start, idx_stop][kc_spin])\n\n # Compute probability\n proba = np.zeros(shape=data.shape)\n proba[idx_kc] += 0.1\n proba[idx_no_delta] += 0.1\n proba[idx_loc_delta] += 0.1\n proba[idx_kc_spin] += 0.1\n\n if hyploaded:\n proba[hypno == -1] += -0.1\n proba[hypno == 0] += -0.2\n proba[hypno == 1] += 0\n proba[hypno == 2] += 0.1\n proba[hypno == 3] += -0.1\n proba[hypno == 4] += -0.2\n\n # Smooth and normalize probability vector\n proba = proba / 0.5 if hyploaded else proba / 0.4\n proba = smoothing(proba, sf)\n # Keep only proba >= proba_thr (user defined threshold)\n idx_kc = np.intersect1d(idx_kc, np.where(proba >= proba_thr)[0], True)\n\n if idx_kc.size == 0:\n return np.array([], dtype=int)\n\n # Morphological criteria\n idx_start, idx_stop = _events_to_index(idx_kc).T\n duration_ms = (idx_stop - idx_start) * (1000 / sf)\n\n # Remove events with bad duration\n good_dur = np.where(np.logical_and(duration_ms > tmin,\n duration_ms < tmax))[0]\n idx_kc = _index_to_events(np.c_[idx_start, idx_stop][good_dur])\n\n # Remove events with bad amplitude\n idx_start, idx_stop = _events_to_index(idx_kc).T\n amp = np.zeros(shape=idx_start.size)\n for i, (start, stop) in enumerate(zip(idx_start, idx_stop)):\n amp[i] = 
np.ptp(data[start:stop])\n good_amp = np.where(np.logical_and(amp > kc_min_amp,\n amp < kc_max_amp))[0]\n\n return np.c_[idx_start, idx_stop][good_amp]",
"def get_channel_taps(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_channel_taps(self, *args, **kwargs)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
__init__(self) -> digital_pfb_clock_sync_fff_sptr __init__(self, p) -> digital_pfb_clock_sync_fff_sptr | def __init__(self, *args):
this = _digital_swig.new_digital_pfb_clock_sync_fff_sptr(*args)
try: self.this.append(this)
except: self.this = this | [
"def __init__(self, *args):\n this = _digital_swig.new_digital_pfb_clock_sync_ccf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this",
"def pfb_clock_sync_fff(*args, **kwargs):\n return _digital_swig.pfb_clock_sync_fff(*args, **kwargs)",
"def pfb_clock_sync_ccf(*args, **kwargs):\n return _digital_swig.pfb_clock_sync_ccf(*args, **kwargs)",
"def __init__(self, spi_rack, module, frequency=100e6):\n #def __init__(self, module, frequency=100e6):\n self.spi_rack = spi_rack\n self.module = module\n\n self.rf_frequency = frequency\n self.stepsize = 1e6\n self.ref_frequency = 10e6\n self.use_external = 0\n self.outputPower = None\n\n # These are the 6 registers present in the ADF4351\n self.registers = 6*[0]\n # In REG3: set ABP=1 (3 ns, INT-N) and CHARGE CANCEL=1\n self.registers[3] = (1<<22) | (1<<21) | 3\n # In REG5: set LD PIN MODE to 1 -> digital lock detect\n self.registers[5] = (1<<22) | (3<<19) | 5\n\n self.set_frequency(frequency)",
"def __init__(self, p, i, d, get_current_time, get_feedback_value):\r\n # p, i, and d constants\r\n self.p, self.i, self.d = p, i, d\r\n\r\n # saves the functions that return the time and the feedback\r\n self.get_current_time = get_current_time\r\n self.get_feedback_value = get_feedback_value",
"def __init__(self):\n this = _coin.new_SoMFPlane()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this",
"def __init__(self, *args):\n this = _digital_swig.new_digital_diff_encoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self):\n\t\tself._address = MCP79410address\n\t\tlocaltime = time.localtime(time.time())\n\t\trtc_time=RTCC_Struct(localtime.tm_sec,localtime.tm_min,localtime.tm_hour,localtime.tm_wday,localtime.tm_mday,localtime.tm_mon,(localtime.tm_year-2000))\n\n\t\tself.SetHourFormat(24)\n\t\tself.EnableVbat()\t\t\t\t#Enable the battery back-up\t\n\t\tself.EnableOscillator()\t\t\t#Start RTC clock\n\t\tself.SetTime(rtc_time)",
"def __init__(self, limit):\r\n self.limit = limit\r\n self.clock = 0",
"def __init__(self, *args):\n this = _coin.new_SbTime(*args)\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this",
"def __init__(self, *args):\n this = _digital_swig.new_digital_diff_decoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self, *args):\n this = _digital_swig.new_digital_diff_phasor_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self):\r\n\r\n super(ElapsedTime, self).__init__()\r\n\r\n # Define private dictionary attributes.\r\n\r\n # Define private list attributes.\r\n self._lambdab_count = []\r\n\r\n # Define private scalar attributes.\r\n\r\n # Define public dictionary attributes.\r\n\r\n # Define public list attributes.\r\n\r\n # Define public scalar attributes.\r\n self.piT = 0.0 # Temperature stress pi factor.\r",
"def set_damping_factor(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_set_damping_factor(self, *args, **kwargs)",
"def __init__(self):\r\n# self.__init__(defaultPixels)\r\n #TODO nicht möglich mehrere Konstruktoren zu haben?\r\n #def __init__(self, pixels):\r\n #\"\"\"init the countdown with pixel positions (a list of (x, y) tuples)\"\"\"\r\n #self.pixels = pixels\r\n self.pixels = self.defaultPixels\r\n self.initColors()\r\n self.t = 0",
"def __init__(self):\n this = _coin.new_SoVRMLTimeSensor()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this",
"def __init__(self, *args):\n this = _digital_swig.new_digital_map_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self):\n this = _coin.new_SoSFPlane()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this",
"def __init__(self, *args):\n this = _digital_swig.new_digital_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
get_taps(self) -> __dummy_11__ Returns all of the taps of the matched filter | def get_taps(self):
return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_taps(self) | [
"def get_taps(self):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_taps(self)",
"def get_diff_taps(self):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_diff_taps(self)",
"def get_swaps(self, t: int) -> list:\n swaps = []\n for (i, j) in self._arcs:\n if i >= j:\n continue\n for q in range(self.num_vqubits):\n if self.solution.get_value(f\"x_{t}_{q}_{i}_{j}\") > 0.5:\n swaps.append((self.global_qubit[i], self.global_qubit[j]))\n return swaps",
"def get_diff_taps(self):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_diff_taps(self)",
"def get_diff_channel_taps(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_diff_channel_taps(self, *args, **kwargs)",
"def get_channel_taps(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_channel_taps(self, *args, **kwargs)",
"def _locate_gaps(self, t):\n true_gap_starts = []\n true_gap_ends = []\n for i in range(len(t)-1):\n if t[i+1] - t[i] > 0.1 * self.period.value:\n true_gap_starts += [i]\n true_gap_ends += [i+1]\n\n return true_gap_starts, true_gap_ends",
"def __call__(self, data: np.ndarray, threshold: float):\n t_list = []\n time = 0\n # Find all threshold crossings\n data_thresh = data[data[:, 2] >= threshold, :]\n while time < self.max_time:\n # Find threshold crossings less than \"time\" before the time of event\n inds = np.logical_and(data_thresh[:, 1] >= (time), data_thresh[:, 1] <= (time + self.step_size))\n # Store a boolean indicating if a warning was ever \"On\"\n t_list.append(any(inds))\n time += self.step_size\n return t_list",
"def filterBreakerTrips(events):\n filters = []\n filters.append( UndervoltageMerge() )\n filters.append( RunstopMerge() )\n filters.append( CircuitBreakerMerge() )\n filters.append( KeepEventTypes(['CircuitBreakerTrip']) )\n return runFilters(filters,events)",
"def multi_t_filter(w_in, a_threshold_in, vt_max_in, vt_min_in, t_out):\n \n # initialize arrays, padded with the elements we want\n t_out[:] = np.nan \n \n # checks \n if (np.isnan(w_in).any() or np.isnan(a_threshold_in)):\n return\n if (np.isnan(vt_max_in).all() and np.isnan(vt_min_in).all()):\n return \n if (not len(t_out)<=len(w_in)):\n raise DSPFatal('The length of your return array must be smaller than the length of your waveform')\n\n # Initialize an intermediate array to hold the tp0 values before we remove duplicates from it\n intermediate_t_out = np.full_like(t_out, np.nan, dtype=np.float32)\n \n # Go through the list of maxima, calling time_point_thresh (the refactored version ignores the nan padding)\n time_point_thresh(w_in, a_threshold_in, vt_max_in, 0, intermediate_t_out)\n\n # Remove duplicates from the t_out list\n remove_duplicates(intermediate_t_out, vt_min_in, t_out)",
"def get_channel_taps(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_channel_taps(self, *args, **kwargs)",
"def _find_ttl(t, x, thresh=500, polarity=1):\n\n times = []\n inpulse = 0\n if polarity > 0:\n for i in range(0, len(t)):\n if (not inpulse) and (x[i] < thresh):\n times.append(t[i])\n inpulse = 1\n elif inpulse and (x[i] > thresh):\n inpulse = 0\n else:\n for i in range(0, len(t)):\n if (not inpulse) and (x[i] > thresh):\n times.append(t[i])\n inpulse = 1\n elif inpulse and (x[i] < thresh):\n inpulse = 0\n return times",
"def get_diff_channel_taps(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_diff_channel_taps(self, *args, **kwargs)",
"def rippleBandFilterSimulated(lfp, time, FS, bpFilterTaps, lpFilterTaps):\n #Bandpass filter into ripple band\n rippleData = signal.lfilter(bpFilterTaps,1,lfp)\n #Envelope\n rippleEnvelope = np.absolute(rippleData)\n #smooth\n smoothed_envelope = signal.lfilter(lpFilterTaps,1,rippleEnvelope)\n return smoothed_envelope, rippleData",
"def trapfilt(xt, Fs, fL, k, alfa): \n ixk = round(Fs*k/float(2*fL)) # Tail cutoff index \n tt = arange(-ixk,ixk+1)/float(Fs) # Time axis for h(t) \n n = len(tt)-1 # Filter order \n ht = zeros(len(tt))\n ix = where(logical_and(tt>=-ixk,tt<ixk+1))[0]\n ht[int(len(ix)/2)] = 2*fL\n ixn = ix[0:n/2]\n ixp = ix[(n/2)+1:n+1]\n ix = hstack((ixn,ixp))\n ht[ix] = (sin(2*pi*fL*tt[ix])/(pi*tt[ix])) * (sin(2*pi*alfa*fL*tt[ix])/(2*pi*alfa*fL*tt[ix]))\n #ht[int(len(ix)/2)] = 2*fL\n if alfa == 0 :\n ixk = round(Fs*k/float(2*fL))\n ix = where(logical_and(tt>=-ixk,tt<ixk+1))[0]\n ixn = ix[0:160]\n ixp = ix[161:321]\n ix = hstack((ixn,ixp))\n TL = 1/float(2*fL)\n ht[int(len(ix)/2)] = 1 # At exception t=0, assign value of sinc directly at t =0 point\n ht[ix] = sin(pi*tt[ix]/TL)/(pi*tt[ix]/TL)\n \n yt = lfilter(ht, 1, hstack((xt, zeros(ixk)))) \n # Compute filter output y(t) \n yt = yt[ixk:] # Filter delay compensation \n return yt, n # Return y(t) and filter order",
"def hot_segments(self):\n return [s for s in self.segments if s.heat_flow > 0]",
"def get_frame_gaps(self):\n first_index = self.frames.get_first_frame_value('fixed_index')\n first_mjd = self.frames.get_first_frame_value('mjd')\n\n dt = self.info.sampling_interval\n measured_time = (self.frames.mjd - first_mjd) * units.Unit('day')\n expected_time = (self.frames.fixed_index - first_index) * dt\n gap_time = (measured_time - expected_time).decompose().to(dt.unit)\n frame_gaps = round_values((gap_time / dt).decompose().value)\n frame_gaps[~self.frames.valid] = 0\n gap_time[~self.frames.valid] = np.nan\n return frame_gaps, gap_time",
"def _masks(signal,r_peaks, p_start, p_end, t_start, t_end):\n phases = np.zeros(len(signal))\n nni = np.diff(r_peaks) \n ppi =[]\n tti =[]\n qrsi = []\n tpi = []\n zzi = []\n \n for i in range(len(r_peaks)-1): \n # if nni[i] > 400 and nni[i] < 1400:\n ppl = p_end[i]-p_start[i]\n if ppl < 400 and ppl > 0:\n phases[p_start[i]: p_end[i]] = 1\n ppi.append(ppl)\n ttl = t_end[i]-t_start[i]\n if ttl < 700 and ttl > 0:\n phases[t_start[i]: t_end[i]] = 3\n tti.append(ttl)\n tpl = p_start[i]-t_end[i]\n if tpl < 1400 and tpl >0:\n phases[t_end[i]: p_start[i]] = 4 \n tpi.append(tpl)\n for i in range(len(r_peaks)-2): \n # if nni[i] > 400 and nni[i] < 1400 and nni[i+1] > 400 and nni[i+1] < 1400:\n qrsl = t_start[i+1] - p_end[i]\n if qrsl < 500 and qrsl > 0:\n phases[p_end[i]: t_start[i+1]] = 2\n qrsi.append(qrsl)\n return phases",
"def get_trips(self):\n return self.trips"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
get_diff_taps(self) -> __dummy_11__ Returns all of the taps of the derivative filter | def get_diff_taps(self):
return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_diff_taps(self) | [
"def get_diff_taps(self):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_diff_taps(self)",
"def get_diff_channel_taps(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_diff_channel_taps(self, *args, **kwargs)",
"def get_diff_channel_taps(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_diff_channel_taps(self, *args, **kwargs)",
"def get_taps(self):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_taps(self)",
"def get_taps(self):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_taps(self)",
"def get_diffs(self):\n return list(self.iter_diffs())",
"def get_diff_taps_as_string(self):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_diff_taps_as_string(self)",
"def test_backward_divided_difference_gaps(self):\n orig = [(1, 5), (2, 4), (3, 4), (4, 4), (6, 4), (7, 3), (10, 6)]\n expected = [(2, -1), (3, 0), (4, 0), (6, 0), (7, -1), (10, 1)]\n walker = (('a', p, v) for p, v in orig)\n assert_equal([(p, v) for _, p, v in backward_divided_difference(walker)], expected)",
"def test_timeseries_get_diffs(self):\n\n ts = self.ts.get_diffs()\n\n self.assertListEqual(\n ts.tseries.tolist(),\n [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],\n )\n\n self.assertEqual(len(ts.tseries), len(self.ts.tseries) - 1)\n\n self.assertTrue(np.array_equal(self.ts.dseries[1:], ts.dseries))",
"def periods(t, y, threshold):\n transition_times = find_transition_times(t, y, threshold)\n deltas = np.diff(transition_times)\n return deltas",
"def test_backward_divided_difference_gaps_step(self):\n orig = [(1, 5), (2, 4), (3, 4), (4, 4), (6, 4), (7, 3), (10, 6)]\n expected = [(2, -1), (3, 0), (4, 0), (7, -1)]\n walker = (('a', p, v) for p, v in orig)\n assert_equal([(p, v) for _, p, v in backward_divided_difference(walker, step=1)], expected)",
"def get_diff_taps_as_string(self):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_diff_taps_as_string(self)",
"def test_forward_divided_difference_gaps_step(self):\n orig = [(1, 5), (2, 4), (3, 4), (4, 4), (6, 4), (7, 3), (10, 6)]\n expected = [(1, -1), (2, 0), (3, 0), (6, -1)]\n walker = (('a', p, v) for p, v in orig)\n assert_equal([(p, v) for _, p, v in forward_divided_difference(walker, step=1)], expected)",
"def test_backward_divided_difference_gaps_step2(self):\n orig = [(1, 5), (2, 4), (3, 4), (4, 4), (6, 4), (8, 3), (10, 6)]\n expected = [(6, 0), (8, -0.5), (10, 1.5)]\n walker = (('a', p, v) for p, v in orig)\n assert_equal([(p, v) for _, p, v in backward_divided_difference(walker, step=2)], expected)",
"def test_forward_divided_difference_gaps_step2(self):\n orig = [(1, 5), (2, 4), (3, 4), (4, 4), (6, 4), (8, 3), (10, 6)]\n expected = [(4, 0), (6, -0.5), (8, 1.5)]\n walker = (('a', p, v) for p, v in orig)\n assert_equal([(p, v) for _, p, v in forward_divided_difference(walker, step=2)], expected)",
"def calc_delta_thresh(self):\n threshold_vals = self.calc_threshold_vals()\n return np.array(threshold_vals - threshold_vals[0])",
"def backward_differences(T):\n\tnumOfTimes = len(T)\n\t#the number of steps in the method\n\tm = numOfTimes - 1\n\t#generate the initial differences, which\n\t#is just the standard basis.\n\tD = np.array([ [np.float64((i+1)==(numOfTimes-j)) for i in range(numOfTimes)] for j in range(numOfTimes)])\n\tdifferences = np.zeros_like(D)\n\tdifferences[0] = D[0]\n\t\n\t\n\tfor q in range(1,numOfTimes):\n\t\tfor j in range(numOfTimes - q):\n\t\t\tD[j] = first_difference([T[m-j],T[m-j-q]],[D[j],D[j+1]])\n\t\t\tdifferences[q] = D[0]\n\treturn differences",
"def test_backward_divided_difference_gaps_auto_step(self):\n orig = [(1, 5), (2, 4), (3, 4), (4, 4), (6, 4), (7, 3), (10, 6)]\n expected = [(2, -1), (3, 0), (4, 0), (7, -1)]\n walker = (('a', p, v) for p, v in orig)\n assert_equal([(p, v) for _, p, v in backward_divided_difference(walker, auto_step=True)], expected)",
"def get_channel_taps(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_channel_taps(self, *args, **kwargs)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
get_channel_taps(self, int channel) > __dummy_4__ Returns the taps of the matched filter for a particular channel | def get_channel_taps(self, *args, **kwargs):
return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_channel_taps(self, *args, **kwargs) | [
"def get_channel_taps(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_channel_taps(self, *args, **kwargs)",
"def get_diff_channel_taps(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_diff_channel_taps(self, *args, **kwargs)",
"def get_diff_channel_taps(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_diff_channel_taps(self, *args, **kwargs)",
"def get_taps(self):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_taps(self)",
"def get_channels():",
"def get_taps(self):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_taps(self)",
"def get_diff_taps(self):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_diff_taps(self)",
"def take_channels(data, channelmap):\n channelmap = [c-1 for c in channelmap]\n return data[:, channelmap]",
"def get_channel_history(channel):\n incidents = list(Incident.objects.filter(channel=channel).all())\n notes = list(ChannelNote.objects.filter(channel=channel).all())\n\n history = incidents + notes\n history.sort(key=lambda x: x.created_at, reverse=True)\n\n return history",
"def get_existing_traces_by_channel(self) -> Dict[int, List[Tuple[int, str]]]:\n ret = {}\n for i in range(1, 9):\n traces = self.ask(f\"CALC{i}:PAR:CAT:EXT?\").strip('\"')\n if traces == \"NO CATALOG\":\n continue\n else:\n ret[i] = []\n traces = traces.split(',')\n names = traces[::2]\n params = traces[1::2]\n for n, p in zip(names, params):\n ret[i].append((int(n.split('_')[-1]), p))\n return ret",
"def feed_comparison(self, channel):\n comparison_results = []\n retval = []\n # Alert if tower is not in feed DB\n if (channel[\"cgi_str\"] not in self.bad_cgis and\n channel[\"cgi_str\"] not in self.cgi_whitelist and\n channel[\"cgi_str\"] not in self.good_cgis):\n comparison_results.append(self.check_channel_against_feed(channel))\n # Else, be willing to alert if channel is not in range\n if (channel[\"cgi_str\"] not in self.bad_cgis and\n channel[\"cgi_str\"] not in self.cgi_whitelist and\n channel[\"cgi_str\"] not in self.good_cgis):\n comparison_results.append(self.check_channel_range(channel))\n # Test for primary BTS change\n if channel[\"cell\"] == '0':\n comparison_results.append(self.process_cell_zero(channel))\n for result in comparison_results:\n if result != ():\n retval.append(result)\n if len(retval) == 0:\n if channel[\"cgi_str\"] not in self.good_cgis:\n self.good_cgis.append(channel[\"cgi_str\"])\n return retval",
"def get_channel_events(self, channel_index, source='xform', subsample=False):\n events = self.get_events(source=source, subsample=subsample)\n events = events[:, channel_index]\n\n return events",
"def get_keys( self, step=None, channel=None):\r\n if step==None:\r\n return self.channels[channel] if channel<self.nchannels else []\r\n if channel==None:\r\n return self.steps[step] if step<self.nsteps else []\r\n return self.steps[step][channel] if (steps<self.nsteps and channel<nchannels) else []",
"def get_channel_dict(self):\n return self.channels",
"def _get_all_channels(self):\n\n try:\n available_channel_tuple = list(\n self._tagger.getChannelList(TT.TT_CHANNEL_RISING_AND_FALLING_EDGES)\n )\n # handle exception in the call (TT functions normally produce NotImplementedError)\n except NotImplementedError:\n # self.log.error('_get_all_channels(): communication with the device failed')\n return []\n # handle the case of self._tagger = None\n except AttributeError:\n # self.log.error('_get_all_channels(): _tagger is None. Initialize device first')\n return []\n\n return list(available_channel_tuple)",
"def extract(self, ev):\n ch_events = channelEvents(atype='Physical')\n channel = ev.get_ad() >> self.nBitsTotal # Get channel information.\n\n for channelIdx in xrange(self.nChannels):\n t = pylab.find(channelIdx == channel)\n # Much faster than boolean list or filter\n if len(t) > 0:\n ad = (ev.get_ad()[t]) & (2 ** self[channelIdx].nBitsTotal - 1)\n ch_events.add_adtmch(channelIdx, ad, ev.get_tm()[t])\n return ch_events",
"def get_channel_aliases(self, channel):\n chan_key = channel.key.lower()\n nicktuples = self.caller.nicks.get(category=\"channel\", return_tuple=True, return_list=True)\n if nicktuples:\n return [tup[2] for tup in nicktuples if tup[3].lower() == chan_key]\n return []",
"def get_active_channels(self):\n sources = {}\n for inp in self.IFswitch.inputs.keys():\n if self.IFswitch.inputs[inp].source:\n sources[self.IFswitch.inputs[inp].name] = self.IFswitch.inputs[inp].source\n return sources",
"def get_channel_list(self):\r\n channels = self.items()\r\n channels.sort()\r\n return [value for key, value in channels]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
get_diff_channel_taps(self, int channel) > __dummy_4__ Returns the taps in the derivative filter for a particular channel | def get_diff_channel_taps(self, *args, **kwargs):
return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_diff_channel_taps(self, *args, **kwargs) | [
"def get_diff_channel_taps(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_diff_channel_taps(self, *args, **kwargs)",
"def get_channel_taps(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_channel_taps(self, *args, **kwargs)",
"def get_channel_taps(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_channel_taps(self, *args, **kwargs)",
"def get_diff_taps(self):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_diff_taps(self)",
"def get_diff_taps(self):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_diff_taps(self)",
"def get_taps(self):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_taps(self)",
"def describe_diff(channels: List[np.ndarray],\n diff_fraction_threshold: float = 0.01) -> Diff:\n diff_fraction, nearby_variants = analyze_diff_and_nearby_variants(channels)\n # Thresholds were chosen by visual experimentation, i.e. human curation.\n if diff_fraction > diff_fraction_threshold:\n return Diff.MANY_DIFFS\n elif nearby_variants >= 5:\n return Diff.NEARBY_VARIANTS\n else:\n return Diff.FEW_DIFFS",
"def get_diff_taps_as_string(self):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_diff_taps_as_string(self)",
"def get_taps(self):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_taps(self)",
"def periods(t, y, threshold):\n transition_times = find_transition_times(t, y, threshold)\n deltas = np.diff(transition_times)\n return deltas",
"def get_channel_history(channel):\n incidents = list(Incident.objects.filter(channel=channel).all())\n notes = list(ChannelNote.objects.filter(channel=channel).all())\n\n history = incidents + notes\n history.sort(key=lambda x: x.created_at, reverse=True)\n\n return history",
"def channel_distances_downstream(self, ch_nodes):\n ch_links = self._grid.at_node[\"flow__link_to_receiver_node\"][ch_nodes]\n ch_dists = np.empty_like(ch_nodes, dtype=float)\n # dists from ch head, NOT drainage divide\n ch_dists[0] = 0.0\n np.cumsum(self._grid.length_of_d8[ch_links[:-1]], out=ch_dists[1:])\n return ch_dists",
"def get_diff_taps_as_string(self):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_diff_taps_as_string(self)",
"def receive( self, channel, differential ):\n\t\tresponse = self._spi.xfer([1, channel | differential, 0])\n\t\t# Capture 11 bits (null bit + 10 bit result)\n\t\tvalue = (((response[1] & 0b11) << 8) | (response[2]))\n\n\t\treturn value",
"def get_existing_traces_by_channel(self) -> Dict[int, List[Tuple[int, str]]]:\n ret = {}\n for i in range(1, 9):\n traces = self.ask(f\"CALC{i}:PAR:CAT:EXT?\").strip('\"')\n if traces == \"NO CATALOG\":\n continue\n else:\n ret[i] = []\n traces = traces.split(',')\n names = traces[::2]\n params = traces[1::2]\n for n, p in zip(names, params):\n ret[i].append((int(n.split('_')[-1]), p))\n return ret",
"def feed_comparison(self, channel):\n comparison_results = []\n retval = []\n # Alert if tower is not in feed DB\n if (channel[\"cgi_str\"] not in self.bad_cgis and\n channel[\"cgi_str\"] not in self.cgi_whitelist and\n channel[\"cgi_str\"] not in self.good_cgis):\n comparison_results.append(self.check_channel_against_feed(channel))\n # Else, be willing to alert if channel is not in range\n if (channel[\"cgi_str\"] not in self.bad_cgis and\n channel[\"cgi_str\"] not in self.cgi_whitelist and\n channel[\"cgi_str\"] not in self.good_cgis):\n comparison_results.append(self.check_channel_range(channel))\n # Test for primary BTS change\n if channel[\"cell\"] == '0':\n comparison_results.append(self.process_cell_zero(channel))\n for result in comparison_results:\n if result != ():\n retval.append(result)\n if len(retval) == 0:\n if channel[\"cgi_str\"] not in self.good_cgis:\n self.good_cgis.append(channel[\"cgi_str\"])\n return retval",
"def calc_delta_thresh(self):\n threshold_vals = self.calc_threshold_vals()\n return np.array(threshold_vals - threshold_vals[0])",
"def get_keys( self, step=None, channel=None):\r\n if step==None:\r\n return self.channels[channel] if channel<self.nchannels else []\r\n if channel==None:\r\n return self.steps[step] if step<self.nsteps else []\r\n return self.steps[step][channel] if (steps<self.nsteps and channel<nchannels) else []",
"def calculate_distances(deltas: np.ndarray, sampling_freq_hz: float, c: float = 343) -> np.ndarray:\n conversion_factor = c / (2 * sampling_freq_hz)\n\n deltas_t = deltas.T\n\n k1 = deltas * np.eye(deltas.shape[0]) @ np.ones(deltas.shape)\n k2 = k1.T\n k = k1 + k2\n\n return conversion_factor * (np.abs(deltas - deltas_t) + k)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
get_taps_as_string(self) > string Return the taps as a formatted string for printing | def get_taps_as_string(self):
return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_taps_as_string(self) | [
"def get_taps_as_string(self):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_taps_as_string(self)",
"def get_diff_taps_as_string(self):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_diff_taps_as_string(self)",
"def get_diff_taps_as_string(self):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_diff_taps_as_string(self)",
"def printable(self):\n\t\tif not self.is_set:\n\t\t\treturn(\" \")\n\n\t\ttoPrint = \"A: ({}, {}, {})\".format(self.node_A,self.port_A,self.app_id_A) + \" \"\n\t\ttoPrint += \"B: ({}, {}, {})\".format(self.node_B,self.port_B,self.app_id_B) + \" \"\n\t\ttoPrint = toPrint + \"Entanglement ID: \" + str(self.id_AB) + \" \"\n\t\ttoPrint = toPrint + \"Timestamp: \" + str(self.timestamp) + \" \"\n\t\ttoPrint = toPrint + \"Time of Goodness: \" + str(self.ToG) + \" \"\n\t\ttoPrint = toPrint + \"Goodness: \" + str(self.goodness) + \" \"\n\t\ttoPrint = toPrint + \"Directionality Flag: \" + str(self.DF)\n\t\treturn(toPrint)",
"def _stack_values_to_string(self, stack_values):\n\n strings = []\n for stack_value in stack_values:\n if self.solver.symbolic(stack_value):\n concretized_value = \"SYMBOLIC - %s\" % repr(stack_value)\n else:\n if len(self.solver.eval_upto(stack_value, 2)) == 2:\n concretized_value = repr(stack_value)\n else:\n concretized_value = repr(stack_value)\n strings.append(concretized_value)\n\n return \" .. \".join(strings)",
"def get_perfdata(self) -> str:\n return ' '.join([str(x) for x in self._perfdata])",
"def state_to_string(state):\n return ('i: \\t' + str(state[2][0]) + '\\t' + str(state[2][1]) + '\\n'\n 'v: \\t' + str(state[1][0]) + '\\t'+str(state[1][1]) + '\\n'\n 'o: \\t' + str(state[0][0]) + '\\t'+str(state[0][1]) + '\\n'\n 'h: \\t' + str(state[3][0]) + '\\t'+str(state[3][1]) + '\\n'\n 'p: \\t' + str(state[4][0]) + '\\t'+str(state[4][1]) + '\\n')",
"def attractorstring(self):\n attractorstring = \"\"\n _, attractor = RBN.get_cycle(self.nodes)\n for count, state in enumerate(attractor):\n attractorstring += str(count) + \" \" + str(state) + linesep\n return attractorstring",
"def do_str(self, indent):\n items = [\" \" * indent + \"{:32}\".format(self.name.split(_SEP)[-1]) + \" \" * (10 - indent) +\n \": {:8.4f} seconds\".format(self.time)]\n ctimers = sorted(self.children(), key=lambda t: t.name)\n if ctimers:\n indent += _INDENT\n self_time = self.time - self.child_time()\n items.append(\" \" * indent + \"{:32}\".format(\"self time\") + \" \" * (10 - indent) +\n \": {:8.4f} seconds\".format(self_time))\n for t in ctimers:\n items.append(t.do_str(indent))\n return \"\\n\".join(items)",
"def __str__(self):\n r = ''\n r += 'Timings:\\n' + \\\n '\\tOrdering:\\t\\t{}s\\n'.format(self.ordering_time) + \\\n '\\tConstruction:\\t{}s\\n'.format(self.construction_time) + \\\n '\\tMinimising:\\t{}s\\n'.format(self.minimising_time)\n r += 'Nodes:\\n' + \\\n '\\tNot minimized:\\t\\t{}\\n'.format(self.bdd_nodes) + \\\n '\\tMinimised:\\t\\t\\t{}'.format(self.min_bdd_nodes)\n return r",
"def __str__(self):\n if self.is_empty():\n return \"Stack is empty\"\n string_repr = \"Top of stack\\n===================\\n\"\n for node in reversed(self.data):\n string_repr += \"{{left_visited: {}, node: {}, right_visited: {}}}\".format(node[\"left_visited\"], node[\"node\"], node[\"right_visited\"])\n return string_repr",
"def __str__(self):\n text = \"Attractor \" + self.label + \"\\n\"\n text += \"\\tLength: \"+ str(len(self.states)) + \"\\n\"\n text += \"\\tBasin: \"+ str(self.basin) + \"\\n\"\n text += \"\\tWith nodes: \"+ ', '.join(self.node_names) + \"\\n\" \n text += \"\\tWith states: \"\n for a in self.states: text += \" -> \" + state_to_str(a)\n return text.strip()",
"def get_print(self):\n return ('Trip\\n\\tstart date: {}\\n\\tfinal date: {}\\n\\tgasoline: {}'.\n format(time.strftime(\"%Y.%m.%d %H:%M\",\n time.localtime(self.start_date)),\n time.strftime(\"%Y.%m.%d %H:%M\",\n time.localtime(self.end_date)),\n self.fuel))",
"def __str__(self):\n return '\\t'.join(self._flowgram.split())",
"def __str__(self):\n return str(self.stack)",
"def getPhasesAsString(self):\n result = \"\"\n for phase in self.iterPhases():\n result += \"%s\\n\" % str(phase)\n return result",
"def tprint(s):\n print(\"[\" + time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()) + \"] \" + s)",
"def __str__(self):\n import abjad\n items = [str(_) for _ in self]\n separator = ' '\n if self.item_class is abjad.NumberedPitchClass:\n separator = ', '\n return 'PC<{}>'.format(separator.join(items))",
"def as_str(terminals):\n return \"\".join(map(str, terminals))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
get_diff_taps_as_string(self) > string Return the derivative filter taps as a formatted string for printing | def get_diff_taps_as_string(self):
return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_diff_taps_as_string(self) | [
"def get_diff_taps_as_string(self):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_diff_taps_as_string(self)",
"def get_taps_as_string(self):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_taps_as_string(self)",
"def get_taps_as_string(self):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_taps_as_string(self)",
"def get_perfdata(self) -> str:\n return ' '.join([str(x) for x in self._perfdata])",
"def delta2str(td: timedelta) -> str:\n s = \"\"\n\n def build_s(v, suffix):\n nonlocal s\n if v > 0:\n s += f\"{v}{suffix}\"\n\n days_left, seconds_left = float(td.days), td.seconds\n y = int(days_left / DAYS_PER_YEAR)\n days_left -= y * DAYS_PER_YEAR\n build_s(y, \"y\")\n d = int(days_left)\n build_s(d, \"d\")\n seconds_left += int((days_left - d) * SEC_PER_HOUR * 24)\n build_s(int(seconds_left / SEC_PER_HOUR), \"h\")\n build_s(int(seconds_left % SEC_PER_HOUR), \"s\")\n return s",
"def generate_output(tree_diff):\n output_diff = []\n for diff in tree_diff:\n if diff['previous_value']:\n output_diff.append(\n '{0}{1}: {2}\\n{3}{4}: {5}\\n'.format(\n STATES['deleted'],\n diff['name'],\n diff['previous_value'],\n STATES['added'],\n diff['name'],\n diff['actual_value'],\n ),\n )\n else:\n output_diff.append(\n '{0}{1}: {2}\\n'.format(\n STATES[diff['state']],\n diff['name'],\n diff['actual_value'],\n ),\n )\n return ''.join(['{\\n'] + output_diff + ['}'])",
"def _fmt_tags(self):\n return \"\\t\".join(\n \"{}:{}:{}\".format(k, REV_TYPES.get(k), v) for k, v in self[-1].items()\n )",
"def __str__(self):\n return ''.join(list(self.signal_history))",
"def __repr__(self):\n s = ''\n if self.tags != []:\n s += f'{\".\".join(self.tags)}.'\n s += f'{self.transfer_number}'\n return s",
"def __repr__(self):\n k = self._k\n if k == 1:\n kth = 'First'\n elif k == 2:\n kth = 'Second'\n elif k == 3:\n kth = 'Third'\n else:\n kth = '%s-th'%k\n return \"%s derivative of %s\"%(kth, self._lseries)",
"def diff2str(diff):\n diff_str = str(diff)\n diff_str = diff_str.replace(\"\\n---\", _(\"\\n\\n::\\n\\n\"), 1)\n diff_str = diff_str.replace(\"\\nlhs\", _(\"\\n\\nAvant\")).replace(\"\\nrhs\", _(\"\\n\\nAprès\"))\n diff_str = diff_str.replace(\"\\nfile added in rhs\", \"\")\n diff_str = diff_str.replace(\"\\nfile deleted in rhs\", _(\"\\n\\n::\\n\\nFichier supprimé\"))\n diff_str = diff_str.replace(\"\\nfile renamed from \", _(\"\\n\\n::\\n\\nFichier renommé, à l'origine \"))\n diff_str = diff_str.replace(\"\\nfile renamed to \", _(\"\\n\\n::\\n\\nFichier renommé en \"))\n diff_str = diff_str.replace(\"\\nOMITTED BINARY DATA\", _(\"\\n\\n::\\n\\nDonnées binaires omises\"))\n\n double_colon_idx = diff_str.find(\"::\")\n diff_str = diff_str[:double_colon_idx] + diff_str[double_colon_idx:].replace(\"\\n\", \"\\n \")\n\n return diff_str",
"def _stack_values_to_string(self, stack_values):\n\n strings = []\n for stack_value in stack_values:\n if self.solver.symbolic(stack_value):\n concretized_value = \"SYMBOLIC - %s\" % repr(stack_value)\n else:\n if len(self.solver.eval_upto(stack_value, 2)) == 2:\n concretized_value = repr(stack_value)\n else:\n concretized_value = repr(stack_value)\n strings.append(concretized_value)\n\n return \" .. \".join(strings)",
"def get_diff_taps(self):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_diff_taps(self)",
"def get_diff_taps(self):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_diff_taps(self)",
"def to_string(self):\n string = []\n\n if isinstance(self.weights, list): # This State is belong to dur model, print name only\n string.append(\"~s\" + ' \"' + self.name + '\"')\n for ste in self.pdf:\n if ste:\n string.append(ste.to_string())\n\n if \"\" in string:\n string.remove(\"\")\n\n return \"\\n\".join(string)",
"def dxfstr(self) -> str:\n return ''.join(tag.dxfstr() for tag in self.dxftags())",
"def attractorstring(self):\n attractorstring = \"\"\n _, attractor = RBN.get_cycle(self.nodes)\n for count, state in enumerate(attractor):\n attractorstring += str(count) + \" \" + str(state) + linesep\n return attractorstring",
"def __str__(self):\n r = ''\n r += 'Timings:\\n' + \\\n '\\tOrdering:\\t\\t{}s\\n'.format(self.ordering_time) + \\\n '\\tConstruction:\\t{}s\\n'.format(self.construction_time) + \\\n '\\tMinimising:\\t{}s\\n'.format(self.minimising_time)\n r += 'Nodes:\\n' + \\\n '\\tNot minimized:\\t\\t{}\\n'.format(self.bdd_nodes) + \\\n '\\tMinimised:\\t\\t\\t{}'.format(self.min_bdd_nodes)\n return r",
"def toString(self):\n ln0 = Interval.toString(self)\n ln1 = \"Tone Frequency = %d Hz \\n\"%self.freq\n ln2 = \"freqType= %s\"%self.freqType #raj-change for freq\n return ln0+ln1+ln2 #raj- added ln2"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
set_loop_bandwidth(self, float bw) Set the loop bandwidth. Set the loop filter's bandwidth to bw. This should be between 2pi/200 and 2pi/100 (in rads/samp). It must also be a positive number. When a new loop bandwidth is set, the gains, alpha and beta, of the loop are recalculated by a call to update_gains(). | def set_loop_bandwidth(self, *args, **kwargs):
return _digital_swig.digital_pfb_clock_sync_fff_sptr_set_loop_bandwidth(self, *args, **kwargs) | [
"def set_loop_bandwidth(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_set_loop_bandwidth(self, *args, **kwargs)",
"def set_bandwidth(self, bandwidth):\r\n self.obs.bandwidthHz = float(bandwidth)\r\n self.ave.bandwidthHz = float(bandwidth)\r\n self.hot.bandwidthHz = float(bandwidth)\r\n self.cold.bandwidthHz = float(bandwidth)\r\n self.ref.bandwidthHz = float(bandwidth)\r\n deltaNu = self.obs.bandwidthHz/float(self.vlen)\r\n n0 = self.obs.centerFreqHz - (self.obs.bandwidthHz/2.)\r\n nu = n0\r\n if len(self.ave.xdata) != self.vlen:\r\n self.update_len(self.ave)\r\n if len(self.hot.xdata) != self.vlen:\r\n self.update_len(self.hot)\r\n if len(self.cold.xdata) != self.vlen:\r\n self.update_len(self.cold)\r\n if len(self.ref.xdata) != self.vlen:\r\n self.update_len(self.ref)\r\n print(\"Setting Bandwidth: %10.0f Hz\" % (self.obs.bandwidthHz))\r\n for iii in range(self.vlen):\r\n self.obs.xdata[iii] = nu\r\n self.ave.xdata[iii] = nu\r\n self.hot.xdata[iii] = nu\r\n self.cold.xdata[iii] = nu\r\n self.ref.xdata[iii] = nu\r\n nu = nu + deltaNu",
"def set_wifi_bandwidth(self, bandwidth):\n if int(bandwidth) == 20:\n cmd = \"channel width 20\"\n elif int(bandwidth) == 40:\n cmd = \"channel width 40-Above\"\n else:\n raise Exception(-5,\n \"Unsupported wifi bandwidth '%s'.\" % str(bandwidth))\n for radio in self.WIFI_RADIOS:\n self._send_cmd(\"interface dot11radio \" + str(radio))\n self._send_cmd(cmd)\n self._send_cmd(\"exit\")",
"def get_loop_bandwidth(self):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_loop_bandwidth(self)",
"def get_loop_bandwidth(self):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_loop_bandwidth(self)",
"def set_bandwidth(self, out_bw, in_bw):\n self.m_outbound_bw = out_bw\n self.m_inbound_bw = in_bw",
"def set_bw(self, bw):\n return _radio_astro_swig.detect_sptr_set_bw(self, bw)",
"def set_bandwidth_limit(self, value='BWFULL'):\n #CMD$=“BWL C1,ON”\n print debug_msg.TBD_MSG",
"def set_bandwidths_and_cutoffs(self, bandwidths):\n self.logger.info(\"set_bandwidths_and_cutoffs with bandwidths=%s\", str(bandwidths))\n if not hasattr(bandwidths, '__iter__'):\n bandwidths = [bandwidths] * self.ndim\n\n if len(bandwidths) != self.ndim:\n raise AttributeError(\"Number of supplied bandwidths does not match the dimensionality of the data\")\n\n self.bandwidths = bandwidths\n # compute cutoffs directly from the kernel class method\n # if tol is None, only non-arbitrary cutoffs are applied\n self.cutoffs = self.ktype.compute_cutoffs(bandwidths, tol=self.cutoff_tol)\n # if kernels have already been set, update them\n if len(self.kernels):\n for k in self.kernels:\n k.update_bandwidths(self.bandwidths, time_cutoff=self.cutoffs[0])",
"def set_freq_damping(self, freq, damping):\n self.k = freq * freq * 4 * math.pi * math.pi\n self.b = 2 * math.sqrt(self.k) * damping\n return",
"def set_bw(self, bw):\n return _radio_astro_swig.detect_set_bw(self, bw)",
"def setWavelength(self, wl, block=False):\r\n self['WAVELENGTH'] = int(wl)\r\n if block:\r\n while True:\r\n if not self.isTuning():\r\n break\r\n time.sleep(0.1)\r\n print(\"still tuning\")",
"def SetPassBand(self, *args) -> \"void\":\n return _itkFrequencyBandImageFilterPython.itkFrequencyBandImageFilterICF2_SetPassBand(self, *args)",
"def set_if_bandwidth(instrument, if_bandwidth, window_num=1, channel_num=1):\n command = ':SENSe%s:BANDwidth:RESolution %G HZ' % (window_num, if_bandwidth)\n instrument.write(command)",
"def keyTcsBandwidth(self, key, data):\n self.sb_bandwidth = float(data[key])",
"def compute_bandwidth(self):\n self._bw, self._covariance = kde_methods.compute_bandwidth(self)",
"def SetPassBand(self, *args) -> \"void\":\n return _itkFrequencyBandImageFilterPython.itkFrequencyBandImageFilterICF3_SetPassBand(self, *args)",
"def blockJobSetSpeed(self, disk, bandwidth, flags=0):\n ret = libvirtmod.virDomainBlockJobSetSpeed(self._o, disk, bandwidth, flags)\n if ret == -1: raise libvirtError ('virDomainBlockJobSetSpeed() failed', dom=self)\n return ret",
"def setThrottleChannel(self, channel: int):\n self.axes[self.Axis.kThrottle] = channel"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
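The set_loop_bandwidth entry above (and the set_damping_factor entry that follows) both state that the gains alpha and beta are recalculated by update_gains(). Below is a minimal Python sketch of that relationship, assuming the standard second-order loop gain formulas used by GNU Radio-style control loops; the class name and attribute names are illustrative and are not taken from this dataset.

import math

class LoopGains:
    """Illustrative stand-in for the loop described above; not the
    GNU Radio implementation itself."""

    def __init__(self, loop_bw=2 * math.pi / 100, damping=math.sqrt(2) / 2):
        self.damping = damping
        self.loop_bw = loop_bw
        self.update_gains()

    def set_loop_bandwidth(self, bw):
        if bw <= 0:
            raise ValueError("loop bandwidth must be positive")
        self.loop_bw = bw
        self.update_gains()  # gains recalculated, as the docstring states

    def set_damping_factor(self, df):
        if not 0 < df < 1:
            raise ValueError("damping factor must be between 0 and 1")
        self.damping = df
        self.update_gains()

    def update_gains(self):
        # Assumed standard second-order loop relations: alpha acts as the
        # proportional gain, beta as the integral gain.
        denom = 1.0 + 2.0 * self.damping * self.loop_bw + self.loop_bw ** 2
        self.alpha = (4.0 * self.damping * self.loop_bw) / denom
        self.beta = (4.0 * self.loop_bw ** 2) / denom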
set_damping_factor(self, float df) Set the loop damping factor. Set the loop filter's damping factor to df. The damping factor should be sqrt(2)/2.0 for critically damped systems. Set it to anything else only if you know what you are doing. It must be a number between 0 and 1. When a new damping factor is set, the gains, alpha and beta, of the loop are recalculated by a call to update_gains(). | def set_damping_factor(self, *args, **kwargs):
return _digital_swig.digital_pfb_clock_sync_fff_sptr_set_damping_factor(self, *args, **kwargs) | [
"def set_freq_damping(self, freq, damping):\n self.k = freq * freq * 4 * math.pi * math.pi\n self.b = 2 * math.sqrt(self.k) * damping\n return",
"def get_damping_factor(self):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_damping_factor(self)",
"def setFlyingVelocityMod(self, flying):\n getHandle().setFlyingVelocityMod(flying)",
"def get_damping_factor(self):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_damping_factor(self)",
"def set_loop_bandwidth(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_set_loop_bandwidth(self, *args, **kwargs)",
"def velocity_damping(self, kpar):\n return (1.0 + (kpar * self.sigma_v(self.ps_redshift))**2.)**-1.",
"def set_loop_bandwidth(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_set_loop_bandwidth(self, *args, **kwargs)",
"def setDopplerFactor(self, factor):\n self.audio_manager.audio3dSetDopplerFactor(factor)",
"def set_permeability(self):\n # Viscosity has units of Pa s, and is consequently divided by the scalar scale.\n viscosity = self.fluid.dynamic_viscosity() / self.scalar_scale\n gb = self.gb\n key = self.scalar_parameter_key\n from_iterate = True\n blocking_perm = self.params.get(\"blocking_perm\", None)\n for g, d in gb:\n if g.dim < self.Nd:\n # Set fracture permeability\n specific_volumes = self.specific_volumes(g, from_iterate)\n\n if d[\"node_number\"] == 1 or blocking_perm is None:\n # Use cubic law in fractures. First compute the unscaled\n # permeability\n apertures = self.aperture(g, from_iterate=from_iterate)\n apertures_unscaled = apertures * self.length_scale\n k = np.power(apertures_unscaled, 2) / 12 / viscosity\n else:\n # Blocking and intersection\n k = blocking_perm\n d[pp.PARAMETERS][key][\"perm_nu\"] = k\n # Multiply with the cross-sectional area\n k = k * specific_volumes\n # Divide by fluid viscosity and scale back\n kxx = k / self.length_scale ** 2\n\n else:\n # Use the rock permeability in the matrix\n kxx = (\n self.rock.PERMEABILITY\n / viscosity\n * np.ones(g.num_cells)\n / self.length_scale ** 2\n )\n K = pp.SecondOrderTensor(kxx)\n d[pp.PARAMETERS][key][\"second_order_tensor\"] = K\n\n # Normal permeability inherited from the neighboring fracture g_l\n for e, d in gb.edges():\n mg = d[\"mortar_grid\"]\n g_l, _ = gb.nodes_of_edge(e)\n data_l = gb.node_props(g_l)\n a = self.aperture(g_l, from_iterate)\n V = self.specific_volumes(g_l, from_iterate)\n # We assume isotropic permeability in the fracture, i.e. the normal\n # permeability equals the tangential one\n k_s = data_l[pp.PARAMETERS][key][\"second_order_tensor\"].values[0, 0]\n # Division through half the aperture represents taking the (normal) gradient\n kn = mg.slave_to_mortar_int() * np.divide(k_s, a * V / 2)\n pp.initialize_data(mg, d, key, {\"normal_diffusivity\": kn})",
"def update(self,lr):\n self.sample_minibatch(lr)\n # Calculate gradients at current point\n dlogbeta = lr.dlogpost(self)\n lr.grad_sample[self.iter-1,:] = dlogbeta\n\n # Update parameters using SGD\n eta = np.random.normal( scale = self.epsilon )\n lr.beta += self.epsilon / 2 * dlogbeta + eta",
"def setGravity( self, direction, accel ):\n self.gravangle = direction\n self.gravaccel = accel\n for body in self.bodies:\n body.setGravity( direction, accel )\n return",
"def update_params(self): # computes gradient descent\n self.W=self.W-(self.rate*self.dW)\n self.b=self.b-(self.rate*self.db)",
"def set_de_jager_wind_efficiency(self, index_of_the_star, de_jager_wind_efficiency):\n return self.set_control(index_of_the_star,'de_jager_scaling_factor',de_jager_wind_efficiency)",
"def add_friction(self,mu=0):\n self.fric_coeff=mu",
"def update_parameters(self, params):\n self.tbf.update_lengthscales(np.exp(params[:self.D])) # update TBF lengthscales\n self.tbf.update_amplitude(np.exp(2*params[self.D])) # update TBF amplitude\n self.var_n = np.exp(2*params[self.D + 1]) # update noise variance\n self.tbf.update_frequencies(params[self.D + 2:]) # update the TBF spectral frequencies",
"def apply_impulse(self, p):\n\t\tself.force=p",
"def lr_decay(self):\n\t\tself.lr = self.lr * self.gamma",
"def set_detachment_generalized_fqs_law(self,\n KB=1e-4,\n KR=1e-4,\n MB=0.5,\n NB=1,\n BETA=1):\n self.parameters['OPTNOFLUVIAL'] = 0\n self.parameters['DETACHMENT_LAW'] = 3\n self.parameters['KB'] = KB\n self.parameters['KR'] = KR\n self.parameters['MB'] = MB\n self.parameters['NB'] = NB\n self.parameters['BETA'] = BETA",
"def __init__(self, config):\n if 'lr_decay_rate' not in config.keys():\n raise ValueError(\"Missing 'lr_decay_rate' from config\")\n self.k = config['lr_decay_rate']\n super(IterationDecay, self).__init__(\n update_granularity='step', config=config)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
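As a usage note for the two setters above, the sketch below shows how the fff synchronizer might be constructed and retuned from Python. The constructor arguments follow the pfb_clock_sync_fff signature quoted later in this file; the firdes root-raised-cosine helper, the module paths, and the specific parameter values are assumptions based on the GNU Radio 3.7-era API, not facts taken from this dataset.

import math
from gnuradio import digital, filter

sps = 4          # samples per symbol (assumed)
nfilts = 32      # number of polyphase arms (filter_size)
loop_bw = 2 * math.pi / 100

# Root-raised-cosine matched filter; one long prototype is split across
# the nfilts polyphase arms by the block.
rrc_taps = filter.firdes.root_raised_cosine(
    nfilts,              # gain
    nfilts * sps,        # sampling rate relative to the symbol rate
    1.0,                 # symbol rate
    0.35,                # excess bandwidth (assumed)
    11 * sps * nfilts)   # number of taps

sync = digital.pfb_clock_sync_fff(sps, loop_bw, rrc_taps,
                                  nfilts, nfilts / 2, 1.5, 1)

# The setters documented above can retune the loop at runtime.
sync.set_loop_bandwidth(2 * math.pi / 150)
sync.set_damping_factor(math.sqrt(2) / 2.0)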
set_max_rate_deviation(self, float m) Set the maximum deviation from 0 d_rate can have | def set_max_rate_deviation(self, *args, **kwargs):
return _digital_swig.digital_pfb_clock_sync_fff_sptr_set_max_rate_deviation(self, *args, **kwargs) | [
"def set_max_rate_deviation(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_set_max_rate_deviation(self, *args, **kwargs)",
"def error_rate_deviation(self, error_rate_deviation):\n\n self._error_rate_deviation = error_rate_deviation",
"def property_max_rate(self, property_max_rate):\n\n self._property_max_rate = property_max_rate",
"def setMaxPeriod(self, maxPeriod):\n hal.setCounterMaxPeriod(self.counter, float(maxPeriod))",
"def maxRate(rate):\n if rate == 'None' or rate is None:\n rate = 0\n else:\n try:\n rate = int(rate)\n except:\n return getRate()\n\n if rate < 0:\n rate = 0\n \n info('Resetting MAX_RATE to: ' + str(rate) + 'KB/s')\n \n rate = rate * 1024\n \n restartCheckRead = False\n if rate == 0:\n if Hellanzb.ht.unthrottleReadsID is not None and \\\n not Hellanzb.ht.unthrottleReadsID.cancelled and \\\n not Hellanzb.ht.unthrottleReadsID.called:\n Hellanzb.ht.unthrottleReadsID.cancel()\n\n if Hellanzb.ht.checkReadBandwidthID is not None and \\\n not Hellanzb.ht.checkReadBandwidthID.cancelled:\n Hellanzb.ht.checkReadBandwidthID.cancel()\n Hellanzb.ht.unthrottleReads()\n elif Hellanzb.ht.readLimit == 0 and rate > 0:\n restartCheckRead = True\n \n Hellanzb.ht.readLimit = rate\n\n if restartCheckRead:\n Hellanzb.ht.readThisSecond = 0 # nobody's been resetting this value\n reactor.callLater(1, Hellanzb.ht.checkReadBandwidth)\n return getRate()",
"def decay_rate_SM(mp, ml): \n \n drate= mp* ml*ml*(1-ml*ml/(mp*mp))*(1-ml*ml/(mp*mp))/(1*8*np.pi);#2*8*3.1415\n\n return drate",
"def max_rate(self) -> float:\n type_info = cast(\n EventableStateVariableTypeInfo, self._state_variable_info.type_info\n )\n return type_info.max_rate or 0.0",
"def get_sample_clock_max_rate (self):\n d = float64(0)\n CALL ('GetSampClkMaxRate', self, ctypes.byref(d))\n return d.value",
"def error_rate100_deviation(self, error_rate100_deviation):\n\n self._error_rate100_deviation = error_rate100_deviation",
"def setStdDev(self, sigma) -> None:\n ...",
"def fulltext_max_rate(self, fulltext_max_rate):\n\n self._fulltext_max_rate = fulltext_max_rate",
"def set_amax(self, value):\n assert 0 <= value <= 1, 'Invalid scale factor value'\n self._amax = value",
"def error_rate50_deviation(self, error_rate50_deviation):\n\n self._error_rate50_deviation = error_rate50_deviation",
"def density_deviation(self, density_deviation):\n\n self._density_deviation = density_deviation",
"def max_sample_value(self, max_sample_value):\n self._max_sample_value = max_sample_value",
"def get_convert_max_rate(self):\n d = float64(0)\n CALL ('GetAIConvMaxRate', self, ctypes.byref(d))\n return d.value",
"def mag_rate(self):\n return self._mag_rate",
"def setMaxPeriod(self, maxPeriod):\n hal.setEncoderMaxPeriod(self.encoder, maxPeriod)",
"def set_yaw_limit(self, max_yaw_rate):\n for m in self._mux:\n m.max_yaw_rate = max_yaw_rate"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
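For the set_max_rate_deviation entry above, a short sketch of how such a limit is typically enforced (an assumption about the block's behaviour, not quoted from its source): the per-sample rate adjustment is clipped into the window [-m, +m] around zero.

def clip_rate(d_rate, max_rate_deviation):
    # Keep the estimated rate offset within the allowed window around 0.
    return max(-max_rate_deviation, min(d_rate, max_rate_deviation))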
get_loop_bandwidth(self) > float Returns the loop bandwidth. | def get_loop_bandwidth(self):
return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_loop_bandwidth(self) | [
"def get_loop_bandwidth(self):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_loop_bandwidth(self)",
"def set_loop_bandwidth(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_set_loop_bandwidth(self, *args, **kwargs)",
"def set_loop_bandwidth(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_set_loop_bandwidth(self, *args, **kwargs)",
"def _get_bandwidth(self):\n return self.__getter('get_bandwidth')",
"def get_maxbandwidth(self):\n return self.options['maxbandwidth']",
"def compute_bandwidth(self):\n self._bw, self._covariance = kde_methods.compute_bandwidth(self)",
"def get_bandwidth(self, fingerprint):\n\n try:\n desc = self.control.get_server_descriptor(fingerprint)\n return desc.observed_bandwidth / 1000\n except stem.ControllerError:\n return 0",
"def compute_bw_efficiency(self):\n comm_len = timedelta(microseconds=0)\n for txop in self.txops:\n comm_len += txop.stop_usec - txop.start_usec\n\n bw_eff = comm_len / timedelta(microseconds=100000)\n return bw_eff",
"def shaping_peak_bandwidth(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"shaping_peak_bandwidth\")",
"def bandwidthAvg(self):\n raise NotImplemented # TODO",
"def shaping_peak_bandwidth(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"shaping_peak_bandwidth\")",
"def bandwidth_control(self):\n ret = self._get_attr(\"bandwidthControl\")\n return IBandwidthControl(ret)",
"def time_bandwidth_product(self):\r\n return (lib.standard_deviation(self.t, self.intensity) *\r\n lib.standard_deviation(self.w, self.spectral_intensity))",
"def calc_bandwidth(self):\n if self.stream_data[-1][2]!= 0:\n download_time = self.stream_data[-1][0]-self.stream_data[-2][0]\n estimated_bandwidth = self.stream_data[-1][2]/download_time\n self.stream_data[-1].append(estimated_bandwidth)\n elif self.req==1:\n for i in range(1,len(self.stream_data)):\n if self.stream_data[-i][2] !=0:\n download_time = self.stream_data[-i][0]-self.stream_data[-i-1][0]\n estimated_bandwidth = self.stream_data[-i][2]/download_time\n self.stream_data[-1].append(estimated_bandwidth)\n break\n elif self.stream_data[-1][2] == -1:\n self.stream_data[-1].append(-1)\n else:\n self.stream_data[-1].append(self.stream_data[-2][3])\n self.stream_data[-1].append(self.quali_req)",
"def shaping_average_bandwidth(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"shaping_average_bandwidth\")",
"def getSampleRate(self) -> \"int\":\n return _coin.SoVRMLAudioClip_getSampleRate(self)",
"def _get_channel_speed(self):\n return self.__channel_speed",
"def get_speed(self):\n if self.speed and self.period:\n return self.speed / 1024\n else:\n return 0",
"def bitrate(self):\n b = 0\n if 'bit_rate' in self.__dict__:\n try:\n b = int(self.__dict__['bit_rate'])\n except Exception as e:\n pass\n return b"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
get_damping_factor(self) > float Returns the loop damping factor. | def get_damping_factor(self):
return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_damping_factor(self) | [
"def get_damping_factor(self):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_damping_factor(self)",
"def velocity_damping(self, kpar):\n return (1.0 + (kpar * self.sigma_v(self.ps_redshift))**2.)**-1.",
"def set_damping_factor(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_set_damping_factor(self, *args, **kwargs)",
"def stiffness(self):\n return self.force()*self.b",
"def getDopplerVelocity(self) -> \"float\":\n return _coin.SoVRMLSound_getDopplerVelocity(self)",
"def set_freq_damping(self, freq, damping):\n self.k = freq * freq * 4 * math.pi * math.pi\n self.b = 2 * math.sqrt(self.k) * damping\n return",
"def getDopplerFactor(self) -> \"float\":\n return _coin.SoVRMLSound_getDopplerFactor(self)",
"def speedMultiplier(self) -> float:\n return self._getMultiplier('speed')",
"def damping_coefficient(self, damping_coefficient):\n if (self.local_vars_configuration.client_side_validation and\n damping_coefficient is not None and damping_coefficient > 10000000): # noqa: E501\n raise ValueError(\"Invalid value for `damping_coefficient`, must be a value less than or equal to `10000000`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n damping_coefficient is not None and damping_coefficient < 0): # noqa: E501\n raise ValueError(\"Invalid value for `damping_coefficient`, must be a value greater than or equal to `0`\") # noqa: E501\n\n self._damping_coefficient = damping_coefficient",
"def acceleration(self):\n if self.state.lightning:\n return self.character.acceleration / 2\n else:\n return self.character.acceleration",
"def pollutionMultiplier(self) -> float:\n return self._getMultiplier('pollution')",
"def calc_blade_friction_force():\r\n # return c_a * d * w\r\n return 0",
"def getDamageMultiplier(self):\n return 1.0",
"def initial_dose(self) -> float:\n return self.__Initial_dose",
"def accel(self):\n return self.force()/self.mass",
"def angular_velocity(self):\n return 0.0",
"def _partial_penalty_factor(self) -> float:\n penalty = ((self.time_penalty_function()) ** self.alpha) * self.n_of_objectives\n if penalty < 0:\n penalty = 0\n if penalty > 1:\n penalty = 1\n return penalty",
"def elo_k_decay_val(self) -> float:\n return self._elo_k_decay_var",
"def dv(self):\n return self.accel()*self.__dt"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
get_clock_rate(self) > float Returns the current clock rate. | def get_clock_rate(self):
return _digital_swig.digital_pfb_clock_sync_fff_sptr_get_clock_rate(self) | [
"def get_clock_rate(self):\n return _digital_swig.digital_pfb_clock_sync_ccf_sptr_get_clock_rate(self)",
"def get_sample_clock_rate(self):\n d = float64(0)\n CALL ('GetSampClkRate', self, ctypes.byref(d))\n return d.value",
"def get_convert_clock_rate(self):\n d = float64(0)\n CALL ('GetAIConvRate', self, ctypes.byref(d))\n return d.value",
"def getRate(self):\n return self.distancePerPulse / self.getPeriod()",
"def get_rate(self) -> float:\n return self._count / (time.time() - self._start_time)",
"def sample_rate(self) -> float:\n return self._rate",
"def rate(self):\n return self._rate",
"def get_sample_clock_max_rate (self):\n d = float64(0)\n CALL ('GetSampClkMaxRate', self, ctypes.byref(d))\n return d.value",
"def getScheduleRate(self):\n schedule_rate = DPxGetDoutSchedRate()\n return schedule_rate[0]",
"def relative_rate(self):\n return _raw_util.raw_pnc_frequency_modulator_fc_sptr_relative_rate(self)",
"def frame_rate(self):\n return self._frame_rate",
"def getRate(self):\n return hal.getEncoderRate(self.encoder)",
"def flow_rate_flow(self):\n flow_rate = (math.pi * self.diameter**2 * self.length_per_minute()) / 4\n return flow_rate",
"def sweep_rate(self):\n return float(self.query('R9')[1:])",
"def Rate(self):\n freq = float(rospy.get_param('/publish_freq', None))\n\n if freq is None:\n raise RuntimeError(\"No Frequency has been set by the driving node..\")\n elif freq == 0.0:\n rospy.logwarn(\"Simulator running as fast as possible. Returning rate of 1000\")\n else:\n return rospy.Rate(freq)",
"def sample_rate(self) -> int:\n logger.debug(\"'self._sample_rate' is set to %.1f [Hz].\", self._sample_rate)\n return self._sample_rate",
"def get_speed(self):\n if self.speed and self.period:\n return self.speed / 1024\n else:\n return 0",
"def RxFilterRate(self):\n if self.force_auto_sync:\n self.get('RxFilterRate')\n return self._RxFilterRate",
"def get_rate(self):\r\n command = \":scan:rate?\\n\"\r\n self._log_write(command, mode=\"write\")\r\n self.ser.write(command)\r\n answer = self.ser.read(3)\r\n self._log_write(answer, mode=\"read\")\r\n rlvalue = float(answer[:-2])\r\n self.Stat = self.Stat._replace(rate=rlvalue)\r\n return rlvalue"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
pfb_clock_sync_fff(double sps, float loop_bw, __dummy_4__ taps, unsigned int filter_size = 32, float init_phase = 0, float max_rate_deviation = 1.5, int osps = 1) > digital_pfb_clock_sync_fff_sptr Timing synchronizer using polyphase filterbanks. This block performs timing synchronization for PAM signals by minimizing the derivative of the filtered signal, which in turn maximizes the SNR and minimizes ISI. This approach works by setting up two filterbanks; one filterbank contains the signal's pulse shaping matched filter (such as a root raised cosine filter), where each branch of the filterbank contains a different phase of the filter. The second filterbank contains the derivatives of the filters in the first filterbank. Thinking of this in the time domain, the first filterbank contains filters that have a sinc shape to them. We want to align the output signal to be sampled at exactly the peak of the sinc shape. The derivative of the sinc contains a zero at the maximum point of the sinc (sinc(0) = 1, sinc(0)' = 0). Furthermore, the region around the zero point is relatively linear. We make use of this fact to generate the error signal. | def pfb_clock_sync_fff(*args, **kwargs):
return _digital_swig.pfb_clock_sync_fff(*args, **kwargs) | [
"def fn_buildFilters(params, fs):\n bandPassRange = params.bpRanges\n params.filtType = 'bandpass'\n params.filterSignal = True\n \n # Handle different filter cases:\n # 1) low pass\n if params.bpRanges[0] == 0:\n # they only specified a top freqency cutoff, so we need a low pass\n # filter\n bandPassRange = params.bpRanges[1]\n params.filtType = 'low'\n if bandpassRange == fs/2:\n # they didn't specify any cutoffs, so we need no filter\n params.filterSignal = False\n \n # 2) High passs\n if params.bpRanges[1] == fs/2 and params.filterSignal:\n # they only specified a lower freqency cutoff, so we need a high pass\n # filter\n bandPassRange = params.bpRanges[0]\n params.filtType = 'high'\n \n if params.filterSignal:\n params.fB, params.fA = signal.butter(params.filterOrder, bandPassRange/(fs/2),btype=params.filtType)\n \n # filtTaps = length(fB)\n previousFs = fs\n \n params.fftSize = int(math.ceil(fs * params.frameLengthUs / 10**6))\n if params.fftSize % 2 == 1:\n params.fftSize = params.fftSize - 1 # Avoid odd length of fft\n\n params.fftWindow = signal.windows.hann(params.fftSize)\n\n lowSpecIdx = int(params.bpRanges[0]/fs*params.fftSize)\n highSpecIdx = int(params.bpRanges[1]/fs*params.fftSize)\n\n params.specRange = np.arange(lowSpecIdx, highSpecIdx+1)\n params.binWidth_Hz = fs / params.fftSize\n params.binWidth_kHz = params.binWidth_Hz / 1000\n params.freq_kHz = params.specRange*params.binWidth_kHz # calculate frequency axis\n return previousFs, params",
"def main():\n # Create a new instance of a SavitzkyGolayFilter filter, setting the number of left hand and right hand points to 15\n sgf = GRT.SavitzkyGolayFilter(15, 15)\n\n # Create some variables to help generate the signal data\n num_seconds = 6 # The number of seconds of data we want to generate\n t = 0 # This keeps track of the time\n t_step = 1.0 / 1000.0 # This is how much the time will be updated at each iteration in the for loop\n\n # Add the freq rates\n # The first value is the time in seconds and the second value is the frequency that should be set at that time\n freq_rates = {0: 0.1, 1: 0.5, 2: 1, 3: 2, 4: 4, 5: 8, 6: 16}\n\n # Generate the signal and filter the data\n for i in range(num_seconds * 1000):\n # Check to see if we should update the freq rate to the next value\n # Set the new frequency value\n freq = [v for (k, v) in freq_rates.items() if k > (i / 1000)][0]\n\n # Generate the signal\n signal = math.sin(t * math.tau * freq)\n\n # Filter the signal\n filtered_value = sgf.filter(signal)\n\n # Print the signal and the filtered data\n print(\"%.3f %.3f %.3f\" % (freq, signal, filtered_value))\n\n # Update the t\n t += t_step\n\n # Save the HighPassFilter settings to a file\n sgf.save(\"SavitzkyGolayFilterSettings.grt\")\n\n # We can then load the settings later if needed\n sgf.load(\"SavitzkyGolayFilterSettings.grt\")",
"def mfccInitFilterBanks(fs, nfft):\n\n # filter bank params:\n lowfreq = 133.33\n linsc = 200/3.\n logsc = 1.0711703\n numLinfilterTotal = 13\n numLogFilt = 27\n\n if fs < 8000:\n nlogfil = 5\n\n # Total number of filters\n nFiltTotal = numLinfilterTotal + numLogFilt\n\n # Compute frequency points of the triangle:\n freqs = np.zeros(nFiltTotal+2)\n freqs[:numLinfilterTotal] = lowfreq + np.arange(numLinfilterTotal) * linsc\n freqs[numLinfilterTotal:] = freqs[numLinfilterTotal-1] * logsc ** np.arange(1, numLogFilt + 3)\n heights = 2./(freqs[2:] - freqs[0:-2])\n\n # Compute filterbank coeff (in fft domain, in bins)\n fbank = np.zeros((nFiltTotal, nfft))\n nfreqs = np.arange(nfft) / (1. * nfft) * fs\n\n for i in range(nFiltTotal):\n lowTrFreq = freqs[i]\n cenTrFreq = freqs[i+1]\n highTrFreq = freqs[i+2]\n\n lid = np.arange(np.floor(lowTrFreq * nfft / fs) + 1, np.floor(cenTrFreq * nfft / fs) + 1, dtype=np.int)\n lslope = heights[i] / (cenTrFreq - lowTrFreq)\n rid = np.arange(np.floor(cenTrFreq * nfft / fs) + 1, np.floor(highTrFreq * nfft / fs) + 1, dtype=np.int)\n rslope = heights[i] / (highTrFreq - cenTrFreq)\n fbank[i][lid] = lslope * (nfreqs[lid] - lowTrFreq)\n fbank[i][rid] = rslope * (highTrFreq - nfreqs[rid])\n\n return fbank, freqs",
"def pilot_pll(xr, fq, fs, loop_type, bn, zeta):\n T = 1 / float(fs)\n # Set the VCO gain in Hz/V \n Kv = 1.0\n # Design a lowpass filter to remove the double freq term\n Norder = 5\n b_lp, a_lp = signal.butter(Norder, 2 * (fq / 2.) / float(fs))\n fstate = np.zeros(Norder) # LPF state vector\n\n Kv = 2 * np.pi * Kv # convert Kv in Hz/v to rad/s/v\n\n if loop_type == 1:\n # First-order loop parameters\n fn = bn\n Kt = 2 * np.pi * fn # loop natural frequency in rad/s\n elif loop_type == 2:\n # Second-order loop parameters\n fn = 1 / (2 * np.pi) * 2 * bn / (zeta + 1 / (4 * zeta)) # given Bn in Hz\n Kt = 4 * np.pi * zeta * fn # loop natural frequency in rad/s\n a = np.pi * fn / zeta\n else:\n print('Loop type must be 1 or 2')\n\n # Initialize integration approximation filters\n filt_in_last = 0\n filt_out_last = 0\n vco_in_last = 0\n vco_out = 0\n vco_out_last = 0\n\n # Initialize working and final output vectors\n n = np.arange(0, len(xr))\n theta = np.zeros(len(xr))\n ev = np.zeros(len(xr))\n phi_error = np.zeros(len(xr))\n # Normalize total power in an attemp to make the 19kHz sinusoid\n # component have amplitude ~1.\n # xr = xr/(2/3*std(xr));\n # Begin the simulation loop\n for kk in range(len(n)):\n # Sinusoidal phase detector (simple multiplier)\n phi_error[kk] = 2 * xr[kk] * np.sin(vco_out)\n # LPF to remove double frequency term\n phi_error[kk], fstate = signal.lfilter(b_lp, a_lp, np.array([phi_error[kk]]), zi=fstate)\n pd_out = phi_error[kk]\n # pd_out = 0\n # Loop gain\n gain_out = Kt / Kv * pd_out # apply VCO gain at VCO\n # Loop filter\n if loop_type == 2:\n filt_in = a * gain_out\n filt_out = filt_out_last + T / 2. * (filt_in + filt_in_last)\n filt_in_last = filt_in\n filt_out_last = filt_out\n filt_out = filt_out + gain_out\n else:\n filt_out = gain_out\n # VCO\n vco_in = filt_out + fq / (Kv / (2 * np.pi)) # bias to quiescent freq.\n vco_out = vco_out_last + T / 2. * (vco_in + vco_in_last)\n vco_in_last = vco_in\n vco_out_last = vco_out\n vco_out = Kv * vco_out # apply Kv\n # Measured loop signals\n ev[kk] = filt_out\n theta[kk] = np.mod(vco_out, 2 * np.pi); # The vco phase mod 2pi\n return theta, phi_error",
"def filter_fir_shared(clock, reset, x, y, b):\n assert isinstance(x, Samples)\n assert isinstance(y, Samples)\n\n ntaps = len(b)\n scnt = Signal(intbv(ntaps+1, min=0, max=ntaps+2))\n pmax = x.data.max * x.data.max\n sop = Signal(intbv(0, min=-pmax, max=pmax))\n scale = int(len(x.data)-1)\n\n xd = [Signal(intbv(0, min=x.data.min, max=x.data.max))\n for _ in range(len(b))]\n\n @always_seq(clock.posedge, reset=reset)\n def beh_sop():\n y.valid.next = False\n if scnt == ntaps+1 and x.valid:\n # tap update loop\n xd[0].next = x.data\n for ii in range(1, len(b)-1):\n xd[ii].next = xd[ii-1]\n # compute the first product \n c = b[0]\n sop.next = c * x.data\n scnt.next = 1\n elif scnt == ntaps:\n assert not x.valid\n y.data.next = sop >> scale\n y.valid.next = True\n scnt.next = scnt + 1\n elif scnt < ntaps:\n assert not x.valid\n c = b[scnt]\n sop.next = sop + c * xd[scnt]\n scnt.next = scnt + 1\n\n return hdl.instances()",
"def pink_tsp():\n \n import primes\n import cmath\n import numpy as np\n from scipy.io import wavfile\n from utility import float2pcm\n \n # User settings\n dur = 5 # length of signal (seconds)\n fs = 48000 # number of samples per second (Hz)\n nbits = 16 # number of bits per sample (bit)\n reps = 4 # number of repeated measurements (times)\n \n N = 2 ** (nextpow2(dur * fs))\n m = primes.primes_below(N / 4)\n m = m[-1]\n \n a = 2 * m * np.pi / ((N / 2) * np.log(N / 2))\n j = cmath.sqrt(-1)\n pi = (cmath.log(-1)).imag\n \n H = np.array([1])\n H = np.hstack(\n [H, np.exp(j * a * np.arange(1, N / 2 + 1) * np.log(np.arange(1, N / 2 + 1))) / np.sqrt(np.arange(1, N / 2 + 1))])\n H = np.hstack([H, np.conj(H[int((N / 2 - 1)):0:-1])])\n h = (np.fft.ifft(H)).real\n mvBefore = np.abs(h)\n mv = min(mvBefore)\n mi = np.where(mvBefore == mvBefore.min())\n mi = int(mi[0])\n h = np.hstack([h[mi:len(h)], h[0:mi]])\n h = h[::-1]\n \n Hinv = np.array([1])\n Hinv = np.hstack([Hinv, np.exp(j * a * np.arange(1, N / 2 + 1) * np.log(np.arange(1, N / 2 + 1))) * np.sqrt(\n np.arange(1, N / 2 + 1))])\n Hinv = np.hstack([Hinv, np.conj(Hinv[int((N / 2 - 1)):0:-1])])\n hinv = (np.fft.ifft(Hinv)).real\n \n hh = np.hstack((np.tile(h, (reps, 1)).flatten(), np.zeros(len(h))))\n out = hh / max(np.abs(hh)) / np.sqrt(2)\n \n wavfile.write('pinktsp.wav', fs, float2pcm(out, 'int16'))\n \n plt.specgram(out, Fs=fs)\n plt.show()",
"def pfb_clock_sync_ccf(*args, **kwargs):\n return _digital_swig.pfb_clock_sync_ccf(*args, **kwargs)",
"def opt_lf_num_bits(lf_params, min_bits, max_bits, rms_filt_error=0.1, noise_figure=1,\n sim_steps=1000, fpoints=512, mode=\"tdc\", sigma_ph=0.1):\n print(\"\\n********************************************************\")\n print(\"Optimizing loop filter digital direct form-I implementation for\")\n print(\"number of bits in fixed point data words utilized\")\n sign_bits = 1\n # fint number of integer bits needed\n int_bits = n_int_bits(lf_params)\n print(\"\\n* Integer bits = %d\"%int_bits)\n\n \"\"\" Optimization for quantization noise\n \"\"\"\n print(\"\\n* Optimizing for quantization noise:\")\n # find optimal number of bits for quantization noise\n lf_ideal = LoopFilterIIRPhase(ignore_clk=True, **lf_params)\n w = np.floor(np.random.normal(0, 0.1*lf_params[\"m\"], sim_steps))\n pow_ntdc_post_lf = var_ntdc_post_lf(lf_params, mode=mode) # variance of TDC noise at loop filter\n\n x_ideal = np.zeros(sim_steps)\n for n in range(sim_steps):\n x_ideal[n] = lf_ideal.update(w[n], 0)\n\n mses = []\n bit_range = range(min_bits-int_bits-1, max_bits-int_bits)\n for frac_bits in bit_range:\n # use a large number of int bits to avoid overflow. Tuning here is with frac bits as\n lf_quant = LoopFilterIIRPhase(ignore_clk=True, int_bits=32, frac_bits=frac_bits, quant_filt=False, **lf_params)\n x_quant = np.zeros(sim_steps)\n for n in range(sim_steps):\n x_quant[n] = lf_quant.update(w[n], 0)\n mse = np.var(x_ideal-x_quant)\n print(\"\\tN bits = %d\\tQuant noise power = %E LSB^2\"%(frac_bits+int_bits+sign_bits, mse))\n mses.append(mse)\n n = len(mses)-1\n threshold = (10**(noise_figure/10.0) - 1)*pow_ntdc_post_lf\n print(\"!&!&&!\", threshold, pow_ntdc_post_lf)\n while n>=0:\n if mses[n] > threshold:\n n = n+1 if n < len(mses) - 1 else len(mses) - 1\n break\n n -= 1\n opt_frac_bits_qn = bit_range[n]\n print(\"* Optimum int bits = %d, frac bits = %d, sign bits = 1, quant noise = %.3f LSB^2\"%(int_bits, opt_frac_bits_qn, mses[n]))\n\n \"\"\" Optimization for filter accuracy\n \"\"\"\n print(\"\\n* Optimizing for filter design accuracy:\")\n fmin = 1e2\n fclk = lf_params[\"fclk\"]\n\n a = [lf_params[\"a0\"], lf_params[\"a1\"]]\n b = [lf_params[\"b0\"], lf_params[\"b1\"], lf_params[\"b2\"]]\n f, h_ideal = scipy.signal.freqz(a, b, np.geomspace(fmin, fclk/2, fpoints), fs=fclk)\n s = 2j*np.pi*f\n l = (lf_params[\"m\"]/lf_params[\"n\"])*lf_params[\"kdco\"]*h_ideal/s\n g = l/(1+l)\n bit_range = range(min_bits-int_bits-1, max_bits-int_bits)\n mses = []\n for frac_bits in bit_range:\n _lf_params = quant_lf_params(lf_params, int_bits, frac_bits)\n a = [_lf_params[\"a0\"], _lf_params[\"a1\"]]\n b = [_lf_params[\"b0\"], _lf_params[\"b1\"], _lf_params[\"b2\"]]\n f, h = scipy.signal.freqz(a, b, np.geomspace(fmin, fclk/2, fpoints), fs=fclk)\n s = 2j*np.pi*f\n l = (_lf_params[\"m\"]/_lf_params[\"n\"])*_lf_params[\"kdco\"]*h/s\n g = l/(1+l)\n # w, h = scipy.signal.freqz(a, b, points)\n mses.append(np.var(20*np.log10(np.abs(h[1:]))-20*np.log10(np.abs(h_ideal[1:]))))\n print(\"\\tN bits = %d\\tMSE = %E dB^2\"%(frac_bits+int_bits+sign_bits, mses[-1]))\n n = len(mses)-1\n while n>=0:\n if mses[n] > rms_filt_error**2:\n n = n+1 if n < len(mses) - 1 else len(mses) - 1\n break\n n -= 1\n opt_frac_bits_filt_acc = bit_range[n]\n print(\"* Optimum int bits = %d, frac bits = %d, sign_bits=1, quant noise = %E LSB^2\"%(int_bits, opt_frac_bits_filt_acc, mses[n]))\n\n frac_bits = max(opt_frac_bits_qn, opt_frac_bits_filt_acc)\n print(\"\\n* Optimization complete:\")\n print(\"\\tInt bits = %d, frac bits = %d, sign bits = 1\"%(int_bits, 
frac_bits))\n print(\"\\tTotal number bits = %d\"%(int_bits+frac_bits+sign_bits))\n return int_bits, frac_bits",
"def grpdelay(b, a=1, nfft=512, whole='none', Fs=2.*pi):\n#==================================================================\n if whole !='whole':\n nfft = 2*nfft\n#\n w = Fs * np.arange(0, nfft)/nfft\n \n try: len(a)\n except TypeError: \n a = 1; oa = 0\n c = b\n try: len(b)\n except TypeError: print 'No proper filter coefficients: len(a) = len(b) = 1 !'\n else: \n oa = len(a)-1; # order of a(z)\n c = np.convolve(b,a[::-1]) # c(z) = b(z)*a(1/z)*z^(-oa); a[::-1] reverses a\n try: len(b)\n except TypeError: b=1; ob=0; \n else: \n ob = len(b)-1; # order of b(z) \n\n oc = oa + ob; # order of c(z)\n \n cr = c * np.arange(0,oc+1) # multiply with ramp -> derivative of c wrt 1/z\n\n num = np.fft.fft(cr,nfft)\n den = np.fft.fft(c,nfft)\n#\n minmag = 10 * np.spacing(1) # equivalent to matlab \"eps\"\n polebins = np.where(abs(den)<minmag)[0] # find zeros, convert tuple to array\n if np.size(polebins) > 0: # check whether polebins array is empty\n print '*** grpdelay warning: group delay singular -> setting to 0 at:'\n for i in polebins:\n print 'f = {0} '.format((Fs*i/nfft))\n num[i] = 0;\n den[i] = 1; \n\n tau_g = np.real(num / den) - oa;\n# \n if whole !='whole':\n nfft = nfft/2\n tau_g = tau_g[0:nfft]\n w = w[0:nfft]\n\n return tau_g, w",
"def stereo_fm(x, fs=2.4e6, file_name='test.wav'):\n N1 = 10\n b = signal.firwin(64, 2 * 200e3 / float(fs))\n # Filter and decimate (should be polyphase)\n y = signal.lfilter(b, 1, x)\n z = ss.downsample(y, N1)\n # Apply complex baseband discriminator\n z_bb = discrim(z)\n # Work with the (3) stereo multiplex signals:\n # Begin by designing a lowpass filter for L+R and DSP demoded (L-R)\n # (fc = 12 KHz)\n b12 = signal.firwin(128, 2 * 12e3 / (float(fs) / N1))\n # The L + R term is at baseband, we just lowpass filter to remove \n # other terms above 12 kHz.\n y_lpr = signal.lfilter(b12, 1, z_bb)\n b19 = signal.firwin(128, 2 * 1e3 * np.array([19 - 5, 19 + 5]) / (float(fs) / N1),\n pass_zero=False);\n z_bb19 = signal.lfilter(b19, 1, z_bb)\n # Lock PLL to 19 kHz pilot\n # A type 2 loop with bandwidth Bn = 10 Hz and damping zeta = 0.707 \n # The VCO quiescent frequency is set to 19000 Hz.\n theta, phi_error = pilot_pll(z_bb19, 19000, fs / N1, 2, 10, 0.707)\n # Coherently demodulate the L - R subcarrier at 38 kHz.\n # theta is the PLL output phase at 19 kHz, so to double multiply \n # by 2 and wrap with cos() or sin().\n # First bandpass filter\n b38 = signal.firwin(128, 2 * 1e3 * np.array([38 - 5, 38 + 5]) / (float(fs) / N1),\n pass_zero=False);\n x_lmr = signal.lfilter(b38, 1, z_bb)\n # Coherently demodulate using the PLL output phase\n x_lmr = 2 * np.sqrt(2) * np.cos(2 * theta) * x_lmr\n # Lowpass at 12 kHz to recover the desired DSB demod term\n y_lmr = signal.lfilter(b12, 1, x_lmr)\n # Matrix the y_lmr and y_lpr for form right and left channels:\n y_left = y_lpr + y_lmr\n y_right = y_lpr - y_lmr\n\n # Decimate by N2 (nominally 5)\n N2 = 5\n fs2 = float(fs) / (N1 * N2) # (nominally 48 ksps)\n y_left_DN2 = ss.downsample(y_left, N2)\n y_right_DN2 = ss.downsample(y_right, N2)\n # Deemphasize with 75 us time constant to 'undo' the preemphasis \n # applied at the transmitter in broadcast FM.\n # A 1-pole digital lowpass works well here.\n a_de = np.exp(-2.1 * 1e3 * 2 * np.pi / fs2)\n z_left = signal.lfilter([1 - a_de], [1, -a_de], y_left_DN2)\n z_right = signal.lfilter([1 - a_de], [1, -a_de], y_right_DN2)\n # Place left and righ channels as side-by-side columns in a 2D array\n z_out = np.hstack((np.array([z_left]).T, (np.array([z_right]).T)))\n\n ss.to_wav(file_name, 48000, z_out / 2)\n print('Done!')\n # return z_bb, z_out\n return z_bb, theta, y_lpr, y_lmr, z_out",
"def sccs_bit_sync(y, ns):\n # decimated symbol sequence for SEP\n rx_symb_d = np.zeros(int(np.fix(len(y) / ns)))\n track = np.zeros(int(np.fix(len(y) / ns)))\n bit_count = -1\n y_abs = np.zeros(len(y))\n clk = np.zeros(len(y))\n k = ns + 1 # initial 1-of-Ns symbol synch clock phase\n # Sample-by-sample processing required\n for i in range(len(y)):\n # y_abs(i) = abs(round(real(y(i))))\n if i >= ns: # do not process first Ns samples\n # Collect timing decision unit (TDU) samples\n y_abs[i] = np.abs(np.sum(y[i - ns + 1:i + 1]))\n # Update sampling instant and take a sample\n # For causality reason the early sample is 'i',\n # the on-time or prompt sample is 'i-1', and \n # the late sample is 'i-2'.\n if (k == 0):\n # Load the samples into the 3x1 TDU register w_hat.\n # w_hat[1] = late, w_hat[2] = on-time; w_hat[3] = early.\n w_hat = y_abs[i - 2:i + 1]\n bit_count += 1\n if w_hat[1] != 0:\n if w_hat[0] < w_hat[2]:\n k = ns - 1\n clk[i - 2] = 1\n rx_symb_d[bit_count] = y[i - 2 - int(np.round(ns / 2)) - 1]\n elif w_hat[0] > w_hat[2]:\n k = ns + 1\n clk[i] = 1\n rx_symb_d[bit_count] = y[i - int(np.round(ns / 2)) - 1]\n else:\n k = ns\n clk[i - 1] = 1\n rx_symb_d[bit_count] = y[i - 1 - int(np.round(ns / 2)) - 1]\n else:\n k = ns\n clk[i - 1] = 1\n rx_symb_d[bit_count] = y[i - 1 - int(np.round(ns / 2))]\n track[bit_count] = np.mod(i, ns)\n k -= 1\n # Trim the final output to bit_count\n rx_symb_d = rx_symb_d[:bit_count]\n return rx_symb_d, clk, track",
"def set_damping_factor(self, *args, **kwargs):\n return _digital_swig.digital_pfb_clock_sync_fff_sptr_set_damping_factor(self, *args, **kwargs)",
"def bandpass_ifft(t, flux, low_cutoff, high_cutoff, sample=1, \n M=None, inv_box=False, gf_sig = 1, Filter='box', Plot=''): \n #perform fft\n spectrum = np.fft.rfft(flux) \n freq = np.fft.rfftfreq(len(flux), sample)\n freq_sort = np.sort(spectrum)\n \n #calculate the index of the cut off points\n lc = np.abs(freq) < Low_cutoff\n hc = np.abs(freq) > High_cutoff\n between = ~(lc + hc)\n \n ps = np.abs(spectrum)**2\n if ('PS' in Plot) or ('All' in Plot):\n plt.plot(freq, ps)\n plt.title(\"power spectrum\")\n plt.xlabel('Frequency (1/day)')\n plt.ylabel('Power Spectral Density')\n #plt.xlim(0,100)\n #plt.savefig('Figures/spec.png', bbox_inches='tight', pad_inches=0.5)\n plt.show()\n\n if ('DFT' in Plot) or ('All' in Plot):\n plt.plot(freq, spectrum)\n #plt.plot(freq[between], spectrum[between], alpha=0.5)\n plt.title(\"real fourier transform \")\n plt.xlabel('Frequency (1/day)')\n plt.ylabel('Amplitude')\n #plt.xlim(0,100)\n #plt.savefig('Figures/fft.png', bbox_inches='tight', pad_inches=0.5)\n plt.show()\n \n \n \n if Filter == 'box':\n \n #filtered_spectrum = spectrum.copy()\n \n if inv_box == True:\n x_1 = np.arange(0, Low_cutoff, 0.1)\n x_2 = np.arange(High_cutoff, np.max(freq), 0.1)\n plt.plot(freq, spectrum)\n plt.fill_between(x_1, [plt.ylim()[0]] * len(x_1), \n [plt.ylim()[1]] * len(x_1), color='r', alpha=0.3)\n plt.fill_between(x_2, [plt.ylim()[0]] * len(x_2), \n [plt.ylim()[1]] * len(x_2), color='r', alpha=0.3)\n plt.title(\"range to suppress\")\n plt.figure()\n filtered_spectrum[lc] = 0.\n filtered_spectrum[hc] = 0.\n else:\n x_ = np.arange(Low_cutoff, High_cutoff, 0.1)\n plt.plot(freq, spectrum)\n plt.fill_between(x_, [plt.ylim()[0]] * len(x_), \n [plt.ylim()[1]] * len(x_), color='r', alpha=0.3)\n plt.title(\"range to suppress\")\n plt.figure()\n filtered_spectrum[between] = 0.\n \n if Filter == 'Gaussian':\n ig = invgaussian(1, np.median([low_cutoff,high_cutoff]), gf_sig, freq)\n filtered_spectrum = spectrum * ig\n if ('filter' in Plot) or ('All' in Plot):\n plt.plot(freq, ig)\n plt.title('Gaussian Filter')\n #plt.savefig('Figures/gfilter.png')\n #plt.xlim(0,100)\n plt.figure()\n\n if ('spec_filtered' in Plot) or ('All' in Plot):\n plt.plot(freq, filtered_spectrum, label=\"filtered spectrum\")\n plt.plot(freq, spectrum, c='k', ls=\"--\", label=\"spectrum\", alpha=0.5)\n plt.title(\"Unfiltered vs. Filtered Spectrum\")\n plt.xlabel('Frequency (1/day)')\n plt.ylabel('Amplitude')\n ldg = plt.legend(fontsize=12)\n #plt.xlim(0,100)\n #plt.savefig('Figures/filter_compare.png', bbox_inches='tight', pad_inches=0.5)\n plt.figure()\n\n filtered_signal = np.fft.irfft(filtered_spectrum) # Construct filtered signal\n\n if ('signal_filtered' in Plot) or ('All' in Plot):\n fig = plt.figure(figsize=(15,10)) \n plt.plot(t, filtered_signal, label=\"filtered signal\")\n plt.plot(t, flux, c='k', ls=\"--\", label=\"original signal\", alpha=0.5)\n plt.xlabel('Time')\n plt.ylabel('Amplitude')\n plt.title(\"Unfiltered vs. Filtered Signal\")\n #plt.savefig('Figures/filtered_signal.png', bbox_inches='tight', pad_inches=0.5)\n plt.legend()\n #Filtered_signal = np.zeros_like(Filtered_signal)\n return spectrum, freq, filtered_spectrum, filtered_signal, Low_cutoff, High_cutoff",
"def dpss_filters(freqs, times, freq_scale=10, time_scale=1800, eigenval_cutoff=1e-9):\n delay_scale = (freq_scale * 1e6) ** -1 # Puts it in seconds\n fringe_scale = (time_scale) ** -1 # fringe scale in Hz\n time_in_seconds = (times - times.min()) * 60 * 60 * 24 # time array in seconds\n\n # Compute the time and frequency filters using hera_filters\n time_filters = hera_filters.dspec.dpss_operator(\n time_in_seconds, [0], [fringe_scale], eigenval_cutoff=[eigenval_cutoff]\n )[0].real\n freq_filters = hera_filters.dspec.dpss_operator(\n freqs, [0], [delay_scale], eigenval_cutoff=[eigenval_cutoff]\n )[0].real\n\n return time_filters, freq_filters",
"def FIRDesign(taps: int, cutoff: float, width: float, type='lowpass', fs=48000, window='Rectangle', plot=False):\n window = window.lower()\n if window == 'rectangle':\n window = 'boxcar'\n\n if type == 'lowpass':\n b = signal.firwin(taps, cutoff, width=width, pass_zero=True, window=window, fs=fs)\n elif type == 'highpass':\n b = signal.firwin(taps, cutoff, width=width, pass_zero=False, window=window, fs=fs)\n\n if plot:\n w, h = signal.freqz(b, 1, worN=fs)\n plt.figure('Filter Frequency Response')\n plt.subplot(2, 1, 1)\n plt.title('Impulse response')\n plt.plot(b)\n plt.grid()\n plt.xlabel('Taps [.]')\n plt.ylabel('Amplitude [.]')\n #frequency response\n plt.subplot(2, 1, 2)\n plt.title('Frequency response')\n plt.semilogx((fs * 0.5 / np.pi) * w, 20 * np.log10(abs(h)))\n plt.grid()\n plt.xlabel('Frequency [Hz]')\n plt.ylabel('Magnitude [dB]')\n plt.show()\n\n return b",
"def filter_psr(psr, bw=1.1, dt=7, filter_dict=None, min_toas=10,\n frequency_filter=True, fmax=3000, verbose=True, plot=False,\n low_freq_cut=False, legacy_cut=False):\n psr.deleted[:] = 1\n print('Working on PSR {}'.format(psr.name))\n\n # Flag filtering\n flag_keep = []\n if filter_dict:\n for key, val in filter_dict.items():\n if verbose: print('Keeping TOAs corresponding to {} {}'\n .format(key, val))\n if type(val) is not list:\n val = [val]\n if legacy_cut:\n print('Cutting legacy data!')\n flag_conds = [psr.flagvals(key)==v for v in val]\n else:\n flag_conds = [psr.flagvals(key)==psr.flagvals(key)]\n # if TOA has ANY acceptable value for this flag\n flag_keep.append(np.any(flag_conds, axis=0))\n\n # if TOA satisfies all flags\n idx_flag = np.flatnonzero(np.alltrue(flag_keep, axis=0))\n\n # filter low frequency observations\n if low_freq_cut:\n if verbose: print(\"Cutting data with frequency < 1000MHz\")\n idx_freq = []\n idx_freq = list(np.argwhere(psr.freqs < 1000))\n # check for empty list (i.e. there is no multi-frequency data)\n if not idx_freq:\n print(\"No low-frequency data, returning original psr\")\n return psr\n # delete\n idx = np.unique(np.concatenate(idx_freq))\n else:\n idx = idx_flag\n\n psr.deleted[idx] = 0 # mark filtered TOAs as \"deleted\"\n\n # filter for frequency coverage\n if frequency_filter:\n if verbose: print(\"Running multi-frequency filter\")\n bins = get_dm_bins(psr.toas()*86400, dt=dt)\n idx_freq = []\n for bn in bins:\n if sum(bn) > 1:\n ix = list(filter(lambda x: x in idx_flag, np.flatnonzero(bn)))\n if len(ix) > 0:\n if psr.freqs[ix].max() / psr.freqs[ix].min() >= bw:\n idx_freq.append(ix)\n elif psr.freqs[ix].max() >= fmax:\n idx_freq.append(ix)\n\n # check for empty list (i.e. there is no multi-frequency data)\n if not idx_freq:\n print(\"No multi-frequency data, returning original psr\")\n return psr\n\n # delete\n idx = np.unique(np.concatenate(idx_freq))\n else:\n idx = idx_flag\n psr.deleted[idx] = 0 # mark filtered TOAs as \"deleted\"\n\n # check for \"orphan\" backends (less than min_toas obsv.)\n orphans = []\n for gr in np.unique(psr.flagvals('group')):\n in_group = [gr == b for b in psr.flagvals('group')]\n mask = np.logical_and(in_group, ~psr.deletedmask())\n N = np.sum(mask)\n if N>0 and N<min_toas:\n psr.deleted[mask] = True\n orphans.append([gr, N])\n if verbose and len(orphans): print(\"backends marked as 'orphan': {}\".format(orphans))\n\n # filter design matrix\n mask = np.logical_not(psr.deleted)\n if not sum(mask):\n print(\"all TOAs cut, returning original psr\")\n return psr\n\n M = psr.designmatrix()[mask, :]\n dpars = []\n for ct, (par, val) in enumerate(zip(psr.pars(), M.sum(axis=0)[1:])):\n if val == 0:\n dpars.append(par)\n psr[par].fit = False\n psr[par].val = 0.0\n\n if verbose:\n print('Cutting {} TOAs'.format(np.sum(~mask)))\n if len(dpars): print('Turning off fit for {}'.format(dpars))\n\n if np.sum(~mask) == psr.nobs:\n print(\"Cutting all TOAs, so returning None\")\n else:\n fix_jumps(psr)\n\n if plot:\n plt.figure(figsize=(8,3))\n for pta in np.unique(psr.flagvals('pta')):\n nix = psr.flagvals('pta') == pta\n plt.plot(psr.toas()[nix], psr.freqs[nix], '.', label=pta)\n plt.plot(psr.toas()[~psr.deletedmask()], psr.freqs[~psr.deletedmask()], '.',\n color='C3', alpha=0.3, label='filtered')\n plt.legend(loc='best', frameon=False)\n plt.title(psr.name)\n\n return psr",
"def test_sine_sff():\n # Retrieve the custom, known signal properties\n tpf = KeplerTargetPixelFile(filename_synthetic_sine)\n true_period = float(tpf.hdu[3].header[\"PERIOD\"])\n true_amplitude = float(tpf.hdu[3].header[\"SINE_AMP\"])\n\n # Run the SFF algorithm\n lc = tpf.to_lightcurve()\n corrector = SFFCorrector(lc)\n cor_lc = corrector.correct(\n tpf.pos_corr2,\n tpf.pos_corr1,\n niters=4,\n windows=1,\n bins=7,\n restore_trend=True,\n timescale=0.5,\n )\n\n # Verify that we get the period within ~20%\n pg = cor_lc.to_periodogram(\n method=\"lombscargle\", minimum_period=1, maximum_period=10, oversample_factor=10\n )\n ret_period = pg.period_at_max_power.value\n threshold = 0.2\n assert (ret_period > true_period * (1 - threshold)) & (\n ret_period < true_period * (1 + threshold)\n )\n\n # Verify that we get the amplitude to within 10%\n n_cad = len(tpf.time)\n design_matrix = np.vstack(\n [\n np.ones(n_cad),\n np.sin(2.0 * np.pi * cor_lc.time.value / ret_period),\n np.cos(2.0 * np.pi * cor_lc.time.value / ret_period),\n ]\n ).T\n ATA = np.dot(design_matrix.T, design_matrix / cor_lc.flux_err[:, None] ** 2)\n least_squares_coeffs = np.linalg.solve(\n ATA, np.dot(design_matrix.T, cor_lc.flux / cor_lc.flux_err ** 2)\n )\n const, sin_weight, cos_weight = least_squares_coeffs\n\n fractional_amplitude = (sin_weight ** 2 + cos_weight ** 2) ** (0.5) / const\n assert (fractional_amplitude > true_amplitude / 1.1) & (\n fractional_amplitude < true_amplitude * 1.1\n )",
"def compute_filter_banks(pow_frames, nfft, sample_rate):\n nfilt = 40 #40 filters\n low_freq_mel = 0\n high_freq_mel = (2595 * numpy.log10(1 + (sample_rate / 2) / 700)) # Convert Hz to Mel\n mel_points = numpy.linspace(low_freq_mel, high_freq_mel, nfilt + 2) # Equally spaced in Mel scale\n hz_points = (700 * (10 ** (mel_points / 2595) - 1)) # Convert Mel to Hz\n b = numpy.floor((nfft + 1) * hz_points / sample_rate)\n\n fbank = numpy.zeros((nfilt, int(numpy.floor(nfft / 2 + 1))))\n for m in range(1, nfilt + 1):\n f_m_minus = int(b[m - 1]) # left\n f_m = int(b[m]) # center\n f_m_plus = int(b[m + 1]) # right\n\n for k in range(f_m_minus, f_m):\n fbank[m - 1, k] = (k - b[m - 1]) / (b[m] - b[m - 1])\n for k in range(f_m, f_m_plus):\n fbank[m - 1, k] = (b[m + 1] - k) / (b[m + 1] - b[m])\n filter_banks = numpy.dot(pow_frames, fbank.T)\n filter_banks = numpy.where(filter_banks == 0, numpy.finfo(float).eps, filter_banks) # Numerical Stability\n filter_banks = 20 * numpy.log10(filter_banks) # dB\n return filter_banks",
"def p_fps(self,):\n # Loop the receivers\n self.pres_s = []\n for js, s_coord in enumerate(self.sources.coord):\n hs = s_coord[2] # source height\n pres_rec = np.zeros((self.receivers.coord.shape[0], len(self.controls.freq)), dtype = np.csingle)\n for jrec, r_coord in enumerate(self.receivers.coord):\n xdist = (s_coord[0] - r_coord[0])**2.0\n ydist = (s_coord[1] - r_coord[1])**2.0\n r = (xdist + ydist)**0.5 # horizontal distance source-receiver\n zr = r_coord[2] # receiver height\n r1 = (r ** 2 + (hs - zr) ** 2) ** 0.5\n r2 = (r ** 2 + (hs + zr) ** 2) ** 0.5\n # print('Calculate p_scat and p_fp for rec: {}'.format(r_coord))\n print('Calculate sound pressure for source {} at ({}) and receiver {} at ({})'.format(js+1, s_coord, jrec+1, r_coord))\n # bar = ChargingBar('Processing sound pressure at field point', max=len(self.controls.k0), suffix='%(percent)d%%')\n bar = tqdm(total = len(self.controls.k0),\n desc = 'Processing sound pressure at field point')\n for jf, k0 in enumerate(self.controls.k0):\n # print('the ps passed is: {}'.format(self.p_surface[:,jf]))\n # fakebeta = np.array(0.02+1j*0.2)\n # r_coord = np.reshape(np.array([0, 0, 0.01], dtype = np.float32), (1,3))\n p_scat = insitu_cpp._bemflush_pscat(r_coord, self.node_x, self.node_y,\n self.Nzeta, self.Nweights.T, k0, self.beta[jf], self.p_surface[:,jf])\n pres_rec[jrec, jf] = (np.exp(-1j * k0 * r1) / r1) +\\\n (np.exp(-1j * k0 * r2) / r2) + p_scat\n bar.update(1)\n bar.close()\n # bar.next()\n # bar.finish()\n # print('p_fp for freq {} Hz is: {}'.format(self.controls.freq[jf], pres_rec[jrec, jf]))\n self.pres_s.append(pres_rec)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
__init__(self) > digital_pn_correlator_cc_sptr __init__(self, p) > digital_pn_correlator_cc_sptr | def __init__(self, *args):
this = _digital_swig.new_digital_pn_correlator_cc_sptr(*args)
try: self.this.append(this)
except: self.this = this | [
"def pn_correlator_cc(*args, **kwargs):\n return _digital_swig.pn_correlator_cc(*args, **kwargs)",
"def __init__(self, *args):\n this = _digital_swig.new_digital_correlate_access_code_tag_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self, *args):\n this = _digital_swig.new_digital_diff_phasor_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self, *args):\n this = _digital_swig.new_digital_pfb_clock_sync_ccf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self, name=None):\n self._mng = pn_messenger(name)",
"def __init__(self, *args):\n this = _digital_swig.new_digital_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self, *args):\n this = _digital_swig.new_digital_descrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self):\n\t\tself.communicator_list = []\n\t\tself.NETWORK_TIMER = 500",
"def __init__(self, *args):\n this = _digital_swig.new_digital_map_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self, *args):\n this = _digital_swig.new_digital_additive_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self):\n super(CorrelogramPooling3D, self).__init__()",
"def __init__(self, *args):\n this = _digital_swig.new_digital_diff_encoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self, protocol):\r\n\r\n self.protocol = protocol\r\n self.protocol.protocol_flags['MCCP'] = False\r\n # ask if client will mccp, connect callbacks to handle answer\r\n self.protocol.will(MCCP).addCallbacks(self.do_mccp, self.no_mccp)",
"def __init__(self):\n this = _coin.new_SoConeDetail()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this",
"def __init__(self, params=None):\n super(NetPositionsMe, self).__init__()\n self.params = params",
"def __init__(self, *args):\n this = _digital_swig.new_digital_packet_sink_sptr(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self, *args):\n this = _digital_swig.new_digital_diff_decoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self, tensor_rep):\n super(ComponentPlotCPD, self).__init__(tensor_rep=tensor_rep)",
"def __init__(self,\n *,\n peer_cidrs: List[str] = None) -> None:\n self.peer_cidrs = peer_cidrs"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
pn_correlator_cc(int degree, int mask = 0, int seed = 1) > digital_pn_correlator_cc_sptr PN code sequential search correlator. Receives complex baseband signal, outputs complex correlation against reference PN code, one sample per PN code period. The PN sequence is generated using a GLFSR. | def pn_correlator_cc(*args, **kwargs):
return _digital_swig.pn_correlator_cc(*args, **kwargs) | [
"def create_compcorr(name='CompCor'):\n compproc = pe.Workflow(name=name)\n inputspec = pe.Node(util.IdentityInterface(fields=['num_components',\n 'realigned_file',\n 'mean_file',\n 'reg_file',\n 'fsaseg_file',\n 'selector']),\n name='inputspec')\n # selector input is bool list [True,True] where first is referring to\n # tCompcorr and second refers to aCompcorr\n outputspec = pe.Node(util.IdentityInterface(fields=['noise_components',\n 'stddev_file',\n 'tsnr_file',\n 'csf_mask',\n 'tsnr_detrended']),\n name='outputspec')\n # extract the principal components of the noise\n tsnr = pe.MapNode(TSNR(regress_poly=2), #SG: advanced parameter\n name='tsnr',\n iterfield=['in_file'])\n\n # additional information for the noise prin comps\n getthresh = pe.MapNode(interface=fsl.ImageStats(op_string='-p 98'),\n name='getthreshold',\n iterfield=['in_file'])\n\n # and a bit more...\n threshold_stddev = pe.MapNode(fsl.Threshold(),\n name='threshold',\n iterfield=['in_file', 'thresh'])\n\n acomp = extract_csf_mask()\n\n # compcor actually extracts the components\n compcor = pe.MapNode(util.Function(input_names=['realigned_file',\n 'noise_mask_file',\n 'num_components',\n 'csf_mask_file',\n 'selector'],\n output_names=['noise_components'],\n function=extract_noise_components),\n name='compcor_components',\n iterfield=['realigned_file',\n 'noise_mask_file'])\n # Make connections\n compproc.connect(inputspec, 'mean_file',\n acomp, 'inputspec.mean_file')\n compproc.connect(inputspec, 'reg_file',\n acomp, 'inputspec.reg_file')\n compproc.connect(inputspec, 'fsaseg_file',\n acomp, 'inputspec.fsaseg_file')\n compproc.connect(inputspec, 'selector',\n compcor, 'selector')\n compproc.connect(acomp, ('outputspec.csf_mask',pickfirst),\n compcor, 'csf_mask_file')\n compproc.connect(inputspec, 'realigned_file',\n tsnr, 'in_file')\n compproc.connect(inputspec, 'num_components',\n compcor, 'num_components')\n compproc.connect(inputspec, 'realigned_file',\n compcor, 'realigned_file')\n compproc.connect(getthresh, 'out_stat',\n threshold_stddev, 'thresh')\n compproc.connect(threshold_stddev, 'out_file',\n compcor, 'noise_mask_file')\n compproc.connect(tsnr, 'stddev_file',\n threshold_stddev, 'in_file')\n compproc.connect(tsnr, 'stddev_file',\n getthresh, 'in_file')\n compproc.connect(tsnr, 'stddev_file',\n outputspec, 'stddev_file')\n compproc.connect(tsnr, 'tsnr_file',\n outputspec, 'tsnr_file')\n compproc.connect(tsnr, 'detrended_file',\n outputspec, 'tsnr_detrended')\n compproc.connect(compcor, 'noise_components',\n outputspec, 'noise_components')\n return compproc",
"def correlate(self, pattern, signal):\n\treturn sc.spearmanr( signal, pattern )[0]",
"def pierre_corr_plot(real_cond_samples, fake_cond_samples, ax):\n def pierre_corr(samples):\n \"\"\"Compute the pierre correlation coefficients.\n\n Args:\n samples (np.array): 2D array of shape (n_sample, seq_len)\n\n Returns:\n np.array: 1D array of length (seq_len - 1).\n The j-th element (the first element is indexed with 1) represents the correlation between X_{1:j} and X_{j+1:p}.\n \"\"\"\n seq_len = np.shape(samples)[1]\n corr_ls = np.zeros(seq_len - 1)\n for i in range(1, seq_len):\n sum_x_i = np.sum(samples[:, :i], axis=1)\n sum_x_ip = np.sum(samples[:, i:], axis=1)\n corr_ls[i - 1] = np.corrcoef(sum_x_i, sum_x_ip)[0, 1]\n return corr_ls\n cond_len = np.shape(real_cond_samples)[1]\n real_pierre_corr = pierre_corr(real_cond_samples)\n fake_pierre_corr = pierre_corr(fake_cond_samples)\n ax.plot(real_pierre_corr, label='Real samples')\n ax.plot(fake_pierre_corr, label='Fake samples')\n ax.set_xticks(np.arange(1, cond_len, int(cond_len)/10))\n ax.set_xlabel('$j$')\n ax.set_ylabel('$Corr(X_{1:j}, X_{j+1:p})$')\n ax.set_title('Pierre correlation')\n ax.legend()",
"def __init__(self, *args):\n this = _digital_swig.new_digital_pn_correlator_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this",
"def correlate(*args):\n return _seb.correlate(*args)",
"def lpcc(sig,\n fs=16000,\n num_ceps=13,\n pre_emph=1,\n pre_emph_coeff=0.97,\n win_type=\"hann\",\n win_len=0.025,\n win_hop=0.01,\n do_rasta=True,\n lifter=1,\n normalize=1,\n dither=1):\n lpcs = lpc(sig=sig,\n fs=fs,\n num_ceps=num_ceps,\n pre_emph=pre_emph,\n pre_emph_coeff=pre_emph_coeff,\n win_len=win_len,\n win_hop=win_hop,\n do_rasta=True,\n dither=dither)\n lpccs = lpc2cep(lpcs.T)\n\n # liftering\n if lifter > 0:\n lpccs = lifter_ceps(lpccs, lifter)\n\n # normalization\n if normalize:\n lpccs = cmvn(cms(lpccs))\n\n lpccs = lpccs.T\n return lpccs[:, :]",
"def induceRankCorr(R, Cstar):\r\n\r\n \"\"\"Define inverse complimentary error function (erfcinv in matlab)\r\n x is on interval [0,2]\r\n its also defined in scipy.special\"\"\"\r\n #erfcinv = lambda x: -stats.norm.ppf(x/2)/sqrt(2)\r\n\r\n C = Cstar\r\n N, k = R.shape\r\n \"\"\"Calculate the sample correlation matrix T\"\"\"\r\n T = np.corrcoef(R.T)\r\n\r\n \"\"\"Calculate lower triangular cholesky\r\n decomposition of Cstar (i.e. P*P' = C)\"\"\"\r\n P = cholesky(C).T\r\n\r\n \"\"\"Calculate lower triangular cholesky decomposition of T, i.e. Q*Q' = T\"\"\"\r\n Q = cholesky(T).T\r\n\r\n \"\"\"S*T*S' = C\"\"\"\r\n S = P.dot(inv(Q))\r\n\r\n \"\"\"Replace values in samples with corresponding\r\n rank-indices and convert to van der Waerden scores\"\"\"\r\n\r\n RvdW = -np.sqrt(2) * special.erfcinv(2*((_columnRanks(R)+1)/(N+1)))\r\n\r\n \"\"\"Matrix RBstar has a correlation matrix exactly equal to C\"\"\"\r\n RBstar = RvdW.dot(S.T)\r\n \r\n \"\"\"Match up the rank pairing in R according to RBstar\"\"\"\r\n ranks = _columnRanks(RBstar)\r\n sortedR = np.sort(R, axis=0)\r\n corrR = np.zeros(R.shape)\r\n for j in np.arange(k):\r\n corrR[:, j] = sortedR[ranks[:, j], j]\r\n\r\n return corrR",
"def TileRawChannelCorrectionAlgCfg(flags, **kwargs):\n\n acc = ComponentAccumulator()\n\n kwargs.setdefault('InputRawChannelContainer', 'TileRawChannelCnt')\n kwargs.setdefault('OutputRawChannelContainer', 'TileRawChannelCntCorrected')\n\n if 'NoiseFilterTools' not in kwargs:\n kwargs['NoiseFilterTools'] = acc.popToolsAndMerge( TileRawChannelCorrectionToolsCfg(flags) )\n\n TileRawChannelCorrectionAlg=CompFactory.TileRawChannelCorrectionAlg\n acc.addEventAlgo(TileRawChannelCorrectionAlg(**kwargs), primary = True)\n\n return acc",
"def xcorr_aftan_mp(self, outdir, channel='ZZ', tb=0., inftan=pyaftan.InputFtanParam(), basic1=True, basic2=True,\n pmf1=True, pmf2=True, verbose=True, prephdir=None, f77=True, pfx='DISP', subsize=1000, deletedisp=True, nprocess=None):\n print 'Preparing data for aftan analysis !'\n staLst=self.waveforms.list()\n inputStream=[]\n for staid1 in staLst:\n if not os.path.isdir(outdir+'/'+pfx+'/'+staid1):\n os.makedirs(outdir+'/'+pfx+'/'+staid1)\n for staid2 in staLst:\n netcode1, stacode1=staid1.split('.')\n netcode2, stacode2=staid2.split('.')\n if staid1 >= staid2: continue\n try:\n channels1=self.auxiliary_data.NoiseXcorr[netcode1][stacode1][netcode2][stacode2].list()\n channels2=self.auxiliary_data.NoiseXcorr[netcode1][stacode1][netcode2][stacode2][channels1[0]].list()\n for chan in channels1:\n if chan[2]==channel[0]: chan1=chan\n for chan in channels2:\n if chan[2]==channel[1]: chan2=chan\n except KeyError:\n continue\n try:\n tr=self.get_xcorr_trace(netcode1, stacode1, netcode2, stacode2, chan1, chan2)\n except NameError:\n print netcode1+'.'+stacode1+'_'+netcode2+'.'+stacode2+'_'+channel+' not exists!'\n continue\n if verbose:\n print 'Preparing aftan data: '+ netcode1+'.'+stacode1+'_'+netcode2+'.'+stacode2+'_'+channel\n aftanTr=pyaftan.aftantrace(tr.data, tr.stats)\n inputStream.append(aftanTr)\n print 'Start multiprocessing aftan analysis !'\n if len(inputStream) > subsize:\n Nsub = int(len(inputStream)/subsize)\n for isub in xrange(Nsub):\n print 'Subset:', isub,'in',Nsub,'sets'\n cstream = inputStream[isub*subsize:(isub+1)*subsize]\n AFTAN = partial(aftan4mp, outdir=outdir, inftan=inftan, prephdir=prephdir, f77=f77, pfx=pfx)\n pool = multiprocessing.Pool(processes=nprocess)\n pool.map(AFTAN, cstream) #make our results with a map call\n pool.close() #we are not adding any more processes\n pool.join() #tell it to wait until all threads are done before going on\n cstream = inputStream[(isub+1)*subsize:]\n AFTAN = partial(aftan4mp, outdir=outdir, inftan=inftan, prephdir=prephdir, f77=f77, pfx=pfx)\n pool = multiprocessing.Pool(processes=nprocess)\n pool.map(AFTAN, cstream) #make our results with a map call\n pool.close() #we are not adding any more processes\n pool.join() #tell it to wait until all threads are done before going on\n else:\n AFTAN = partial(aftan4mp, outdir=outdir, inftan=inftan, prephdir=prephdir, f77=f77, pfx=pfx)\n pool = multiprocessing.Pool(processes=nprocess)\n pool.map(AFTAN, inputStream) #make our results with a map call\n pool.close() #we are not adding any more processes\n pool.join() #tell it to wait until all threads are done before going on\n print 'End of multiprocessing aftan analysis !'\n print 'Reading aftan results into ASDF Dataset !'\n for staid1 in staLst:\n for staid2 in staLst:\n netcode1, stacode1=staid1.split('.')\n netcode2, stacode2=staid2.split('.')\n if stacode1 >= stacode2: continue\n try:\n channels1=self.auxiliary_data.NoiseXcorr[netcode1][stacode1][netcode2][stacode2].list()\n channels2=self.auxiliary_data.NoiseXcorr[netcode1][stacode1][netcode2][stacode2][channels1[0]].list()\n for chan in channels1:\n if chan[2]==channel[0]: chan1=chan\n for chan in channels2:\n if chan[2]==channel[1]: chan2=chan\n except KeyError: continue\n finPR=pfx+'/'+netcode1+'.'+stacode1+'/'+ \\\n pfx+'_'+netcode1+'.'+stacode1+'_'+chan1+'_'+netcode2+'.'+stacode2+'_'+chan2+'.SAC'\n try:\n f10=np.load(outdir+'/'+finPR+'_1_DISP.0.npz')\n f11=np.load(outdir+'/'+finPR+'_1_DISP.1.npz')\n f20=np.load(outdir+'/'+finPR+'_2_DISP.0.npz')\n 
f21=np.load(outdir+'/'+finPR+'_2_DISP.1.npz')\n except IOError:\n print 'NO aftan results: '+ netcode1+'.'+stacode1+'_'+netcode2+'.'+stacode2+'_'+channel\n continue\n print 'Reading aftan results '+ netcode1+'.'+stacode1+'_'+netcode2+'.'+stacode2+'_'+channel\n if deletedisp:\n os.remove(outdir+'/'+finPR+'_1_DISP.0.npz')\n os.remove(outdir+'/'+finPR+'_1_DISP.1.npz')\n os.remove(outdir+'/'+finPR+'_2_DISP.0.npz')\n os.remove(outdir+'/'+finPR+'_2_DISP.1.npz')\n arr1_1=f10['arr_0']\n nfout1_1=f10['arr_1']\n arr2_1=f11['arr_0']\n nfout2_1=f11['arr_1']\n arr1_2=f20['arr_0']\n nfout1_2=f20['arr_1']\n arr2_2=f21['arr_0']\n nfout2_2=f21['arr_1']\n staid_aux=netcode1+'/'+stacode1+'/'+netcode2+'/'+stacode2+'/'+channel\n if basic1:\n parameters={'Tc': 0, 'To': 1, 'Vgr': 2, 'Vph': 3, 'ampdb': 4, 'dis': 5, 'snrdb': 6, 'mhw': 7, 'amp': 8, 'Np': nfout1_1}\n self.add_auxiliary_data(data=arr1_1, data_type='DISPbasic1', path=staid_aux, parameters=parameters)\n if basic2:\n parameters={'Tc': 0, 'To': 1, 'Vgr': 2, 'Vph': 3, 'ampdb': 4, 'snrdb': 5, 'mhw': 6, 'amp': 7, 'Np': nfout2_1}\n self.add_auxiliary_data(data=arr2_1, data_type='DISPbasic2', path=staid_aux, parameters=parameters)\n if inftan.pmf:\n if pmf1:\n parameters={'Tc': 0, 'To': 1, 'Vgr': 2, 'Vph': 3, 'ampdb': 4, 'dis': 5, 'snrdb': 6, 'mhw': 7, 'amp': 8, 'Np': nfout1_2}\n self.add_auxiliary_data(data=arr1_2, data_type='DISPpmf1', path=staid_aux, parameters=parameters)\n if pmf2:\n parameters={'Tc': 0, 'To': 1, 'Vgr': 2, 'Vph': 3, 'ampdb': 4, 'snrdb': 5, 'mhw': 6, 'amp': 7, 'snr':8, 'Np': nfout2_2}\n self.add_auxiliary_data(data=arr2_2, data_type='DISPpmf2', path=staid_aux, parameters=parameters)\n if deletedisp: shutil.rmtree(outdir+'/'+pfx)\n return",
"def xcorr_aftan(self, channel='ZZ', tb=0., outdir=None, inftan=pyaftan.InputFtanParam(), basic1=True, basic2=True, \\\n pmf1=True, pmf2=True, verbose=True, prephdir=None, f77=True, pfx='DISP'):\n print 'Start aftan analysis!'\n staLst=self.waveforms.list()\n for staid1 in staLst:\n for staid2 in staLst:\n netcode1, stacode1=staid1.split('.')\n netcode2, stacode2=staid2.split('.')\n if staid1 >= staid2: continue\n try:\n channels1=self.auxiliary_data.NoiseXcorr[netcode1][stacode1][netcode2][stacode2].list()\n channels2=self.auxiliary_data.NoiseXcorr[netcode1][stacode1][netcode2][stacode2][channels1[0]].list()\n for chan in channels1:\n if chan[2]==channel[0]: chan1=chan\n for chan in channels2:\n if chan[2]==channel[1]: chan2=chan\n except KeyError:\n continue\n try:\n tr=self.get_xcorr_trace(netcode1, stacode1, netcode2, stacode2, chan1, chan2)\n except NameError:\n print netcode1+'.'+stacode1+'_'+netcode2+'.'+stacode2+'_'+channel+' not exists!'\n continue\n aftanTr=pyaftan.aftantrace(tr.data, tr.stats)\n if abs(aftanTr.stats.sac.b+aftanTr.stats.sac.e)<aftanTr.stats.delta:\n aftanTr.makesym()\n if prephdir !=None:\n phvelname = prephdir + \"/%s.%s.pre\" %(netcode1+'.'+stacode1, netcode2+'.'+stacode2)\n else:\n phvelname =''\n if f77:\n aftanTr.aftanf77(pmf=inftan.pmf, piover4=inftan.piover4, vmin=inftan.vmin, vmax=inftan.vmax, tmin=inftan.tmin, tmax=inftan.tmax,\n tresh=inftan.tresh, ffact=inftan.ffact, taperl=inftan.taperl, snr=inftan.snr, fmatch=inftan.fmatch, nfin=inftan.nfin,\n npoints=inftan.npoints, perc=inftan.perc, phvelname=phvelname)\n else:\n aftanTr.aftan(pmf=inftan.pmf, piover4=inftan.piover4, vmin=inftan.vmin, vmax=inftan.vmax, tmin=inftan.tmin, tmax=inftan.tmax,\n tresh=inftan.tresh, ffact=inftan.ffact, taperl=inftan.taperl, snr=inftan.snr, fmatch=inftan.fmatch, nfin=inftan.nfin,\n npoints=inftan.npoints, perc=inftan.perc, phvelname=phvelname)\n if verbose:\n print 'aftan analysis for: ' + netcode1+'.'+stacode1+'_'+netcode2+'.'+stacode2+'_'+channel\n aftanTr.get_snr(ffact=inftan.ffact) # SNR analysis\n staid_aux=netcode1+'/'+stacode1+'/'+netcode2+'/'+stacode2+'/'+channel\n # save aftan results to ASDF dataset\n if basic1:\n parameters={'Tc': 0, 'To': 1, 'Vgr': 2, 'Vph': 3, 'ampdb': 4, 'dis': 5, 'snrdb': 6, 'mhw': 7, 'amp': 8, 'Np': aftanTr.ftanparam.nfout1_1}\n self.add_auxiliary_data(data=aftanTr.ftanparam.arr1_1, data_type='DISPbasic1', path=staid_aux, parameters=parameters)\n if basic2:\n parameters={'Tc': 0, 'To': 1, 'Vgr': 2, 'Vph': 3, 'ampdb': 4, 'snrdb': 5, 'mhw': 6, 'amp': 7, 'Np': aftanTr.ftanparam.nfout2_1}\n self.add_auxiliary_data(data=aftanTr.ftanparam.arr2_1, data_type='DISPbasic2', path=staid_aux, parameters=parameters)\n if inftan.pmf:\n if pmf1:\n parameters={'Tc': 0, 'To': 1, 'Vgr': 2, 'Vph': 3, 'ampdb': 4, 'dis': 5, 'snrdb': 6, 'mhw': 7, 'amp': 8, 'Np': aftanTr.ftanparam.nfout1_2}\n self.add_auxiliary_data(data=aftanTr.ftanparam.arr1_2, data_type='DISPpmf1', path=staid_aux, parameters=parameters)\n if pmf2:\n parameters={'Tc': 0, 'To': 1, 'Vgr': 2, 'Vph': 3, 'ampdb': 4, 'snrdb': 5, 'mhw': 6, 'amp': 7, 'snr':8, 'Np': aftanTr.ftanparam.nfout2_2}\n self.add_auxiliary_data(data=aftanTr.ftanparam.arr2_2, data_type='DISPpmf2', path=staid_aux, parameters=parameters)\n if outdir != None:\n if not os.path.isdir(outdir+'/'+pfx+'/'+staid1):\n os.makedirs(outdir+'/'+pfx+'/'+staid1)\n foutPR=outdir+'/'+pfx+'/'+netcode1+'.'+stacode1+'/'+ \\\n pfx+'_'+netcode1+'.'+stacode1+'_'+chan1+'_'+netcode2+'.'+stacode2+'_'+chan2+'.SAC'\n aftanTr.ftanparam.writeDISP(foutPR)\n print 
'End aftan analysis!'\n return",
"def calc_corr(power_arr, conn_params):\n\n blp_corr = np.zeros((power_arr.shape[0], len(conn_params['conn_pairs'][0]),\n power_arr.shape[2], power_arr.shape[3]))\n lab_pairs = zip(conn_params['conn_pairs'][0], conn_params['conn_pairs'][1])\n # Loop over each trial\n for ti in range(power_arr.shape[0]):\n # Loop over each label pair\n for match_i, (li_1, li_2) in enumerate(lab_pairs):\n # Loop over each power band\n for bp_i in range(power_arr.shape[2]):\n # Calculate sliding correlation\n blp_corr[ti, match_i, bp_i, :] = \\\n rolling_corr(power_arr[ti, li_1, bp_i, :],\n power_arr[ti, li_2, bp_i, :],\n window=corr_len)\n\n return blp_corr",
"def Inference_PLMDCA(Jscore, matrix_contacts):\n val,cts = np.unique(matrix_contacts,return_counts = True)\n nbrcontacts = cts[val == 1]\n \n # inverse of the correlation matrix to get the couplings\n # inferred_couplings = np.linalg.inv(mat_corr)\n\n TP = []\n\n # order the 2d array and find the index of the sorted values in the matrix\n index_sorted_array_x, index_sorted_array_y = np.unravel_index(np.argsort(-Jscore, axis=None), Jscore.shape)\n\n\n idx_flip = list(index_sorted_array_x)\n idy_flip = list(index_sorted_array_y)\n\n\n FP = []\n\n TP_coords = []\n all_coords = []\n N = 0 \n number_pairs = []\n \n listFPJij = []\n # §TP_coords = []\n listTPJij = []\n \n \n list_tp = []\n TP = 0\n\n list_tp_fraction_allpairs = []\n\n\n for x, y in zip(idx_flip, idy_flip):\n\n # just look at the elements above the diagonal as symmetric matrix\n # to not count twice each contact\n if y > x:\n\n N = N + 1\n\n number_pairs.append(N)\n\n\n if matrix_contacts[x,y] == 1:\n TP = TP + 1\n if N <= nbrcontacts:\n TP_coords.append([x,y])\n listTPJij.append(Jscore[x,y])\n else:\n\n if N <= nbrcontacts:\n FP.append([x,y])\n listFPJij.append(Jscore[x,y])\n\n\n list_tp.append(TP)\n\n all_coords.append([x,y])\n\n list_tp_fraction_allpairs.append(TP/N)\n\n return list_tp_fraction_allpairs, FP,listFPJij,TP_coords,listTPJij",
"def occbin_test(p = None):\n if p is None:\n p = getp_default()\n\n p_nozlb = copy.deepcopy(p)\n p_nozlb['monetary'] = 'taylor'\n inputdict_nozlb = getinputdict(p_nozlb)\n\n p_zlb = copy.deepcopy(p)\n p_zlb['monetary'] = 'zlb'\n inputdict_zlb = getinputdict(p_zlb)\n\n shocks = shockpath_default\n\n # simperiods determines how many periods simulate forward and also length of IRF\n sys.path.append(str(__projectdir__ / Path('submodules/dsge-perturbation/')))\n from calloccbin_func import calloccbin_oneconstraint\n calloccbin_oneconstraint(inputdict_nozlb, inputdict_zlb, 'Ihat < -log(I_ss)', 'Ihat > -log(I_ss)', shocks, os.path.join(__projectdir__, 'regimes/temp/occbin/'), run = True, irf = True, simperiods = simperiods_default)",
"def calc_correction_per_corrprod(dump, channels, params):\n n_channels = channels.stop - channels.start\n g_per_input = np.ones((len(params.inputs), n_channels), dtype='complex64')\n for product in params.products.values():\n for n in range(len(params.inputs)):\n sensor = product[n]\n g_product = sensor[dump]\n if np.shape(g_product) != ():\n g_product = g_product[channels]\n g_per_input[n] *= g_product\n # Transpose to (channel, input) order, and ensure C ordering\n g_per_input = np.ascontiguousarray(g_per_input.T)\n g_per_cp = np.empty((n_channels, len(params.input1_index)), dtype='complex64')\n _correction_inputs_to_corrprods(g_per_cp, g_per_input, params.input1_index, params.input2_index)\n return g_per_cp",
"def chirpf(self,cr=160e3):\n L=self.conf.n_samples_per_block\n sr=self.conf.sample_rate\n f0=0.0\n tv=n.arange(L,dtype=n.float64)/float(sr)\n dphase=0.5*tv**2*cr*2*n.pi\n chirp=n.exp(1j*n.mod(dphase,2*n.pi))*n.exp(1j*2*n.pi*f0*tv)\n return(n.array(chirp,dtype=n.complex64))",
"def RuCoref2CoNLL(path, out_path, language='russian'):\n data = {\"doc_id\": [],\n \"part_id\": [],\n \"word_number\": [],\n \"word\": [],\n \"part_of_speech\": [],\n \"parse_bit\": [],\n \"lemma\": [],\n \"sense\": [],\n \"speaker\": [],\n \"entiti\": [],\n \"predict\": [],\n \"coref\": []}\n\n part_id = '0'\n speaker = 'spk1'\n sense = '-'\n entiti = '-'\n predict = '-'\n\n tokens_ext = \"txt\"\n groups_ext = \"txt\"\n tokens_fname = \"Tokens\"\n groups_fname = \"Groups\"\n\n tokens_path = os.path.join(path, \".\".join([tokens_fname, tokens_ext]))\n groups_path = os.path.join(path, \".\".join([groups_fname, groups_ext]))\n print('Convert rucoref corpus into conll format ...')\n start = time.time()\n coref_dict = {}\n with open(groups_path, \"r\") as groups_file:\n for line in groups_file:\n doc_id, variant, group_id, chain_id, link, shift, lens, content, tk_shifts, attributes, head, hd_shifts = line[\n :-1].split('\\t')\n\n if doc_id not in coref_dict:\n coref_dict[doc_id] = {'unos': defaultdict(list), 'starts': defaultdict(list), 'ends': defaultdict(list)}\n\n if len(tk_shifts.split(',')) == 1:\n coref_dict[doc_id]['unos'][shift].append(chain_id)\n else:\n tk = tk_shifts.split(',')\n coref_dict[doc_id]['starts'][tk[0]].append(chain_id)\n coref_dict[doc_id]['ends'][tk[-1]].append(chain_id)\n groups_file.close()\n\n # Write conll structure\n with open(tokens_path, \"r\") as tokens_file:\n k = 0\n doc_name = '0'\n for line in tokens_file:\n doc_id, shift, length, token, lemma, gram = line[:-1].split('\\t')\n \n if doc_id == 'doc_id':\n continue\n \n if doc_id != doc_name:\n doc_name = doc_id\n w = watcher()\n k = 0\n \n data['word'].append(token) \n data['doc_id'].append(doc_id)\n data['part_id'].append(part_id)\n data['lemma'].append(lemma)\n data['sense'].append(sense)\n data['speaker'].append(speaker)\n data['entiti'].append(entiti)\n data['predict'].append(predict)\n data['parse_bit'].append('-')\n\n opens = coref_dict[doc_id]['starts'][shift] if shift in coref_dict[doc_id]['starts'] else []\n ends = coref_dict[doc_id]['ends'][shift] if shift in coref_dict[doc_id]['ends'] else []\n unos = coref_dict[doc_id]['unos'][shift] if shift in coref_dict[doc_id]['unos'] else []\n s = []\n s += ['({})'.format(el) for el in unos]\n s += ['({}'.format(el) for el in opens]\n s += ['{})'.format(el) for el in ends]\n s = '|'.join(s)\n if len(s) == 0:\n s = '-'\n data['coref'].append(s)\n else:\n data['coref'].append(s)\n \n closed = w.mentions_closed(s)\n if gram == 'SENT' and not closed:\n data['part_of_speech'].append('.')\n data['word_number'].append(k)\n k += 1 \n \n elif gram == 'SENT' and closed:\n data['part_of_speech'].append(gram)\n data['word_number'].append(k)\n k = 0\n else:\n data['part_of_speech'].append(gram)\n data['word_number'].append(k)\n k += 1\n \n tokens_file.close()\n \n \n # Write conll structure in file\n conll = os.path.join(out_path, \".\".join([language, 'v4_conll']))\n with open(conll, 'w') as CoNLL:\n for i in tqdm(range(len(data['doc_id']))):\n if i == 0:\n CoNLL.write('#begin document ({}); part {}\\n'.format(data['doc_id'][i], data[\"part_id\"][i]))\n CoNLL.write(u'{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n'.format(data['doc_id'][i],\n data[\"part_id\"][i],\n data[\"word_number\"][i],\n data[\"word\"][i],\n data[\"part_of_speech\"][i],\n data[\"parse_bit\"][i],\n data[\"lemma\"][i],\n data[\"sense\"][i],\n data[\"speaker\"][i],\n data[\"entiti\"][i],\n data[\"predict\"][i],\n data[\"coref\"][i]))\n elif i == len(data['doc_id']) - 1:\n 
CoNLL.write(u'{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n'.format(data['doc_id'][i],\n data[\"part_id\"][i],\n data[\"word_number\"][i],\n data[\"word\"][i],\n data[\"part_of_speech\"][i],\n data[\"parse_bit\"][i],\n data[\"lemma\"][i],\n data[\"sense\"][i],\n data[\"speaker\"][i],\n data[\"entiti\"][i],\n data[\"predict\"][i],\n data[\"coref\"][i]))\n CoNLL.write('\\n')\n CoNLL.write('#end document\\n')\n else:\n if data['doc_id'][i] == data['doc_id'][i + 1]:\n CoNLL.write(u'{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n'.format(data['doc_id'][i],\n data[\"part_id\"][i],\n data[\"word_number\"][i],\n data[\"word\"][i],\n data[\"part_of_speech\"][i],\n data[\"parse_bit\"][i],\n data[\"lemma\"][i],\n data[\"sense\"][i],\n data[\"speaker\"][i],\n data[\"entiti\"][i],\n data[\"predict\"][i],\n data[\"coref\"][i]))\n if data[\"word_number\"][i + 1] == 0:\n CoNLL.write('\\n')\n else:\n CoNLL.write(u'{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n'.format(data['doc_id'][i],\n data[\"part_id\"][i],\n data[\"word_number\"][i],\n data[\"word\"][i],\n data[\"part_of_speech\"][i],\n data[\"parse_bit\"][i],\n data[\"lemma\"][i],\n data[\"sense\"][i],\n data[\"speaker\"][i],\n data[\"entiti\"][i],\n data[\"predict\"][i],\n data[\"coref\"][i]))\n CoNLL.write('\\n')\n CoNLL.write('#end document\\n')\n CoNLL.write('#begin document ({}); part {}\\n'.format(data['doc_id'][i + 1], data[\"part_id\"][i + 1]))\n\n print('End of convertion. Time - {}'.format(time.time() - start))\n return None",
"def find_PiPi(pdb_file, lig_name, centroid_distance=5.0, dih_parallel=25, dih_tshape=80, verbose=1):\n # Get ligand residue and print its name.\n ligAtomList = []\n ligAtomIdList = []\n mol = next(pybel.readfile('pdb', pdb_file))\n if verbose: print(\"A total of %s residues\" % mol.OBMol.NumResidues())\n lig = None\n for res in ob.OBResidueIter(mol.OBMol):\n # print res.GetName()\n if res.GetName() == lig_name:\n lig = res\n if verbose: print(\"Ligand residue name is:\", lig.GetName())\n break\n if not lig:\n if verbose: print(\"No ligand residue %s found, please confirm.\" % lig_name)\n return -1\n else:\n for atom in ob.OBResidueAtomIter(lig):\n # print atom.GetIdx()\n ligAtomList.append(atom)\n ligAtomIdList.append(atom.GetIdx())\n\n # Set ring_id\n i = 0\n for ring in mol.sssr:\n ring.ring_id = i\n i += 1\n # print ring.ring_id\n\n # Determine which rings are from ligand.\n ligRingList = []\n ligAroRingList = []\n ligRingIdList = []\n recRingList = []\n recAroRingList = []\n for ring in mol.sssr:\n for atom in ligAtomList:\n if ring.IsMember(atom):\n if ring not in ligRingList:\n ligRingList.append(ring)\n ligRingIdList.append(ring.ring_id)\n if verbose: print(\"ligand ring_ID: \", ring.ring_id, end=' ')\n if ring.IsAromatic():\n if verbose: print(\"aromatic\")\n ligAroRingList.append(ring)\n else:\n if verbose: print(\"saturated\")\n for ring in mol.sssr:\n if ring.ring_id not in ligRingIdList:\n recRingList.append(ring)\n if ring.IsAromatic():\n recAroRingList.append(ring)\n if verbose: print(\"\\nReceptor has \", len(recRingList), \" rings,\", end=' ')\n if verbose: print(\" has \", len(recAroRingList), \" aromatic rings.\")\n\n # Find and show the rings\n ligRingCenter = ob.vector3()\n recRingCenter = ob.vector3()\n ligNorm1 = ob.vector3()\n ligNorm2 = ob.vector3()\n recNorm1 = ob.vector3()\n recNorm2 = ob.vector3()\n count = 0\n lig_ring_index = 0\n for ligRing in ligAroRingList:\n lig_ring_index += 1\n ligRing.findCenterAndNormal(ligRingCenter, ligNorm1, ligNorm2)\n rec_ring_index = 0\n for recRing in recAroRingList:\n rec_ring_index += 1\n recRing.findCenterAndNormal(recRingCenter, recNorm1, recNorm2)\n dist = ligRingCenter.distSq(recRingCenter) ** 0.5\n angle = vecAngle(ligNorm1, recNorm1)\n if (dist < centroid_distance and (angle < dih_parallel or angle > dih_tshape)): # the criteria\n count += 1\n if verbose: print(\"Pi-Pi ring pairs: %3s,%3s Angle(deg.): %5.2f Distance(A): %.2f\" % (recRing.ring_id, ligRing.ring_id, angle, dist))\n if verbose: print(\"Total Pi-Pi interactions:\", count)\n return count",
"def ContactProbTraj (i,j,conf_traj, num_sample, prob_traj_length\\\n , protein_name = 'mer15' ):\n p_traj = []\n #probability list of contact formation to be returned\n \n has_contact = []\n #list of 0s and 1s of the conformation trajectory \n #1 if contact exists in reference frame, 0 otherwise\n \n valid_sample_ind = []\n #list of indices of element 0 from the has_contact trajectory\n #valid initial reference frame indices \n \n for e in range (len(conf_traj)):\n #appends boolean flags to has_contact\n exists = isContact(i,j,conf_traj[e])\n has_contact.append (exists)\n \n #if contact exists, index of conformation in the traj is appended \n if exists == 0 and (e < len(conf_traj) - prob_traj_length):\n valid_sample_ind.append(e)\n \n for i in range (prob_traj_length):\n if i == int(prob_traj_length / 4):\n print(\"25.0 % done.\")\n elif i == int(prob_traj_length / 2):\n print(\"50.0 % done.\")\n elif i == int(0.75 * prob_traj_length):\n print(\"75.0 % done.\")\n\n occurs = 0\n \n for p in range(num_sample):\n \n # random conf selected from list of valid indices\n # n is the index of nth MC step\n n = valid_sample_ind [random.randint (0,len(valid_sample_ind)-1)]\n \n \n #records num times contact occurs at chosen MC step (either 0 or 1)\n occurs += has_contact [n + i]\n \n p_traj.append(occurs/num_sample)\n \n return p_traj",
"def gridsearch(self, func, **kwargs):\n \n # avoid using \"dots\" in loops for performance\n rotate = core.rotate\n lag = core.lag\n chop = core.chop\n unsplit = core.unsplit\n \n # ensure trace1 at zero angle\n copy = self.data.copy()\n copy.rotateto(0)\n x, y = copy.x, copy.y\n \n # pre-apply receiver correction\n if 'rcvcorr' in kwargs:\n rcvphi, rcvlag = self.__rcvcorr\n x, y = unsplit(x, y, rcvphi, rcvlag)\n \n ###################### \n # inner loop function\n ######################\n \n # source correction \n \n if 'srccorr' in kwargs:\n srcphi, srclag = self.__srccorr\n def srccorr(x, y, ang):\n x, y = unsplit(x, y, srcphi-ang, srclag)\n return x, y\n else:\n def srccorr(x, y, ang):\n return x, y\n \n # rotate to polaristation (needed for tranverse min)\n if 'mode' in kwargs and kwargs['mode'] == 'rotpol':\n pol = self.data.pol\n def rotpol(x, y, ang):\n # rotate to pol\n x, y = rotate(x, y, pol-ang)\n return x, y\n else:\n def rotpol(x, y, ang):\n return x, y\n \n # actual inner loop function \n def getout(x, y, ang, shift):\n # remove shift\n x, y = lag(x, y, -shift)\n x, y = srccorr(x, y, ang)\n x, y = chop(x, y, window=self.data.window)\n x, y = rotpol(x, y, ang)\n return func(x, y)\n\n \n # Do the grid search\n prerot = [ (rotate(x, y, ang), ang) for ang in self.__degs ]\n \n out = [ [ getout(data[0], data[1], ang, shift) for shift in self.__slags ]\n for (data,ang) in prerot ]\n \n return out"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
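The pn_correlator_cc entry above only shows the SWIG factory wrapper, so the sketch below illustrates how the block it creates might be driven end to end: a GLFSR source regenerates the PN sequence the correlator expects, and the correlator emits one correlation value per code period. This is a minimal sketch under stated assumptions rather than code from this dataset: the GNU Radio 3.7-style namespaces (gr, blocks, digital), the helper blocks glfsr_source_f, float_to_complex, head and vector_sink_c, and the chosen values (degree = 5, eight periods) are all assumptions, and block names differ between GNU Radio releases.

    from gnuradio import gr, blocks, digital

    degree = 5                               # assumed PN polynomial degree -> period 2**5 - 1 = 31 chips
    n_chips = 8 * (2**degree - 1)            # run for eight full PN periods

    tb = gr.top_block()
    src = digital.glfsr_source_f(degree)     # GLFSR source emitting +/-1.0 antipodal chips
    to_c = blocks.float_to_complex()         # turn the real chips into a complex baseband stream
    head = blocks.head(gr.sizeof_gr_complex, n_chips)
    corr = digital.pn_correlator_cc(degree)  # correlate against the same degree-5 reference code
    sink = blocks.vector_sink_c()

    tb.connect(src, to_c, head, corr, sink)
    tb.run()

    # One output sample per PN code period; the magnitude should climb toward its
    # maximum once the correlator lines up with the incoming sequence.
    print([round(abs(x), 3) for x in sink.data()])

Using the generating GLFSR as the test source keeps the example self-contained; against live data the mask and seed arguments of pn_correlator_cc would have to match the transmitter's code.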
__init__(self) > digital_probe_density_b_sptr __init__(self, p) > digital_probe_density_b_sptr | def __init__(self, *args):
this = _digital_swig.new_digital_probe_density_b_sptr(*args)
try: self.this.append(this)
except: self.this = this | [
"def __init__(self, params: parameters_lib.SwirlLMParameters):\n super(ConstantDensity, self).__init__(params)\n\n self.rho = params.rho",
"def probe_density_b(*args, **kwargs):\n return _digital_swig.probe_density_b(*args, **kwargs)",
"def __init__(self, position, spectrum, brightness):\n pass",
"def __init__(self, b=1009, hashFunction=None, probeFunction=None):\r\n self.b = b\r\n self.bins = [None] * b\r\n self.deleted = [False] * b\r\n \r\n if hashFunction:\r\n self.hashFunction = hashFunction\r\n else:\r\n self.hashFunction = defaultHash\r\n \r\n if probeFunction:\r\n self.probeFunction = probeFunction\r\n else:\r\n self.probeFunction = lambda hk, size, i : (hk + 37) % size",
"def __init__(self, *args):\n this = _digital_swig.new_digital_descrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self,p_r={'type':'laguerre','paras':{'deg':5}},p_phi={'type':'legendre','paras':{'deg':5}},\r\n p_dI={'type':'normal','paras':{'cov':.1,'mu':0.}},eps=0.05,explore=True):\r\n \r\n self.p_phi = p_phi\r\n \r\n #probability related\r\n self.p_r_implemented = ['laguerre','normal']\r\n assert p_r['type'] in self.p_r_implemented, \"Assertion failed - functional type {} not one of the implemented type for r: {}\".format(p_r['type'],self.p_r_implemented)\r\n self.p_r = p_r\r\n \r\n self.p_phi_implemented = ['legendre','uniform']\r\n assert p_phi['type'] in self.p_phi_implemented, \"Assertion failed - functional type {} not one of the implemented type for phi: {}\".format(p_phi['type'],self.p_phi_implemented)\r\n self.p_phi = p_phi\r\n \r\n self.p_dI_implemented = ['normal','uniform']\r\n assert p_dI['type'] in self.p_dI_implemented, \"Assertion failed - functional type {} not one of the implemented type for intensity difference: {}\".format(p_dI['type'],self.p_dI_implemented)\r\n self.p_dI = p_dI\r\n \r\n self.p_r_fun = None #will be set after fitting\r\n self.p_phi_fun = None #will be set after fitting\r\n self.p_dI_fun = None #will be set after fitting\r\n self.p_nd = None #probability of non-disappearance from frame to frame\r\n self.p_nondisappearance_fun = None #will be set after fitting using Binomial distribution and self.p_nd the probability of non-disappearance from frame to frame\r\n \r\n self.rs_accumulated = None #will contain the rs for the linked trajectories\r\n self.phis_accumnulated = None #will contain the phis for the linked trajectories\r\n self.dIs_accumnulated = None #will contain the dIs for the linked trajectories\r\n \r\n self.r_mean = None\r\n \r\n #particle motion related\r\n self.rs_storage = []\r\n self.phis_storage = []\r\n self.dIs_storage = []\r\n self.rs_range = [] #min and max value for pdf\r\n self.num_pos_vs_t = None #continuously updated number of particles observed each frame\r\n self.avg_num_pos = None\r\n self.expected_num_disappearing = None #continously updated value of atoms disappearing from frame to frame\r\n self.positions = None\r\n self.intensities = None\r\n self.dim = None\r\n self.Nposmax = None\r\n self.Nt = None\r\n self.Nt_new = None #number of time steps added due to update of positions\r\n self.LM = None #row corresponds to time step into the future (row 0 = 1 step into the future), columns correspond ot the indices of particle position entries in self.positions\r\n self.LM_traj = None #keeps the dense form of the trajectories (row = trajectory, column = timestep, entries = int referring to entry in self.positions)\r\n self.LL = None #locked list - length of max number of particles in any frame, entries integers indicating for how many steps a trajectory shall not be updated\r\n \r\n #trajectory linking mode related\r\n self.eps = eps\r\n self.explore = explore",
"def __init__(self, *args):\n this = _digital_swig.new_digital_diff_encoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self, *args):\n this = _digital_swig.new_digital_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self, spec_data, lvol, dp=0.0, innout=0, plusminus=+1):\n super().__init__(spec_data, lvol)\n\n self.fortran_module.specpjh.init_pjh(dp, innout, plusminus)\n self.dp = dp\n self.innout = innout\n self.plusminus = plusminus\n self.initialized = True\n\n ## the size of the problem, 2 for 1.5 or 2D system\n self.problem_size = 2\n ## choose the variable for Poincare plot\n self.poincare_plot_type = \"yx\"\n ## the x label of Poincare plot\n self.poincare_plot_xlabel = \"theta\"\n ## the y label of Poincare plot\n self.poincare_plot_ylabel = \"p_theta\"",
"def __init__(self, *args):\n _itkOptimizerParametersPython.itkOptimizerParametersD_swiginit(self, _itkOptimizerParametersPython.new_itkOptimizerParametersD(*args))",
"def __init__(self, rho):\n super(MarginalGenerator, self).__init__(rho)",
"def __init__(self, pin, freq, dc_left, dc_right):\n\n pin = machine.Pin(pin)\n self.pwm = machine.PWM(pin, freq=freq)\n self.left, self.right = dc_left, dc_right",
"def __init__(self, *args):\n this = _digital_swig.new_digital_diff_phasor_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self, *args):\n _itkOptimizerParametersPython.itkOptimizerParametersHelperD_swiginit(self, _itkOptimizerParametersPython.new_itkOptimizerParametersHelperD(*args))",
"def __init__(self,\n bins = 4,\n doBackground = True,\n scaleDetected = 10.0,\n sigmaSmooth = 1.0,\n thetaTolerance = 0.15,\n \n luminosityLimit = 0.02, \n centerLimit = 1.2, \n eRange = 0.08, \n bLimit = 1.4,\n skewLimit = 10.0, \n \n kernelSigma = 7, \n kernelWidth = 11, \n growKernel = 1.4,\n \n houghBins = 200, \n houghThresh = 40,\n \n maxTrailWidth = 2.1,\n maskAndBits = (),\n \n log = None,\n verbose = False,\n ):\n \n self.bins = bins\n self.doBackground = doBackground\n self.scaleDetected = scaleDetected\n self.sigmaSmooth = sigmaSmooth\n self.thetaTolerance = thetaTolerance\n \n self.kernelSigma = kernelSigma \n self.kernelWidth = kernelWidth\n self.growKernel = 1.4\n\n self.centerLimit = centerLimit\n self.eRange = eRange\n self.houghThresh = houghThresh\n self.houghBins = houghBins\n self.luminosityLimit = luminosityLimit\n self.skewLimit = skewLimit\n self.bLimit = bLimit\n\n self.maxTrailWidth = maxTrailWidth\n self.maskAndBits = maskAndBits\n \n if log is None:\n logLevel = pexLog.Log.INFO\n if verbose:\n logLevel = pexLog.Log.DEBUG\n log = pexLog.Log(pexLog.Log.getDefaultLog(), 'satelliteFinder', logLevel)\n self.log = log\n \n self.debugInfo = {}",
"def __init__(self, *args):\n _itkImagePython.vectoritkImageVD44_swiginit(self, _itkImagePython.new_vectoritkImageVD44(*args))",
"def __init__(self, *args):\n _itkImagePython.vectoritkImageVD43_swiginit(self, _itkImagePython.new_vectoritkImageVD43(*args))",
"def __init__(self, dim, distribution, proposal_distribution): \n self.dim = dim\n self.distribution = distribution\n self.proposal_distribution = proposal_distribution\n self.x = None\n self.init_x()",
"def __init__(self, *args, **kwargs):\n super(BarnesInterpolationND, self).__init__(*args, **kwargs)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
probe_density_b(double alpha) > digital_probe_density_b_sptr This block maintains a running average of the input stream and makes it available as an accessor function. The input stream is type unsigned char. If you send this block a stream of unpacked bytes, it will tell you what the bit density is. | def probe_density_b(*args, **kwargs):
return _digital_swig.probe_density_b(*args, **kwargs) | [
"def __init__(self, *args):\n this = _digital_swig.new_digital_probe_density_b_sptr(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __call__(self, rgba, alt_prior=None, grayscale=True):\n rgb = rgb_bin(rgba, self.binsize)\n prob = self.table.get(rgb, 0)\n prior = self.prior\n if self.grayscale and not grayscale:\n # They want 0-1, we have grayscale.\n prob = prob / 255.0\n prior = prior / 255.0\n elif not self.grayscale and grayscale:\n # They want grayscale, we have 0-1.\n prob = prob * 255\n prior = prior * 255\n #if prob != 0 and alt_prior != None:\n # !!!!! This is incorrect. Do not support this until it is fixed.\n #prob = prob * alt_prior / self.prior\n if grayscale:\n # Values are already in the desired range, but need to set the type.\n prob = int(round(prob))\n return prob",
"def cvar_importance_sampling_biasing_density(pdf,function,beta,VaR,tau,x):\n if np.isscalar(x):\n x = np.array([[x]])\n assert x.ndim==2\n vals = np.atleast_1d(pdf(x))\n assert vals.ndim==1 or vals.shape[1]==1\n y = function(x)\n assert y.ndim==1 or y.shape[1]==1\n I = np.where(y<VaR)[0]\n J = np.where(y>=VaR)[0]\n vals[I]*=beta/tau\n vals[J]*=(1-beta)/(1-tau)\n return vals",
"def flatness(a,b, alpha, mu): \n flatness = 1/(b-a) * (-np.sqrt(mu)*(b**2-a**2)+ alpha/3*(b**3-a**3))\n \n return flatness",
"def fisrt_layer_bias_fourier( A, train_x, train_y, epsilon=None, pdf='Gaussian', activation='sigmoid' ):\n # check the probability density of input\n if pdf != 'Gaussian':\n raise ValueError(\"the probability density of input is not Gaussian, not support.\")\n\n # consistency check for train data, train_x and train_y\n if train_x.shape[0] != train_y.shape[0]:\n raise ValueError(\"input size is not consistent with output size.\") \n\n num_neurons = A.shape[1]\n n = train_x.shape[0]\n d = train_x.shape[1]\n epsilon = 1/np.sqrt(n)\n \n # draw n i.i.d. frequencies w from a cap of sphere {||w|| = 0.5, <w, a> >= (1-epsilon**2/2)/2 }\n \n # compute bias b1 in this main loop\n b1 = np.zeros(num_neurons) # initialization for b1\n for k in range(num_neurons):\n #omega = np.zeros((d, n))\n indicate_num = 0\n v = 0\n omega_sample = []\n while 1:\n # generate sample on sphere \n sample_sphere = np.random.randn(d)\n norm_sample = np.linalg.norm(sample_sphere)\n if norm_sample >= 1e-4 # detect underflow\n sample_sphere = 0.5 * sample_sphere/norm_sample\n\n # judge if it on cap\n if abs(np.dot(sample_sphere, A[:,k])) > (1-epsilon**2/2.)/2.:\n indicate_num = indicate_num + 1\n omega_sample.append(sample_sphere)\n\n # if reach the number of samples of frequencies w, break\n if indicate_num >= n:\n omega_sample = np.array(omega_sample)\n break\n\n print('{} -th process of sampling on cap done...' . format(k+1)) \n\n #omega[:,k] = sample_sphere\n # compute intermidiate variable for computing bias b1\n for kk in range(n):\n density_value = 1./np.sqrt(2*np.pi) * np.exp( -np.linalg.norm(train_x[kk,:])**2/2 )\n v = v + train_y[kk]/density_value * np.exp(complex(0, -np.dot(omega_sample[kk,:], train_x[kk,:]))) \n\n # compute the fourier variable for bias b1\n v = v / n\n magnit_v = abs(v)\n phase_v = np.arccos((v/magnit_v).real)\n #phase_f\n if activation == 'sigmoid':\n fun_real = lambda x: np.cos(-.5*x)/(1+np.exp(-x))\n fun_imag = lambda x: np.sin(-.5*x)/(1+np.exp(-x))\n spectrum_real = quad(fun_real, -20, 20)\n spectrum_imag = quad(fun_imag, -20, 20)\n phase_f = np.arctan(spectrum_imag/spectrum_real)\n elif activation == 'relu':\n fun_real = lambda x: np.cos(-.5*x)*np.maximum(0, x)\n fun_imag = lambda x: np.sin(-.5*x)*np.maximum(0, x)\n spectrum_real = quad(fun_real, -20, 20)\n spectrum_imag = quad(fun_imag, -20, 20)\n phase_f = np.arctan(spectrum_imag/spectrum_real)\n elif activation == 'tanh':\n fun_real = lambda x: np.cos(-.5*x)*(np.exp(x)-np.exp(-x))/(np.exp(x)+np.exp(-x))\n fun_imag = lambda x: np.sin(-.5*x)*(np.exp(x)-np.exp(-x))/(np.exp(x)+np.exp(-x))\n spectrum_real = quad(fun_real, -20, 20)\n spectrum_imag = quad(fun_imag, -20, 20)\n phase_f = np.arctan(spectrum_imag/spectrum_real)\n else:\n raise ValueError('The activation is not supported!')\n\n b1[k] = 1/np.pi * (phase_v - phase_f )\n\n return b1",
"def test_compute_butter_bp_filter(self):\n\n # Compute a bandpass filter\n parameters = {'passband_frequency': [1, 2],\n 'stopband_frequency': [0.1, 5],\n 'passband_attenuation': 1,\n 'stopband_attenuation': 80}\n self.filter_under_test.configure_filter(parameters)\n\n self.filter_under_test.filter_class = 'butterworth'\n self.filter_under_test.compute_parameters(target='passband')\n self.assertEqual(self.filter_under_test.filter_type, 'bandpass')\n self.assertEqual(self.filter_under_test.N, 7)\n self.assertAlmostEqual(self.filter_under_test.Wn[0], 6.07569169)\n self.assertAlmostEqual(self.filter_under_test.Wn[1], 12.99553026)\n\n self.filter_under_test.design()\n\n self.assertAlmostEqual(self.filter_under_test.B[0], 759751.80527519668)\n self.assertAlmostEqual(self.filter_under_test.A[0], 1)\n self.assertAlmostEqual(self.filter_under_test.A[1], 31.0974722556149)\n self.assertAlmostEqual(self.filter_under_test.A[2], 1036.224236482155)\n self.assertAlmostEqual(self.filter_under_test.A[3], 19567.149028043725)\n self.assertAlmostEqual(self.filter_under_test.A[4], 355263.81277219893)\n self.assertAlmostEqual(self.filter_under_test.A[5], 4595251.7849443173)\n self.assertAlmostEqual(self.filter_under_test.A[6], 55790492.859455325)\n self.assertAlmostEqual(self.filter_under_test.A[7], 513056794.27105772)\n self.assertAlmostEqual(self.filter_under_test.A[8], 4405040750.9169989)\n self.assertAlmostEqual(self.filter_under_test.A[9], 28647635164.403412)\n self.assertAlmostEqual(self.filter_under_test.A[10],\n 174871956719.38678)\n self.assertAlmostEqual(self.filter_under_test.A[11],\n 760477697837.74438)\n self.assertAlmostEqual(self.filter_under_test.A[12],\n 3179819056953.7124)\n self.assertAlmostEqual(self.filter_under_test.A[13],\n 7534656938190.1572)\n self.assertAlmostEqual(self.filter_under_test.A[14],\n 19130579538158.508)\n\n self.filter_under_test.compute_parameters(target='stopband')\n self.assertEqual(self.filter_under_test.filter_type, 'bandpass')\n self.assertEqual(self.filter_under_test.N, 7)\n self.assertAlmostEqual(self.filter_under_test.Wn[0], 5.81782828643783)\n self.assertAlmostEqual(self.filter_under_test.Wn[1], 13.5715307020618)\n\n self.filter_under_test.design()\n\n self.assertAlmostEqual(self.filter_under_test.B[0], 1684860.320277143)\n self.assertAlmostEqual(self.filter_under_test.A[0], 1)\n self.assertAlmostEqual(self.filter_under_test.A[1], 34.844822362404)\n self.assertAlmostEqual(self.filter_under_test.A[2], 1159.77866919475)\n self.assertAlmostEqual(self.filter_under_test.A[3], 23309.4127006002)\n self.assertAlmostEqual(self.filter_under_test.A[4], 423324.337255822)\n self.assertAlmostEqual(self.filter_under_test.A[5], 5689681.03696918)\n self.assertAlmostEqual(self.filter_under_test.A[6], 68543839.369195938)\n self.assertAlmostEqual(self.filter_under_test.A[7], 643836464.42606068)\n self.assertAlmostEqual(self.filter_under_test.A[8], 5412004629.6462231)\n self.assertAlmostEqual(self.filter_under_test.A[9], 35470506117.412323)\n self.assertAlmostEqual(self.filter_under_test.A[10],\n 208373474926.16937)\n self.assertAlmostEqual(self.filter_under_test.A[11],\n 905920861700.23743)\n self.assertAlmostEqual(self.filter_under_test.A[12],\n 3558965506031.5586)\n self.assertAlmostEqual(self.filter_under_test.A[13],\n 8442608469993.46)\n self.assertAlmostEqual(self.filter_under_test.A[14],\n 19130579538158.492)",
"def _kde_pdf(x, bin_x, bin_entries=None, band_width=None):\n # basic input checks and set up\n if not isinstance(x, (float, int, np.number, np.ndarray, list, tuple)):\n raise RuntimeError('x has wrong type')\n if bin_entries is not None:\n if bin_x.shape != bin_entries.shape:\n raise RuntimeError('bin_entries has wrong type')\n if band_width is None:\n # pick up zero-order band-width\n band_width = kde_bw(bin_x, bin_entries, n_adaptive=0)\n n_total = len(bin_x) if bin_entries is None else np.sum(bin_entries)\n if bin_entries is None:\n bin_entries = 1.0\n\n # evaluate kdf pdf at x\n if isinstance(x, (float, int, np.number)):\n p = _kde_histsum(x, bin_x, bin_entries, band_width, n_total)\n elif isinstance(x, (np.ndarray, list, tuple)):\n x = np.array(x)\n p = np.array([_kde_histsum(xi, bin_x, bin_entries, band_width, n_total) for xi in x.ravel()]).reshape(x.shape)\n return p",
"def beta_icdf(a: int, b: int, alpha: float,\n upper: bool=False) -> Tuple[float, float]:\n assert 0 < a < 1 << 44\n assert 0 < b < 1 << 44\n assert alpha >= 0\n if alpha <= 0:\n return (1.0, 0.0) if upper else (0.0, 0.0)\n alpha = min(alpha, 0.05)\n if upper:\n lo, hi = _beta_icdf_lo(b, a, alpha)\n return min(next(1 - lo), 1.0), min(next(hi - lo), 1.0)\n lo, hi = _beta_icdf_lo(a, b, alpha)\n return max(0.0, lo), min(next(hi - lo), 1.0)",
"def density(self) -> float:\n pass",
"def gas_pressure_adiabat(density, adiabat, gamma=4./3.):\n\n return adiabat * density**gamma",
"def density(alt):\r\n rho = surface_rho*np.exp(-beta*alt)\r\n return rho",
"def density_estimation(sample, X, h, kernel=\"epanechnikov\"):\n kde = KernelDensity(kernel=kernel, bandwidth=h).fit(sample.reshape(-1, 1))\n log_dens = kde.score_samples(X.reshape(-1, 1))\n density = np.exp(log_dens)\n return density",
"def hdi(self, alpha=.05):\n credible_mass = 1 - alpha\n try:\n _hdi = highest_density_interval(self.data, credible_mass)\n return (round(_hdi[0], 4), round(_hdi[1], 4))\n except Exception as e:\n logger.warn(e)\n return (None, None)",
"def BeamDiameter(a,b,c,f,zf,fieldtype='Echo'):\n\n N = NearFieldLength(a,b,c,f)\n\n K = FocusFactor(a,b,c,f,zf)\n\n k ={'Echo':0.51,'Free':0.7}\n\n lmbd = c/f\n\n k = k[fieldtype]\n\n b = (k*lmbd/a)**2\n\n BD = K*np.sqrt((-4*N**2*b)/(b - 1.))\n #\n # BD = K*k[fieldtype]*zf/(2*N)\n\n # BD = K*k*zf/(2*N)\n\n return BD",
"def calculate_b(component):\r\n Tc = float(component.CriticalTemperature)\r\n Pc = float(component.CriticalPressure)\r\n \r\n b = (0.07780*R*Tc)/Pc \r\n return b",
"def compute_energy_spectrum(wave_spectral_density, gravity, density):\n return wave_spectral_density * gravity * density",
"def test_compute_butter_bs_filter(self):\n\n parameters = {'passband_frequency': [1, 25],\n 'stopband_frequency': [2, 15],\n 'passband_attenuation': 1,\n 'stopband_attenuation': 40}\n\n self.filter_under_test.filter_class = 'butterworth'\n self.filter_under_test.configure_filter(parameters)\n self.filter_under_test.compute_parameters(target='passband')\n self.assertEqual(self.filter_under_test.filter_type, 'bandstop')\n self.assertEqual(self.filter_under_test.N, 9)\n self.assertAlmostEqual(self.filter_under_test.Wn[0], 8.0681551)\n self.assertAlmostEqual(self.filter_under_test.Wn[1], 146.79336908)\n self.filter_under_test.design()\n\n target_B_coefs = [1, 0, 10659.165019231212, 0, 50496799.514312126,\n 0, 139547260472.68924, 0, 247909546226676.62,\n 0, 2.9361208478586816e+17, 0, 2.3182664173133981e+20,\n 0, 1.176704014318369e+23, 0, 3.484078407614217e+25,\n 0, 4.5848600847778207e+27]\n target_A_coefs = [1, 798.886667535808, 329769.118807736,\n 90767049.233665258,\n 18246055317.574032, 2779764493559.9541,\n 323737622832967.31,\n 28439472756901696.0, 1.8160086510982392e+18,\n 7.8893059749655937e+19, 2.1507928764897341e+21,\n 3.9891732058277967e+22, 5.3781783783668304e+23,\n 5.4692869382179803e+24, 4.2517955086597104e+25,\n 2.5050262147266555e+26, 1.0778906831551873e+27,\n 3.092648654056598e+27, 4.5848600847778174e+27]\n\n for pos, B in enumerate(target_B_coefs):\n self.assertAlmostEqual(self.filter_under_test.B[pos], B, places=4)\n self.assertAlmostEqual(self.filter_under_test.A[pos],\n target_A_coefs[pos], places=4)\n\n self.filter_under_test.compute_parameters(target='stopband')\n self.assertEqual(self.filter_under_test.N, 9)\n self.assertAlmostEqual(self.filter_under_test.Wn[0],\n 8.19898674504612, places=4)\n self.assertAlmostEqual(self.filter_under_test.Wn[1],\n 144.451038642691, places=4)\n self.filter_under_test.design()\n\n target_B_coefs = [0.999999999999997, 0, 10659.165019231235,\n 0, 50496799.514312387,\n 0, 139547260472.68756, 0, 247909546226678.84, 0,\n 2.936120847858713e+17, 0, 2.3182664173134024e+20,\n 0, 1.1767040143183764e+23, 0,\n 3.4840784076142471e+25, 0, 4.5848600847778548e+27]\n\n target_A_coefs = [1, 784.644294735911, 318492.53822460608,\n 86261284.909445286, 17072986548.141478,\n 2562592979207.1934, 294287704148591.62,\n 25527354190801672.0, 1.6135947979406876e+18,\n 6.9745276318961222e+19, 1.9110636917136504e+21,\n 3.5806935741770867e+22, 4.8889336791349288e+23,\n 5.0419941479279025e+24, 3.9784406142229909e+25,\n 2.3806742847603819e+26, 1.0410318007203891e+27,\n 3.0375137958120487e+27, 4.5848600847778592e+27]\n\n for pos, B in enumerate(target_B_coefs):\n print(\"pos = \", pos)\n self.assertAlmostEqual(self.filter_under_test.B[pos], B, places=4)\n self.assertAlmostEqual(self.filter_under_test.A[pos],\n target_A_coefs[pos], places=4)",
"def __init__(self, b=1009, hashFunction=None, probeFunction=None):\r\n self.b = b\r\n self.bins = [None] * b\r\n self.deleted = [False] * b\r\n \r\n if hashFunction:\r\n self.hashFunction = hashFunction\r\n else:\r\n self.hashFunction = defaultHash\r\n \r\n if probeFunction:\r\n self.probeFunction = probeFunction\r\n else:\r\n self.probeFunction = lambda hk, size, i : (hk + 37) % size",
"def test_compute_cheb1_bs_filter(self):\n\n # Band-stop filter calculation\n parameters = {'passband_frequency': [1, 7],\n 'stopband_frequency': [2, 6],\n 'passband_attenuation': 1,\n 'stopband_attenuation': 80}\n self.filter_under_test.filter_class = 'chebyshev_1'\n self.filter_under_test.configure_filter(parameters)\n self.filter_under_test.compute_parameters(target='passband')\n self.assertEqual(self.filter_under_test.N, 14)\n self.assertAlmostEqual(self.filter_under_test.Wn[0],\n 10.771173962426296)\n self.assertAlmostEqual(self.filter_under_test.Wn[1],\n 43.982292294453401)\n\n self.filter_under_test.design(ripple=1)\n self.assertAlmostEqual(self.filter_under_test.B[0], 0.891250938133753)\n self.assertAlmostEqual(self.filter_under_test.B[2], 5911.10857094055)\n self.assertAlmostEqual(self.filter_under_test.B[4], 18202171.142328978)\n self.assertAlmostEqual(self.filter_under_test.B[6], 34492453326.155769)\n self.assertAlmostEqual(self.filter_under_test.B[8], 44936338221315.367)\n self.assertAlmostEqual(self.filter_under_test.B[10],\n 42576364561914056.0)\n self.assertAlmostEqual(self.filter_under_test.B[12],\n 3.0255249276953297e+19)\n self.assertAlmostEqual(self.filter_under_test.B[14],\n 1.6380742485482297e+22)\n self.assertAlmostEqual(self.filter_under_test.B[16],\n 6.7901995359355942e+24)\n self.assertAlmostEqual(self.filter_under_test.B[18],\n 2.1445302571983762e+27)\n self.assertAlmostEqual(self.filter_under_test.B[20],\n 5.079758701897002e+29)\n self.assertAlmostEqual(self.filter_under_test.B[22],\n 8.7508711592342312e+31)\n self.assertAlmostEqual(self.filter_under_test.B[24],\n 1.0364114418785841e+34)\n self.assertAlmostEqual(self.filter_under_test.B[26],\n 7.5537001784783249e+35)\n self.assertAlmostEqual(self.filter_under_test.B[28],\n 2.5560692027246954e+37)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
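The probe_density_b row above describes a sink that keeps a running average of an unpacked-bit stream, i.e. its bit density. A minimal flowgraph sketch of how such a probe is typically wired up follows; it assumes GNU Radio's Python namespaces (gnuradio.gr, gnuradio.blocks, gnuradio.digital) and a density() accessor on the probe, none of which are stated in the row itself.

# Illustrative sketch only -- module paths and the density() accessor are assumptions,
# not taken from the dataset row above.
from gnuradio import gr, blocks, digital

class density_monitor(gr.top_block):
    def __init__(self, bits, alpha=0.001):
        gr.top_block.__init__(self)
        src = blocks.vector_source_b(bits, repeat=False)   # one unpacked bit per byte
        self.probe = digital.probe_density_b(alpha)        # alpha: running-average constant
        self.connect(src, self.probe)

tb = density_monitor([0, 1, 1, 0, 1] * 1000)
tb.run()
print("estimated bit density:", tb.probe.density())        # accessor name assumed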
__init__(self) > digital_probe_mpsk_snr_est_c_sptr __init__(self, p) > digital_probe_mpsk_snr_est_c_sptr | def __init__(self, *args):
this = _digital_swig.new_digital_probe_mpsk_snr_est_c_sptr(*args)
try: self.this.append(this)
except: self.this = this | [
"def __init__(self, *args):\n this = _digital_swig.new_digital_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self, *args):\n this = _digital_swig.new_digital_probe_density_b_sptr(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self, *args):\n this = _digital_swig.new_digital_pn_correlator_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self, name, smarts, score) -> None:\n ...",
"def __init__(self, *args, **kwargs):\n self.npumps = kwargs.pop('npumps', 1)\n self.nsetups = kwargs.pop('nsetups', 4)\n IPSerial.__init__(self, *args, **kwargs)",
"def __init__(self, *args):\n this = _digital_swig.new_digital_additive_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self, *args):\n _ida_pro.sval_pointer_swiginit(self, _ida_pro.new_sval_pointer(*args))",
"def __init__(self, pscu, sensor_idx):\n self.pscu = pscu\n self.sensor_idx = sensor_idx\n\n self.param_tree = ParameterTree({\n \"leak_impedance\": (self.get_leak_impedance, None),\n \"leak_volts\": (self.get_leak_volts, None),\n \"setpoint\": (self.get_set_point, None),\n \"setpoint_volts\": (self.get_set_point_volts, None),\n \"tripped\": (self.get_tripped, None),\n \"trace\": (self.get_trace, None),\n \"disabled\": (self.get_disabled, None),\n \"sensor_name\": (self.get_name, None),\n \"mode\": (self.get_mode, None),\n })",
"def type(self):\n return _digital_swig.digital_probe_mpsk_snr_est_c_sptr_type(self)",
"def __init__(self,n,k,d,es=1e-3,ee=1e-3):\n self.q = 4\n self.n = n\n self.k = k\n self.d = d \n self.t = int((d-1)/2)\n self.symbol_err_rate = es\n self.erasure_err_rate = ee\n self.result = mpfr(\"0\")\n self.has_result = False\n #print (n,k,d,es,ee)",
"def __init__(self, spi_rack, module, frequency=100e6):\n #def __init__(self, module, frequency=100e6):\n self.spi_rack = spi_rack\n self.module = module\n\n self.rf_frequency = frequency\n self.stepsize = 1e6\n self.ref_frequency = 10e6\n self.use_external = 0\n self.outputPower = None\n\n # These are the 6 registers present in the ADF4351\n self.registers = 6*[0]\n # In REG3: set ABP=1 (3 ns, INT-N) and CHARGE CANCEL=1\n self.registers[3] = (1<<22) | (1<<21) | 3\n # In REG5: set LD PIN MODE to 1 -> digital lock detect\n self.registers[5] = (1<<22) | (3<<19) | 5\n\n self.set_frequency(frequency)",
"def __init__(self, *args):\n this = _digital_swig.new_digital_glfsr_source_b_sptr(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self, mem, inp, outp):\n self.pc = 0\n self.mem = mem\n self.inp = inp\n self.outp = outp",
"def __init__(self, *args):\n this = _digital_swig.new_digital_descrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self, sim, lamda, mu):\n self.lamda = lamda\n self.mu = mu\n super(PoissonGenerator, self).__init__(sim=sim)",
"def __init__(self, atoms, contraints=None, label=\"SpikeSourcePoisson\",\n rate = 1, start = 0, duration = 10000, seed=None):\n super( SpikeSourcePoisson, self ).__init__(\n n_neurons = atoms,\n constraints = contraints,\n label = label\n )\n \n self.rate = rate\n self.start = start\n self.duration = duration\n self.seed = seed",
"def __init__(self,address,\n peak_one_position_min,\n peak_one_position_max,\n peak_two_position_min,\n peak_two_position_max,\n peak_one_width,\n peak_two_width,\n peak_ratio = 0.4,\n normalized_peak_to_noise_ratio = 0.4,\n spectrometer_dark_path = None,\n iron_edge_position = None):\n #self.logger = logging.getLogger(self.__class__.__name__)\n #self.logger.setLevel(logging.INFO)\n #self.logging = logging\n\n self.src = Source('%s'%address)\n if spectrometer_dark_path is not None:\n self.dark = easy_pickle.load(spectrometer_dark_path)\n else:\n self.dark = None\n self.peak_one_position_min = int(peak_one_position_min)\n self.peak_one_position_max = int(peak_one_position_max)\n self.peak_two_position_min = int(peak_two_position_min)\n self.peak_two_position_max = int(peak_two_position_max)\n self.peak_one_width = int(peak_one_width)\n self.peak_two_width = int(peak_two_width)\n self.normalized_peak_to_noise_ratio = float(normalized_peak_to_noise_ratio)\n self.peak_ratio = float(peak_ratio)\n if iron_edge_position is not None:\n self.iron_edge_position = int(iron_edge_position)\n else:\n self.iron_edge_position = None\n\n self.ntwo_color = 0\n self.nnodata = 0\n self.nshots = 0\n self.naccepted= 0",
"def __init__(self, coeff):\n self.coeff = coeff",
"def __shared_initialize__(self, **kwargs):",
"def __init__(self, address=DEFAULT_VISA_ADDRESS, **kwargs):\n super(PNA, self).__init__(address, **kwargs)\n self.resource.timeout = kwargs.get(\"timeout\", 2000)\n self.scpi = keysight_pna_scpi.SCPI(self.resource)\n # self.use_binary()\n self.use_ascii()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
type(self) > snr_est_type_t Return the type of estimator in use. | def type(self):
return _digital_swig.digital_probe_mpsk_snr_est_c_sptr_type(self) | [
"def is_estimator(model):\n if type(model) == type:\n return issubclass(model, BaseEstimator)\n\n return isinstance(model, BaseEstimator)",
"def test_sklearn_check_estimator(seco_estimator_class):\n check_estimator(seco_estimator_class)",
"def _get_estimator_name(estimator):\n if isinstance(estimator, type):\n # this is class\n return estimator.__name__\n else:\n # this an instance\n return estimator.__class__.__name__",
"def is_classifier(estimator):\n return isinstance(estimator, BaseClassifier)",
"def get_estimator(self, params=None):\r\n params = dct(params)\r\n if self.T.goal.startswith(\"class\"):\r\n return LinearSVC(\r\n random_state=params.pop(\"random_state\", self.T.random_state),\r\n **params,\r\n )\r\n else:\r\n return LinearSVR(\r\n random_state=params.pop(\"random_state\", self.T.random_state),\r\n **params,\r\n )",
"def getEvaluatorTypeForProfile(profilename: 'SbName') -> \"SoType\":\n return _coin.ScXML_getEvaluatorTypeForProfile(profilename)",
"def get_type(self):\n return FeatureType.VALUE\n # pretty sure its never interpreter type\n # TODO: think about that",
"def TestType(self):\r\n\t\treturn self._get_attribute('testType')",
"def get_estimator(res_df, test_type, mode='mean_cv'):\n if mode == 'mean_cv':\n # choose best test score out of top 20 best validation scores\n best_res = res_df[res_df.test_type == '[' + str(test_type) + ']'].sort_values(['mean_test_score'], ascending=False).head(1)\n # best_res = best_res.sort_values(['best_estimator_test_score'], ascending=False).head(1)\n\n best_estimator = svm.SVC(C=best_res['param_C'].values.tolist()[0], kernel='linear')\n\n return best_res, best_estimator\n\n elif mode == 'all_splits':\n results = []\n estimators = []\n\n for split in range(4):\n # choose best test score out of top 20 best validation scores\n best_res = res_df[res_df.test_type == '[' + str(test_type) + ']']\\\n .sort_values(['split' + str(split) + '_test_score'], ascending=False).head(1)\n\n results.append(best_res)\n estimators.append(svm.SVC(C=best_res['param_C'].values.tolist()[0], kernel='linear'))\n\n return results, estimators\n\n else:\n raise Exception('Unknown mode.')",
"def _check_est(est):\n\n # Check estimator exist and return the correct function\n estimators = {\n 'cov': np.cov,\n 'scm': _scm,\n 'lwf': _lwf,\n 'oas': _oas,\n 'mcd': _mcd,\n 'corr': np.corrcoef\n }\n\n if callable(est):\n # All good (cross your fingers)\n pass\n elif est in estimators.keys():\n # Map the corresponding estimator\n est = estimators[est]\n else:\n # raise an error\n raise ValueError(\n \"\"\"%s is not an valid estimator ! Valid estimators are : %s or a\n callable function\"\"\" % (est, (' , ').join(estimators.keys())))\n return est",
"def test_sklearn_compatible_estimator(estimator, check):\n check(estimator)",
"def test_parametrized_classifiers_type():\n pf = _ParametrizedClassifiers(CLASSIFIERS)\n assert pf._estimator_type == 'classifier'",
"def is_regressor(estimator):\n from yellowbrick.base import Visualizer\n if isinstance(estimator, Visualizer):\n return is_regressor(estimator.estimator)\n return getattr(estimator, \"_estimator_type\", None) == \"regressor\"",
"def test_parametrized_regressors_type():\n pr = _ParametrizedRegressors(REGRESSORS)\n assert pr._estimator_type == 'regressor'",
"def final_estimator(self) -> T_FinalEstimatorDF:\n pass",
"def ScXML_getEvaluatorTypeForProfile(profilename: 'SbName') -> \"SoType\":\n return _coin.ScXML_getEvaluatorTypeForProfile(profilename)",
"def get_type_of_sim(self):\n return self.get_abstract_item(\"Initial Bulletin\", \"Type\")",
"def _validate_estimator(self):\n super()._validate_estimator()",
"def get_matcher_type(self):\n return self._type"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
msg_nsample(self) > int Return how many samples between SNR messages. | def msg_nsample(self):
return _digital_swig.digital_probe_mpsk_snr_est_c_sptr_msg_nsample(self) | [
"def n_samples(self):\n return len(self.sampler)",
"def n_profile_samples(self):\n return self.__n_profile_samples",
"def getNumSamples(sound):\n return getLength(sound)",
"def packet_get_samples_per_frame(cls, data: bytes) -> int:\n return _lib.opus_packet_get_samples_per_frame(data, cls.SAMPLING_RATE)",
"def sample_size(self):\n\t\treturn _get_sample_size(self._device)",
"def n_samples(self):\n if self.isempty:\n return 0\n return utils.PrettyInt(len(self._abscissa_vals))",
"def num_samples(self):\r\n return self.snapshots[0].num_samples",
"def getNumSamples(self):\n #---+----|----+----|----+----|----+----|----+----|----+----|----+----|\n return SliceSamplerBase.getNumSamples(self)",
"def get_num_samples(self, split_name):",
"def count_samples(\n self,\n samples: List,\n ) -> int:\n num_samples = len(samples)\n with utils.format_text(\"yellow\", [\"underline\"]) as fmt:\n self.log.info(fmt(f\"number of data: {num_samples}\"))\n\n return num_samples",
"def snr_value(self):\n return _raw_util.raw_message_snr_value(self)",
"def time_to_num_samples(time):\r\n num_samples = time * TIME_SAMPLES_RATIO\r\n return num_samples",
"def sample_checkCOUNT(self):\n self.open.write('SAMPLE:COUNT?')\n reply = self.open.read() \n return('Sample Count: ' + str(reply))",
"def max_samples_per_send(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_samples_per_send\")",
"def num_sequences_sampled(self) -> int:\n return self._num_sequences_sampled",
"def get_snr_values(self):\n return _raw_util.raw_message_get_snr_values(self)",
"def sample_rangeCOUNT(self):\n self.open.write('SAMPLE:COUNT? MIN')\n replymin = self.open.read() \n self.open.write('SAMPLE:COUNT? MAX')\n replymax = self.open.read() \n return('Sample Count Range: ' + str(replymin) + ',' + str(replymax))",
"def psnr_calc(noisy, real):\n numpix = noisy.size(1)*noisy.size(2)*noisy.size(3)\n bs = noisy.size(0)\n avg_sq_norm = (1/numpix)*torch.norm(0.5*(noisy.view(bs, -1)- real.view(bs,-1)), dim = 1)**2#multiplication by 0.5 because vals between [-1,1]\n psnrs = -10*torch.log10(avg_sq_norm)\n return psnrs, torch.tensor([torch.mean(psnrs), torch.std(psnrs)])",
"def samples2seconds(nsamples, fs=44100.):\n return nsamples / float(fs)",
"def stream_count_samples(stream, **kwargs):\n n_samples = 0\n for trace in stream:\n n_samples += trace.stats.npts\n return n_samples"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
set_type(self, snr_est_type_t t) Set type of estimator to use. | def set_type(self, *args, **kwargs):
return _digital_swig.digital_probe_mpsk_snr_est_c_sptr_set_type(self, *args, **kwargs) | [
"def setType(self, ttype):\n if ttype == LINEAR_IMPLICIT:\n self.type = ttype\n elif ttype == NONLINEAR:\n self.type = ttype\n else:\n raise DREAMException(\"Solver: Unrecognized solver type: {}.\".format(ttype))",
"def set_type(self, ttype):\n self.type = ttype\n self.token.type = ttype",
"def set_element_type(self, type):\r\n arg_str = p2e._base._util._convert_args_to_string(\"set.object.type\", self._object._eco_id, type)\r\n p2e._app.Exec(arg_str)",
"def set_type(self, t):\n if t.lower() in [\"q\", \"r\", \"n\", \"b\", \"p\", \"k\"]:\n self.type = t\n else:\n raise ValueError(\"Couldn't set new type for piece\")",
"def transformer_type(self, transformer_type):\n\n self._transformer_type = transformer_type",
"def set_testtype(self, name):\n self.testID['TESTTYPE'] = name",
"def set_sensor_type(self, context, type):\n editor = self._parent\n obj = editor.getSelected()[0]\n fsm, sensor = self._get_fsm_sensor()\n sensor.type = type\n sensor.name = type",
"def set_type(self, vertex, t):\n raise NotImplementedError(\"Not implemented on backend\" + type(self).backend)",
"def set_edge_type(self, e, t):\n raise NotImplementedError(\"Not implemented on backend \" + type(self).backend)",
"def experiment_type(self, experiment_type):\n\n self._experiment_type = experiment_type",
"def setDriver_type(self, driver_type):\n\n if driver_type not in self._implemented_types:\n raise RuntimeError(\"unimplemented driver class: %s\" % driver_type)\n\n self._driver_type = driver_type",
"def set_task_type(self, task_type):\n self._task_type = task_type",
"def set_type_of_sim(self, sim_type=\"Perigee/Apogee\"):\n self.set_abstract_item(\"Initial Bulletin\", \"Type\", sim_type)",
"def set_type(self, type):\n return _raw_util.raw_message_sptr_set_type(self, type)",
"def set_line_type(self, line, type):\n self._set_line_type(line, type)",
"def set_type(self, type):\n return _raw_util.raw_message_set_type(self, type)",
"def set_type(self, atype):\n _ldns.ldns_rdf_set_type(self, atype)\n #parameters: ldns_rdf *, ldns_rdf_type,\n #retvals: ",
"def set_actuator_type(self, context, type):\n editor = self._parent\n obj = editor.getSelected()[0]\n fsm, sensor = self._get_fsm_sensor()\n actuator = sensor.actuators[fsm.selected_actuator]\n actuator.type = type\n actuator.name = type\n self._initialize_actuator(obj, actuator)",
"def set_type(self, rr_type):\n _ldns.ldns_rr_set_type(self, rr_type)\n #parameters: ldns_rr *, ldns_rr_type,\n #retvals:"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
set_msg_nsample(self, int n) Set the number of samples between SNR messages. | def set_msg_nsample(self, *args, **kwargs):
return _digital_swig.digital_probe_mpsk_snr_est_c_sptr_set_msg_nsample(self, *args, **kwargs) | [
"def set_sample_number(self):\r\n self.n_samples = self.exprs.shape[0]",
"def set_n_rejection_samples(self, rejection_samples=200):\n if rejection_samples < 0:\n raise ValueError('Must have non-negative rejection samples.')\n self._n_rejection_samples = rejection_samples",
"def resample(self, n):\n if n==len(self.times):\n return\n self.times = np.linspace(self.times[0], self.times[-1], n)",
"def set_snr(self, snr):\n return _raw_util.raw_message_set_snr(self, snr)",
"def truncate_samples(self, n_samples):\n if not self.active_region_is_default:\n raise AudioSignalException('Cannot truncate while active region is not set as default!')\n\n n_samples = int(n_samples)\n if n_samples > self.signal_length:\n n_samples = self.signal_length\n\n self.audio_data = self.audio_data[:, 0: n_samples]",
"def set_snr(self, snr):\n return _raw_util.raw_message_sptr_set_snr(self, snr)",
"def set_sample(self, data, nid):\n self.sample = Sample(self, data, nid)",
"def run(self, nsamples):\n self.nsamples = nsamples\n dimension = self.nodes.shape[1]\n if dimension > 1:\n sample = np.zeros([self.nsamples, dimension])\n for i in range(self.nsamples):\n r = np.zeros([dimension])\n ad = np.zeros(shape=(dimension, len(self.nodes)))\n for j in range(dimension):\n b_ = list()\n for k in range(1, len(self.nodes)):\n ai = self.nodes[k, j] - self.nodes[k - 1, j]\n b_.append(ai)\n ad[j] = np.hstack((self.nodes[0, j], b_))\n r[j] = stats.uniform.rvs(loc=0, scale=1, random_state=self.random_state) ** (1 / (dimension - j))\n d = np.cumprod(r)\n r_ = np.hstack((1, d))\n sample[i, :] = np.dot(ad, r_)\n else:\n a = min(self.nodes)\n b = max(self.nodes)\n sample = a + (b - a) * stats.uniform.rvs(size=[self.nsamples, dimension], random_state=self.random_state)\n return sample",
"def setNumFrames(self, nframes) -> None:\n ...",
"def set_snr_list(self, snr_list):\n return _raw_util.raw_message_set_snr_list(self, snr_list)",
"def _sample_n(self, n, seed=None):\n return self._inverse_scale.solvevec(\n samplers.normal(\n shape=ps.concat([[n], ps.shape(self._loc)], axis=0), seed=seed),\n adjoint=True)",
"def avg_snr(self, avg_snr):\n\n self._avg_snr = avg_snr",
"def set_n_files(self, n_files):\n self._n_files = n_files",
"def set_snr_list(self, snr_list):\n return _raw_util.raw_message_sptr_set_snr_list(self, snr_list)",
"def sample(self, n=100, njobs=4, progressbar=True, start=None, trace=None):\n start = self.start if start is None else start\n with self.model:\n logging.info(\n 'drawing %d MCMC samples using %d jobs' % (n, njobs))\n self.step = pm.NUTS(scaling=start)\n self.trace = pm.sample(n, self.step, start=start, njobs=njobs,\n progressbar=progressbar, trace=trace)",
"def SetNumberOfSpatialSamples(self, num: 'unsigned int') -> \"void\":\n return _itkMutualInformationImageToImageMetricPython.itkMutualInformationImageToImageMetricISS2ISS2_SetNumberOfSpatialSamples(self, num)",
"def SetNumberOfSpatialSamples(self, num: 'unsigned int') -> \"void\":\n return _itkMutualInformationImageToImageMetricPython.itkMutualInformationImageToImageMetricISS3ISS3_SetNumberOfSpatialSamples(self, num)",
"def truncate_seconds(self, n_seconds):\n n_samples = int(n_seconds * self.sample_rate)\n self.truncate_samples(n_samples)",
"def resample_counts(self, output, Nb, regression, **kwargs):\n ## iterate through 'Nb' bootstrap runs whereas each one samples the counts 'M' times\n l_stat_distr = []\n for nb in range(Nb):\n l_smpl = []\n # iterate through box sizes\n for n in range(self.N):\n tmp_counts = np.copy(self.counts[n])\n K = tmp_counts.shape[0]\n # number of resamples\n M = K**2\n # get a fresh seed\n np.random.seed(nb*n)\n # bootstrap (drawing with replacement from box counts)\n tmp_smpl = np.random.choice(tmp_counts.ravel(), M).reshape((K, K))\n l_smpl.append(tmp_smpl)\n # make sure that lacunarity computation runs on the resampled counts\n self.counts = l_smpl \n ## For which statistic should the distribution be generated?\n if output == 'boxdim':\n tmp_stat = self.box_dimension(regression, verb=kwargs.get(\"verb\"))\n elif output == 'lac':\n tmp_stat = self.lacunarity(regression, normalized=kwargs.get(\"normalized\"), \n verb=kwargs.get(\"verb\"), regr_param=kwargs.get(\"regr_param\"))\n # distribution of values:\n l_stat_distr.append(tmp_stat)\n # for next run, the counts must be the real counts again\n self.counts = np.copy(self._counts)\n return l_stat_distr",
"def register_every_n_samples_event(self, func, \n samples = 1,\n options = 0,\n cb_data = None\n ):\n event_type_map = dict(input=DAQmx_Val_Acquired_Into_Buffer, \n output=DAQmx_Val_Transferred_From_Buffer)\n event_type = event_type_map[self.channel_io_type]\n\n if options=='sync':\n options = DAQmx_Val_SynchronousEventCallbacks\n\n if func is None:\n c_func = None # to unregister func\n else:\n if self._register_every_n_samples_event_cache is not None:\n # unregister:\n self.register_every_n_samples_event(None, samples=samples, options=options, cb_data=cb_data)\n # TODO: check the validity of func signature\n # TODO: use wrapper function that converts cb_data argument to given Python object\n c_func = EveryNSamplesEventCallback_map[self.channel_type](func)\n \n self._register_every_n_samples_event_cache = c_func\n\n return CALL('RegisterEveryNSamplesEvent', self, event_type, uInt32(samples), uInt32 (options), c_func, cb_data)==0"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
probe_mpsk_snr_est_c(snr_est_type_t type, int msg_nsamples = 10000, double alpha = 0.001) > digital_probe_mpsk_snr_est_c_sptr A probe for computing SNR of a signal. This is a probe block (a sink) that can be used to monitor and retrieve estimations of the signal SNR. This probe is designed for use with MPSK signals especially. The type of estimator is specified as the parameter in the constructor. The estimators tend to trade off performance for accuracy, although experimentation should be done to figure out the right approach for a given implementation. Further, the current set of estimators are designed and proven theoretically under AWGN conditions; some amount of error should be assumed and/or estimated for real channel conditions. Factory function returning shared pointer of this class | def probe_mpsk_snr_est_c(*args, **kwargs):
return _digital_swig.probe_mpsk_snr_est_c(*args, **kwargs) | [
"def type(self):\n return _digital_swig.digital_probe_mpsk_snr_est_c_sptr_type(self)",
"def set_type(self, *args, **kwargs):\n return _digital_swig.digital_probe_mpsk_snr_est_c_sptr_set_type(self, *args, **kwargs)",
"def msr (riskfree_rate,er,cov):\r\n n=er.shape[0]\r\n init_guess=np.repeat(1/n,n)\r\n bounds=((0.0,1.0),)*n \r\n def neg_sharpe_ratio(weights,riskfree_rate,er,cov):\r\n \"\"\"\r\n Returns the negative of Sharpe Ratio, given weights\r\n \"\"\"\r\n r=portfolio_return(weights,er)\r\n vol=portfolio_vol(weights,cov)\r\n return -(r-riskfree_rate)/vol\r\n \r\n weights_sum_to_1={'type':'eq','fun':lambda weights:np.sum(weights)-1}\r\n results=minimize(neg_sharpe_ratio,init_guess,args=(riskfree_rate,er,cov,),method='SLSQP',options={'disp':False},constraints=(weights_sum_to_1),bounds=bounds)\r\n return results.x",
"def psnr(noisy: np.ndarray, clean: np.ndarray, dynamic: float=1.0) -> float:\n assert noisy.shape == clean.shape, \"Shape mismatch when computing PSNR.\"\n peak = dynamic * dynamic\n return 10 * math.log10(peak / mse(noisy, clean))",
"def msr(riskfree_rate, er, cov):\n n = er.shape[0]\n initial_weights = np.repeat(1/n, n) # Equally distr. weights\n bounds = ((0.0, 1.0),)*n # n bounds of (0,1) tuples\n constraint_weight_sum_is_one = {\n 'type': 'eq',\n 'fun': lambda weights: np.sum(weights) - 1\n }\n\n def neg_sharpe_ratio(weights, riskfree_rate, er, cov):\n \"\"\"\n Returns the inverse of the Sharpe ratio given:\n * weights: allocation of the assets\n \"\"\"\n r = portfolio_return(weights, er)\n v = portfolio_vol(weights, cov)\n return -(r - riskfree_rate)/v\n\n results = minimize(neg_sharpe_ratio, initial_weights, args=(riskfree_rate, er, cov,), method=\"SLSQP\", options={\n 'disp': False}, constraints=(constraint_weight_sum_is_one), bounds=bounds)\n return results.x",
"def psnr(mse):\n return -10.0 * mse.log10()",
"def psnr(gt_im, interpolated_im):\r\n e = torch.abs(gt_im - interpolated_im) ** 2\r\n mse = torch.sum(e) / e.numel()\r\n psnr_err = 10 * torch.log10(torch.tensor(255) * torch.tensor(255) / mse)\r\n\r\n return psnr_err.item()",
"def compute_PSNR(out, lbl):\n out = out[0, :, :, 0]\n lbl = lbl[0, :, :, 0]\n diff = out - lbl\n rmse = np.sqrt(np.mean(diff**2))\n psnr = 20*np.log10(255/rmse)\n return psnr",
"def spectralSNR(partarray, apix=1.0):\n\tt0 = time.time()\n\t### initialization\n\tpart0 = partarray[0]\n\tif isinstance(partarray, list):\n\t\tnumimg = len(partarray)\n\telse:\n\t\tnumimg = partarray.shape[0]\n\tif numimg < 2:\n\t\tapDisplay.printWarning(\"Cannot calculate the SSNR for less than 2 images\")\n\t\treturn 0.0\n\tfor partimg in partarray:\n\t\tif part0.shape != partimg.shape:\n\t\t\tapDisplay.printError(\"Cannot calculate the SSNR for images of different sizes\")\n\t\tif len(partimg.shape) != 2:\n\t\t\tapDisplay.printError(\"Cannot calculate the SSNR non-2D images\")\n\n\t### get fft\n\tfftlist = []\n\tfor partimg in partarray:\n\t\tfftim = real_fft2d(partimg)\n\t\tfftlist.append(fftim)\n\n\t### dimension init\n\tfftim0 = real_fft2d(partarray[0])\n\tfftshape = numpy.asarray(fftim0.shape, dtype=numpy.float32)\n\tfftcenter = fftshape/2.0\n\tlength = int(max(fftshape)/2.0)\n\tlinear = numpy.zeros((length), dtype=numpy.float32)\n\tlinear[0] = 1.0\n\n\t### figure out which pixels go with which ring\n\tringdict = getLinearIndices2d(fftshape)\n\n\t### for each ring calculate the FRC\n\tkeys = ringdict.keys()\n\tkeys.sort()\n\tfor key in keys:\n\t\tsys.stderr.write(\".\")\n\t\tindexlist = ringdict[key]\n\t\tnumer = 0.0\n\t\tdenom = 0.0\n\t\tfor indextuple in indexlist:\n\t\t\tn1, d1 = mini_ssnr1fft(fftlist, indextuple)\n\t\t\t#n1, d1 = mini_ssnr1(partarray, indextuple)\n\t\t\t#n2, d2 = mini_ssnr2(partarray, indextuple)\n\t\t\t#if indextuple[0] == 5 and indextuple[1] == 5:\n\t\t\t#print \"%d,%d (%.3f / %.3f) vs (%.3f / %.3f) %.3f\"%(indextuple[0], indextuple[1], n1, d1, n2, d2, n1/d1)\n\t\t\t#return\n\t\t\tnumer += n1\n\t\t\tdenom += d1\n\t\tK = len(indexlist)\n\t\tssnr = numer / ( K/(K-1.0) * denom ) - 1.0\n\t\tfrc = ssnr / (ssnr + 1)\n\t\t#if key >= 3 and key <= 5:\n\t\t#\tprint \"======================\"\n\t\t#\tprint \"numerring=\", key, numer\n\t\t#\tprint \"denomring=\", key, denom\n\t\t#\tprint \"ssnr=\", key, ssnr\n\t\t#print \"%02d %.3f %.3f (%.3f / %.3f)\"%(key, ssnr, frc, numer/K, denom/K)\n\t\t#print key, frc\n\t\tlinear[key] = frc\n\tsys.stderr.write(\"\\n\")\n\n\t### output\n\twriteFrcPlot(\"ssnr.dat\", linear, apix)\n\tres = getResolution(linear, apix, boxsize=linear.shape[0]*2)\n\tapDisplay.printMsg(\"Finished SSNR of res %.3f Angstroms in %s\"%(res, apDisplay.timeString(time.time()-t0)))\n\treturn res",
"def psnr_calc(noisy, real):\n numpix = noisy.size(1)*noisy.size(2)*noisy.size(3)\n bs = noisy.size(0)\n avg_sq_norm = (1/numpix)*torch.norm(0.5*(noisy.view(bs, -1)- real.view(bs,-1)), dim = 1)**2#multiplication by 0.5 because vals between [-1,1]\n psnrs = -10*torch.log10(avg_sq_norm)\n return psnrs, torch.tensor([torch.mean(psnrs), torch.std(psnrs)])",
"def estimate_snr(images):\n\n if len(images.shape) == 2: # in case of a single projection\n images = images[:, :, None]\n\n p = images.shape[1]\n n = images.shape[2]\n\n radius_of_mask = p // 2 - 1\n\n points_inside_circle = disc(p, r=radius_of_mask, inner=True)\n num_signal_points = np.count_nonzero(points_inside_circle)\n num_noise_points = p * p - num_signal_points\n\n noise = np.sum(np.var(images[~points_inside_circle], axis=0)) * num_noise_points / (num_noise_points * n - 1)\n\n signal = np.sum(np.var(images[points_inside_circle], axis=0)) * num_signal_points / (num_signal_points * n - 1)\n\n signal -= noise\n\n snr = signal / noise\n\n return snr, signal, noise",
"def calculatesReceiverSNR(self, SNR=None):\n pass",
"def snr_value(self):\n return _raw_util.raw_message_sptr_snr_value(self)",
"def set_snr(self, snr):\n return _raw_util.raw_message_sptr_set_snr(self, snr)",
"def rmse_and_cramer_rao(SNR_range, N_samples_range, iteration, A, angles, locations, K, method_code, return_name):\n \n import numpy as np\n from stochastic_cramer_rao import cramer_rao\n \n N_samples_zero = N_samples_range[0]\n SNR_zero = SNR_range[0]\n \n if SNR_range[1] == SNR_range[0]+1:\n snr_dB = SNR_range[1]\n if return_name == \"rmse\":\n MSE = np.zeros(N_samples_range[1]-N_samples_range[0])\n elif return_name == \"cramer\":\n cramer = np.zeros(N_samples_range[1]-N_samples_range[0])\n\n elif N_samples_range[1] == N_samples_range[0]+1:\n N_samples = N_samples_range[1]\n if return_name == \"rmse\":\n MSE = np.zeros(SNR_range[1]-SNR_range[0])\n elif return_name == \"cramer\":\n cramer = np.zeros(SNR_range[1]-SNR_range[0])\n \n for snr_dB in range(SNR_range[0],SNR_range[1]):\n \n for N_samples in range(N_samples_range[0], N_samples_range[1]):\n \n for i in range(500):\n\n # Signal(A*s) to noise(n) ratio\n received_snr = 10**(snr_dB/10)\n ratio_As_to_s = 1/4\n snr = received_snr*ratio_As_to_s\n #snr = received_snr\n \n # Source signal implementation (shape: (3,500))\n signal = np.random.normal(0,np.sqrt(snr),(3,N_samples))\n #w = np.atleast_2d([np.pi/3, np.pi/4, np.pi/5]).T\n #signal = (np.sqrt(snr))*np.exp(1j*w*(np.atleast_2d(np.arange(1,N_samples+1))))\n\n # Received signal power on sensors\n signal_power = sum(sum(np.abs(A.dot(signal))**2))/(12*N_samples)\n\n # Noise signal implementation (shape: (12,500))\n noise = np.random.normal(0,np.sqrt(0.5),(12,N_samples)) + 1j*np.random.normal(0,np.sqrt(0.5),(12,N_samples))\n noise_power = sum(sum(np.abs(noise)**2))/(12*N_samples)\n #if i == 0:\n # print()\n # print(\"SIGNAL POWER\")\n # print(signal_power)\n # print(\"NOISE POWER\")\n # print(noise_power)\n # print(\"SIGNAL TO NOISE RATIO\")\n # print(signal_power/noise_power)\n\n # Received signal (shape: (12,500))\n z = A.dot(signal) + noise\n\n # Sample covariance matrix\n R_sample = z.dot(z.conj().T)/N_samples\n\n # Eigenvalue and eigenvectors\n w_sample, v_sample = np.linalg.eig(R_sample)\n \n #if i == 0 and snr_dB == -20:\n # print()\n # print(\"EIGENVALUES OF SAMPLE COVARIANCE MATRIX\")\n # print(w_sample[0])\n # print(w_sample[1])\n # print(w_sample[2])\n # print(w_sample[3])\n\n # Sensor Selection Matrix (shape: (12,6))\n T = np.array([[1,0,0,0,0,0],\n [1,0,0,0,0,0],\n [0,1,0,0,0,0],\n [0,1,0,0,0,0],\n [0,0,1,0,0,0],\n [0,0,1,0,0,0],\n [0,0,0,1,0,0],\n [0,0,0,1,0,0],\n [0,0,0,0,1,0],\n [0,0,0,0,1,0],\n [0,0,0,0,0,1],\n [0,0,0,0,0,1]])\n\n # Push-Sum Matrix (shape: (6,6))\n P_push = np.array([[0.2,0.2,0.2,0 ,0 ,0],\n [0.2,0.2,0.2,0 ,0 ,0],\n [0.6,0.6,0.2,0.2,0 ,0],\n [0 ,0 ,0.4,0.2,0.2,0.2],\n [0 ,0 ,0 ,0.2,0.2,0.2],\n [0 ,0 ,0 ,0.4,0.6,0.6]])\n\n # Average-Consensus Matrix (shape: (6,6))\n P_ave = np.array([[0.17,0.5,0.33,0 ,0 ,0],\n [0.5,0.17,0.33,0 ,0 ,0],\n [0.33,0.33,0.01,0.33,0 ,0],\n [0 ,0 ,0.33,0.01,0.33,0.33],\n [0 ,0 ,0 ,0.33,0.17,0.5],\n [0 ,0 ,0 ,0.33,0.5,0.17]])\n\n # Weight Vector (shape: (6,1))\n w = np.atleast_2d([1,1,1,1,1,1]).T\n \n if method_code == 1:\n\n # Average Consensus Covariance Matrix Estimation \n R_ave_con = K * np.multiply(T.dot(np.linalg.matrix_power(P_ave,iteration)).dot(T.T), R_sample)\n R = R_ave_con\n \n if method_code == 2:\n # Push-Sum Covariance Matrix Estimation\n R_push_numerator = np.multiply(T.dot(np.linalg.matrix_power(P_push,iteration)).dot(T.T), R_sample)\n R_push_denominator = T.dot(np.linalg.matrix_power(P_push,iteration)).dot(w).dot(np.ones((1,6))).dot(T.T)\n\n # Push Sum Covariance Matrix (shape: (12,12))\n R_push = 
K*np.multiply(R_push_numerator, (1/(R_push_denominator)))\n R = R_push\n\n if method_code == 3:\n # Conventional ESPRIT Algorithm \n R = R_sample \n\n w_push, v_push = np.linalg.eig(R)\n\n # Upper group selection matrix J_up\n J_up = np.kron(np.eye(6),np.array([1,0]))\n\n # Lower group selection matrix J_down\n J_down = np.kron(np.eye(6),np.array([0,1]))\n\n # Push-Sum estimated signal eigenvector matrices\n U_s_push = v_push[:,:3]\n\n # Upper signal eigenvectors\n U_s_up = J_up.dot(U_s_push)\n\n # Lower signal eigenvectors\n U_s_down = J_down.dot(U_s_push)\n\n # Matrix including knowledge about DOAs of the source signals\n psi = np.linalg.inv((U_s_up.conj().T).dot(U_s_up)).dot((U_s_up.conj().T)).dot(U_s_down)\n\n w2, v2 = np.linalg.eig(psi)\n doa = []\n doa.append(np.arcsin(np.angle(w2[0])/np.pi)*360/(2*np.pi))\n doa.append(np.arcsin(np.angle(w2[1])/np.pi)*360/(2*np.pi))\n doa.append(np.arcsin(np.angle(w2[2])/np.pi)*360/(2*np.pi))\n\n #if i == 0:\n # print()\n # print(\" DOAs of the source signals in degrees with SNR: \" + str(snr_dB) )\n # print(\" DOAs of the source signals in degrees with N_samples: \" + str(N_samples) )\n # print(\"****************************************************************\")\n # print(\"****************************************************************\")\n # print(\"DOA of the first source signal: \" + str(doa[0]))\n # print(\"DOA of the second source signal: \" + str(doa[1]))\n # print(\"DOA of the third source signal: \" + str(doa[2]))\n \n diff_1 = min(abs(doa[0]-(angles*360/(2*np.pi))))\n diff_2 = min(abs(doa[1]-(angles*360/(2*np.pi))))\n diff_3 = min(abs(doa[2]-(angles*360/(2*np.pi))))\n \n if SNR_range[1] == SNR_range[0] + 1:\n if return_name == \"rmse\":\n MSE[N_samples - N_samples_zero] = MSE[N_samples - N_samples_zero]+1/3*1/500*((diff_1)**2+(diff_2)**2+(diff_3)**2)\n if i == 499: \n print(\"RMSE\")\n print(np.sqrt(MSE[N_samples - N_samples_zero]))\n elif return_name == \"cramer\":\n cramer[N_samples - N_samples_zero] = cramer[N_samples - N_samples_zero]+(1/500)*np.sqrt(cramer_rao(A, signal, angles, locations))*360/(2*np.pi)\n if i == 499: \n print(\"Cramer Rao Bound\")\n print(np.sqrt(cramer[N_samples - N_samples_zero]))\n\n elif N_samples_range[1] == N_samples_range[0] + 1:\n if return_name == \"rmse\":\n MSE[snr_dB - SNR_zero] = MSE[snr_dB - SNR_zero]+1/3*1/500*((diff_1)**2+(diff_2)**2+(diff_3)**2)\n if i == 499:\n print(\"RMSE\")\n print(np.sqrt(MSE[snr_dB - SNR_zero]))\n elif return_name == \"cramer\":\n cramer[snr_dB - SNR_zero] = cramer[snr_dB - SNR_zero]+(1/500)*np.sqrt(cramer_rao(A, signal, angles, locations))*360/(2*np.pi)\n if i == 499:\n print(\"Cramer Rao Bound\")\n print(np.sqrt(cramer[snr_dB - SNR_zero]))\n \n if return_name == \"rmse\":\n return np.sqrt(MSE)\n elif return_name == \"cramer\":\n return cramer",
"def msr(rf_rate, er, cov):\n n = er.shape[0]\n initial_weights = np.repeat(1/n, n)\n bounds = ((0.0, 1.0),) * n\n weights_sum_to_1 = {\n 'type': 'eq',\n 'fun': lambda w: w.sum() - 1 \n }\n def neg_sharpe_ratio(w):\n ret = portfolio_return(w, er)\n vol = portfolio_vol(w, cov)\n return -(ret - rf_rate) / vol\n results = minimize(neg_sharpe_ratio, initial_weights, method='SLSQP', \n options={'disp': False}, constraints=(weights_sum_to_1), \n bounds=bounds)\n return results.x",
"def _set_snr(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"snr\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='uint8', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"snr must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"snr\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='uint8', is_config=False)\"\"\",\n })\n\n self.__snr = t\n if hasattr(self, '_set'):\n self._set()",
"def get_salsa_kernel_from_params(kernel_type, add_order, kernel_scale, bandwidths,\n problem_dim):\n if kernel_type == 'se':\n return ESPKernelSE(problem_dim, kernel_scale, add_order, bandwidths)\n elif kernel_type.startswith('matern'):\n nu = float(kernel_type[-3:])\n nu_vals = [nu] * problem_dim\n return ESPKernelMatern(problem_dim, nu_vals, kernel_scale, add_order, bandwidths)\n else:\n raise ValueError('Unknown kernel type %s.'%(kernel_type))",
"def SNR(st, stations, components, mv=None, T=None):\n import operator\n ns = len(stations)\n nc = len(components)\n #==============================\n # == SORT BY SNR ==\n SNR_c = np.zeros((ns,nc), dtype=np.float32)\n SNR = np.zeros(ns, dtype=np.float32)\n SNR_dic = {}\n for s in range(ns):\n for c in range(nc):\n if mv is None:\n data = st.select(station=stations[s])[c].data\n else:\n id1 = max(0, T + mv[s] - np.int32(autodet.cfg.template_len/2 * autodet.cfg.sampling_rate))\n id2 = min(st.select(station=stations[s])[c].data.size, T + mv[s] + np.int32(autodet.cfg.template_len/2 * autodet.cfg.sampling_rate))\n if id2-id1 <= 0:\n data = np.float32([0.])\n else:\n data = st.select(station=stations[s])[c].data[id1:id2]\n if np.var(data) != 0.:\n SNR_c[s,c] = np.power(data, 2).max()/np.var(data)\n else:\n pass\n SNR[s] = np.mean(SNR_c[s,c])\n SNR_dic.update({stations[s]:SNR[s]})\n SNR_sorted = sorted(SNR_dic.items(), key=operator.itemgetter(1))\n SNR_sorted.reverse()\n return SNR_sorted",
"def SNR_labels(y_woutn, snr):\n # np.random.seed(seed_noise)\n std_signal = np.std(y_woutn)\n std_noise = np.sqrt((std_signal**2)/snr)\n noiseVec = np.random.normal(0.0, std_noise, len(y_woutn))\n return torch.tensor(y_woutn + noiseVec, dtype = torch.float)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
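The probe_mpsk_snr_est_c rows above (factory function, type, msg_nsample and their setters) describe a sink that keeps a running SNR estimate of an MPSK stream and periodically reports it. A short usage sketch under assumed names follows: the estimator constants (digital.SNR_EST_M2M4, digital.SNR_EST_SIMPLE), the snr() accessor and the module paths reflect typical GNU Radio conventions and are not taken from the rows themselves.

# Illustrative sketch only -- the SNR_EST_* constants and the snr() accessor are assumptions.
from gnuradio import gr, blocks, digital

tb = gr.top_block()
src = blocks.vector_source_c([1 + 0j, -1 + 0j] * 5000, repeat=False)     # noiseless BPSK symbols
# args: estimator type, samples between SNR messages, averaging constant alpha
probe = digital.probe_mpsk_snr_est_c(digital.SNR_EST_M2M4, 10000, 0.001)
tb.connect(src, probe)
tb.run()
print("estimator in use:", probe.type())
print("samples between SNR messages:", probe.msg_nsample())
probe.set_msg_nsample(20000)             # widen the message interval
probe.set_type(digital.SNR_EST_SIMPLE)   # switch estimators at run time
print("estimated SNR (dB):", probe.snr())   # accessor name assumed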
__init__(self) > digital_scrambler_bb_sptr __init__(self, p) > digital_scrambler_bb_sptr | def __init__(self, *args):
this = _digital_swig.new_digital_scrambler_bb_sptr(*args)
try: self.this.append(this)
except: self.this = this | [
"def __init__(self, *args):\n this = _digital_swig.new_digital_additive_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self, *args):\n this = _digital_swig.new_digital_descrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this",
"def scrambler_bb(*args, **kwargs):\n return _digital_swig.scrambler_bb(*args, **kwargs)",
"def __init__(self, *args):\n this = _digital_swig.new_digital_diff_encoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self, *args):\n this = _digital_swig.new_digital_diff_decoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this",
"def descrambler_bb(*args, **kwargs):\n return _digital_swig.descrambler_bb(*args, **kwargs)",
"def __init__(self, *args):\n this = _digital_swig.new_digital_map_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this",
"def additive_scrambler_bb(*args, **kwargs):\n return _digital_swig.additive_scrambler_bb(*args, **kwargs)",
"def __init__(self, a, b):\n self.a = make_generator(a)\n self.b = make_generator(b)",
"def __init__(self, algorithm: GeneratorAlgorithm) -> None:\n self.algorithm = algorithm",
"def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_bc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__( self, public_key, secret_multiplier ):\n\n self.public_key = public_key\n self.secret_multiplier = secret_multiplier",
"def __init__(self,size,randbytes=None,hash=sha1,mgf=MGF1_SHA1,saltlen=8):\n if randbytes is None:\n randbytes = load_urandom()\n super(PSSPadder,self).__init__(size,randbytes,hash,mgf,saltlen)",
"def __init__(self):\n this = _coin.new_SoBlinker()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this",
"def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_bf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self, *args):\n this = _digital_swig.new_digital_diff_phasor_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self):\n self.codeword = Codeword.generate_random_codeword()\n self.transmission = \"\"",
"def __init__(self, *args):\n this = _coin.new_SbRotation(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self, machine, sample_size=16):\n self.hilbert = machine.hilbert\n self.machine_pow = 2.0\n super().__init__(machine, sample_size)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
scrambler_bb(int mask, int seed, int len) > digital_scrambler_bb_sptr Scramble an input stream using an LFSR. This block works on the LSB only of the input data stream, i.e., on an "unpacked binary" stream, and produces the same format on its output. | def scrambler_bb(*args, **kwargs):
return _digital_swig.scrambler_bb(*args, **kwargs) | [
"def descrambler_bb(*args, **kwargs):\n return _digital_swig.descrambler_bb(*args, **kwargs)",
"def bpsk(input_bits, noise):\n modulator = Modulator()\n demodulator = Demodulator()\n channel = Channel()\n signal = modulator.make_bpsk_mod(input_bits)\n\n signal = channel.send_signal(signal, noise)\n\n result_bits = demodulator.make_bpsk_demod(signal, channel)\n return result_bits",
"def additive_scrambler_bb(*args, **kwargs):\n return _digital_swig.additive_scrambler_bb(*args, **kwargs)",
"def get_ULRB_scramble():\n return _MEGA_SCRAMBLER.call(\"megaScrambler.getSkewbULRBScramble\")",
"def blockmix_salsa8(BY, Yi, r):\n\n start = (2 * r - 1) * 16\n X = BY[start:start+16] # BlockMix - 1\n tmp = [0]*16\n\n for i in range(2 * r): # BlockMix - 2\n #blockxor(BY, i * 16, X, 0, 16) # BlockMix - 3(inner)\n salsa20_8(X, tmp, BY, i * 16, BY, Yi + i*16) # BlockMix - 3(outer)\n #array_overwrite(X, 0, BY, Yi + (i * 16), 16) # BlockMix - 4\n\n for i in range(r): # BlockMix - 6\n BY[i * 16:(i * 16)+(16)] = BY[Yi + (i * 2) * 16:(Yi + (i * 2) * 16)+(16)]\n BY[(i + r) * 16:((i + r) * 16)+(16)] = BY[Yi + (i*2 + 1) * 16:(Yi + (i*2 + 1) * 16)+(16)]",
"def __init__(self, *args):\n this = _digital_swig.new_digital_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this",
"def apply_rubberband(infile, time_stretching_ratio=1.0, pitch_shifting_semitones=1):\n fs1, x = monoWavRead(filename=infile)\n\n tmp_file_1 = tmp_path('x')\n tmp_file_2 = tmp_path('y')\n if not os.path.exists('tmp'):\n os.makedirs('tmp')\n \n write(filename = tmp_file_1, rate = fs1, data = x)\n cmd = \"rubberband -c 1 -t {0} -p {1} {2} {3}\".format(\n time_stretching_ratio,\n pitch_shifting_semitones,\n tmp_file_1,\n tmp_file_2)\n #print(cmd)\n p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,stderr=subprocess.PIPE)\n out, err = p.communicate()\n if p.returncode != 0:print (\"ERROR!\")\n\n fs2, y = monoWavRead(filename=tmp_file_2)\n\n #Change the output file name to suit your requirements here\n outfile_name = os.path.basename(infile).split(\".\")[0] + (\"_timestr%s_pitchshift%s.wav\" % (str(time_stretching_ratio),str(pitch_shifting_semitones)))\n outfile = os.path.join(outfile_path, outfile_name)\n write(filename = outfile, rate = fs1, data = y)\n \n if (FILE_DELETION):\n extractFeaturesAndDelete(outfile)",
"def maskDead(smr, verbose=False):\n if verbose:\n print('(*) Perform masking of the dead/bad pixel')\n smr.errorType = 'F'\n #\n # mask blinded pixels\n #\n smr.blinded = np.empty((smr.numPixels,), dtype=bool)\n smr.blinded[:] = False\n\n id_list = np.array(list(range(10)) + list(range(1024-10, 1024)))\n smr.blinded[0+id_list] = True # channel 1\n smr.blinded[1024+id_list] = True # channel 2\n smr.blinded[2048+id_list] = True # channel 3\n smr.blinded[3072+id_list] = True # channel 4\n smr.blinded[4096+id_list] = True # channel 5\n smr.blinded[5120+id_list] = True # channel 6\n smr.blinded[6144+id_list] = True # channel 7\n smr.blinded[7168+id_list] = True # channel 8\n #\n # mask dead pixels\n #\n i_masked = smr.spectra.mask.sum()\n smr.spectra = ma.masked_equal(smr.spectra, 0, copy=False)\n if verbose:\n masked = smr.spectra.mask.sum()\n print('* Info: masked %6.1f pixels/spectrum with zero signal'\n % ((masked - i_masked) / float(smr.numSpectra)))\n i_masked = masked\n\n smr.spectra = ma.masked_where((smr.spectra / smr.coaddf) >= 65535.,\n smr.spectra, copy=False)\n if verbose:\n masked = smr.spectra.mask.sum()\n print('* Info: masked %6.1f pixels/spectrum with saturated signal'\n % ((masked - i_masked) / float(smr.numSpectra)))\n i_masked = masked",
"def hrsbias(rawpath, outpath, link=False, mem_limit=1e9, sdb=None, clobber=True):\n if not os.path.isdir(rawpath): return \n\n image_list = ImageFileCollection(rawpath)\n if len(image_list.files)==0: return\n\n #make output directory\n if not os.path.isdir(outpath): os.mkdir(outpath)\n \n \n obsdate=get_obsdate(image_list.summary['file'][0])\n \n\n #process the red bias frames\n matches = (image_list.summary['obstype'] == 'Bias') * (image_list.summary['detnam'] == 'HRDET')\n rbias_list = []\n for fname in image_list.summary['file'][matches]:\n ccd = red_process(rawpath+fname)\n rbias_list.append(ccd)\n if sdb is not None: dq_ccd_insert(rawpath + fname, sdb)\n\n if rbias_list:\n if os.path.isfile(\"{0}/RBIAS_{1}.fits\".format(outpath, obsdate)) and clobber: \n os.remove(\"{0}/RBIAS_{1}.fits\".format(outpath, obsdate))\n rbias = ccdproc.combine(rbias_list, method='median', output_file=\"{0}/RBIAS_{1}.fits\".format(outpath, obsdate), mem_limit=mem_limit)\n del rbias_list\n\n #process the red bias frames\n matches = (image_list.summary['obstype'] == 'Bias') * (image_list.summary['detnam'] == 'HBDET')\n hbias_list = []\n for fname in image_list.summary['file'][matches]:\n ccd = blue_process(rawpath+fname)\n hbias_list.append(ccd)\n if sdb is not None: dq_ccd_insert(rawpath + fname, sdb)\n\n if hbias_list:\n if os.path.isfile(\"{0}/HBIAS_{1}.fits\".format(outpath, obsdate)) and clobber: \n os.remove(\"{0}/HBIAS_{1}.fits\".format(outpath, obsdate))\n hbias = ccdproc.combine(hbias_list, method='median', output_file=\"{0}/HBIAS_{1}.fits\".format(outpath, obsdate), mem_limit=mem_limit)\n del hbias_list\n\n\n #provide the link to the bias frame\n if link:\n ldir = '/salt/HRS_Cals/CAL_BIAS/{0}/{1}/'.format(obsdate[0:4], obsdate[4:8])\n if not os.path.isdir(ldir): os.mkdir(ldir)\n ldir = '/salt/HRS_Cals/CAL_BIAS/{0}/{1}/product'.format(obsdate[0:4], obsdate[4:8])\n if not os.path.isdir(ldir): os.mkdir(ldir)\n \n infile=\"{0}/RBIAS_{1}.fits\".format(outpath, obsdate)\n link='/salt/HRS_Cals/CAL_BIAS/{0}/{1}/product/RBIAS_{2}.fits'.format(obsdate[0:4], obsdate[4:8], obsdate)\n if os.path.islink(link) and clobber: os.remove(link)\n os.symlink(infile, link)\n infile=\"{0}/HBIAS_{1}.fits\".format(outpath, obsdate)\n link='/salt/HRS_Cals/CAL_BIAS/{0}/{1}/product/HBIAS_{2}.fits'.format(obsdate[0:4], obsdate[4:8], obsdate)\n if os.path.islink(link) and clobber: os.remove(link)\n os.symlink(infile, link)",
"def convert_srl(hdl):\n\n so = Signal(bool(0))\n si = Signal(bool(0))\n clk = Signal(bool(0))\n\n srlq = srl(clk, si, so)\n\n srlq.convert(hdl=hdl, name='srl')",
"def _handle_sb_linemode_slc(self, buf):\n assert 0 == len(buf) % 3, ('SLC buffer must be byte triplets')\n self._slc_start()\n while len(buf):\n func = buf.popleft()\n flag = buf.popleft()\n value = buf.popleft()\n self._slc_process(func, SLC_definition(flag, value))\n self._slc_end()\n self.request_forwardmask()",
"def scramble(String, Seed):\r\n Success = type(String) == str and type(Seed) == int\r\n if Success:\r\n \"\"\"Initialize Local Variables...\"\"\"\r\n Size = len(String)\r\n State = Seed\r\n Sorted = True\r\n TokenSet = \"\"\r\n Delim = \",\"\r\n Output = \"\"\r\n\r\n for Char in range(Size):\r\n \"\"\"Generate a new Randomizer State...\"\"\"\r\n State = xrandGen(State)\r\n \"\"\"Assign new Value to a Character...\"\"\"\r\n Token = xrandVal(State,2) + String[Char]\r\n \"\"\"Add to the set...\"\"\"\r\n Flags = tokenFlagSet(0, \"SORTED_ORDER\")\r\n TokenSet = tokenAdd(TokenSet, Delim, Token, Flags)\r\n\r\n \"\"\"Build a new scramble set...\"\"\"\r\n for Char in range(Size):\r\n Token = tokenGet(TokenSet, Delim, Char + 1)\r\n \"\"\"Grab the 3rd byte for the symbol...\"\"\"\r\n \"\"\"The first 2 bytes represent the Sorted Value\"\"\"\r\n \"\"\"from the PRNG...\"\"\"\r\n Output += Token[2]\r\n return Output\r\n \r\n \"\"\"DefaultReturn...\"\"\"\r\n return \"\"",
"def detail_mask(clip: vs.VideoNode,\n sigma: float = 1.0, rxsigma: List[int] = [50, 200, 350],\n pf_sigma: Optional[float] = 1.0,\n rad: int = 3, brz: Tuple[int, int] = (2500, 4500),\n rg_mode: int = 17,\n ) -> vs.VideoNode:\n from kagefunc import kirsch\n\n bits, clip = _get_bits(clip)\n\n clip_y = get_y(clip)\n pf = core.bilateral.Gaussian(clip_y, sigma=pf_sigma) if pf_sigma else clip_y\n ret = core.retinex.MSRCP(pf, sigma=rxsigma, upper_thr=0.005)\n\n blur_ret = core.bilateral.Gaussian(ret, sigma=sigma)\n blur_ret_diff = core.std.Expr([blur_ret, ret], \"x y -\")\n blur_ret_dfl = core.std.Deflate(blur_ret_diff)\n blur_ret_ifl = iterate(blur_ret_dfl, core.std.Inflate, 4)\n blur_ret_brz = core.std.Binarize(blur_ret_ifl, brz[0])\n blur_ret_brz = core.morpho.Close(blur_ret_brz, size=8)\n\n kirsch_mask = kirsch(clip_y).std.Binarize(brz[1])\n kirsch_ifl = kirsch_mask.std.Deflate().std.Inflate()\n kirsch_brz = core.std.Binarize(kirsch_ifl, brz[1])\n kirsch_brz = core.morpho.Close(kirsch_brz, size=4)\n\n merged = core.std.Expr([blur_ret_brz, kirsch_brz], \"x y +\")\n rm_grain = core.rgvs.RemoveGrain(merged, rg_mode)\n return rm_grain if bits == 16 else depth(rm_grain, bits)",
"def bsr(blast_object:Blastn, max_bits_dict:dict):\n\n for hsp in blast_object.hsp_objects:\n hsp.bsr = hsp.bits / max_bits_dict[hsp.name]\n\n if hsp.bsr < MIN_BSR:\n blast_object.remove_hsp_object_all(hsp)",
"def run_sbm(ctx, input_dir, output_dir, mask_file, zscore_plot=2):\n import os\n import io\n import os\n import subprocess\n\n from jinja2 import Template\n from neuro_pypes.ica import plot_ica_results\n\n input_glob = os.path.join(input_dir, '*.nii')\n\n tmp_file = 'sbm_batch_template.m'\n tmp_str = Template(io.open(tmp_file).read())\n tmp_str = tmp_str.render(input_glob=input_glob,\n output_dir=output_dir,\n out_prefix='sbm_',\n mask_file=mask_file)\n\n batch_file = os.path.abspath('sbm_filled_template.m')\n io.open(batch_file, 'w').write(tmp_str)\n\n cmd = 'matlab -nodesktop -nosplash -r \"icatb_batch_file_run(\\'{}\\'); exit();\"'.format(batch_file)\n print(cmd)\n subprocess.check_call(cmd, shell=True)\n\n os.remove(batch_file)\n\n bg_img = os.path.expanduser(SPM_CANONICAL_BRAIN_2MM)\n return plot_ica_results(output_dir, application='sbm', mask_file=mask_file,\n zscore=zscore_plot, bg_img=bg_img)",
"def test_scl_bcc_with_correct_checksum():\n assert scl.calc_bcc(b'\\x060 91 56 24859 169 11\\x03') == b'\\x12'",
"def skull_strip(input_data, output_filename='', output_mask_filename='', method=\"bet\", command=\"fsl5.0-bet2\", temp_dir='./', extra_parameters={}):\n\n skull_strip_methods = ['bet']\n if method not in skull_strip_methods:\n print('Input \\\"method\\\" parameter is not available. Available methods: ', skull_strip_methods)\n return\n\n if method == 'bet':\n\n # A good reason to have a Class for qtim methods is to cut through all of this extra code.\n\n temp_input, temp_output, temp_mask_output = False, False, False\n\n if not isinstance(input_data, str):\n input_filename = os.path.join(temp_dir, 'temp.nii.gz')\n save_numpy_2_nifti(input_data, input_filename)\n temp_input = True\n else:\n input_filename = input_data\n\n if output_filename == '':\n temp_output = True\n output_filename = os.path.join(temp_dir, 'temp_out.nii.gz')\n\n if output_mask_filename == '':\n temp_mask_output = True\n output_mask_filename = os.path.join(temp_dir, 'temp_mask_out.nii.gz')\n\n # if extra_parameters['fsl_threshold'] is None:\n extra_parameters['fsl_threshold'] = .5\n\n print(' '.join([command, input_filename, output_filename, '-f', str(extra_parameters['fsl_threshold']), '-g', '0', '-m']))\n subprocess.call([command, input_filename, output_filename, '-f', str(extra_parameters['fsl_threshold']), '-g', '0', '-m'])\n\n if output_mask_filename != '':\n move(output_filename + '_mask.nii.gz', output_mask_filename)\n\n if temp_input:\n os.remove(input_filename)\n pass\n\n if temp_output or temp_mask_output:\n output, output_mask = convert_input_2_numpy(output_filename), convert_input_2_numpy(output_mask_filename)\n os.remove(output_filename)\n os.remove(output_mask_filename)\n return output, output_mask",
"def LRST_RX_B(self, value):\n if value not in [0, 1]:\n raise ValueError(\"Value must be [0,1]\")\n self._writeReg('CHIPCFG', 'LRST_RX_B', value)",
"def decode_strand(read_flag, mask):\n\n strand_flag = (read_flag & mask == 0)\n if strand_flag:\n return \"+\"\n else:\n return \"-\""
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
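To illustrate what the wrapped scrambler_bb block does to an unpacked-binary stream, here is a minimal pure-Python sketch of a multiplicative (self-synchronizing) LFSR scrambler. The function name, the tap-selection convention (parity of the masked register bits), and the bit ordering are assumptions made for illustration only; they are not taken from the gr-digital implementation.

def lfsr_scramble(bits, mask, seed, length):
    # Multiplicative (self-synchronizing) scrambler over unpacked bits (0/1).
    # 'mask' selects the feedback taps, 'seed' is the initial register state,
    # and 'length' is the shift-register length in bits. The tap/bit-ordering
    # conventions here are illustrative assumptions.
    reg = seed & ((1 << length) - 1)
    out = []
    for b in bits:
        feedback = bin(reg & mask).count("1") & 1  # parity of the tapped bits
        o = (b ^ feedback) & 1
        out.append(o)
        reg = ((reg << 1) | o) & ((1 << length) - 1)
    return out

# Example with arbitrary parameters: scramble a short unpacked-bit stream.
print(lfsr_scramble([1, 0, 1, 1, 0, 0, 1], mask=0x11, seed=0x7F, length=7))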
__init__(self) -> digital_simple_framer_sptr __init__(self, p) -> digital_simple_framer_sptr | def __init__(self, *args):
this = _digital_swig.new_digital_simple_framer_sptr(*args)
try: self.this.append(this)
except: self.this = this | [
"def __init__(self, *args):\n this = _digital_swig.new_digital_framer_sink_1_sptr(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self, *args):\n this = _digital_swig.new_digital_descrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self, *args):\n this = _digital_swig.new_digital_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self, *args):\n this = _digital_swig.new_digital_packet_sink_sptr(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self, *args):\n this = _digital_swig.new_digital_diff_decoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self, *args):\n this = _digital_swig.new_digital_bytes_to_syms_sptr(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self, *args):\n this = _digital_swig.new_digital_additive_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_if_sptr(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self, *args):\n this = _digital_swig.new_digital_diff_encoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self, src):\n self.src = src",
"def __init__(self):\n this = _coin.new_SoByteStream()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this",
"def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_ic_sptr(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_sc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_bc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_sf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_bf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self, name=None):\n self._mng = pn_messenger(name)",
"def __init__(self, *args):\n _ida_pro.channel_redir_t_swiginit(self, _ida_pro.new_channel_redir_t(*args))",
"def __init__(self, *args):\n this = _digital_swig.new_digital_diff_phasor_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Standard clamping of a value into a fixed range (in this case -4.0 to 4.0) | def clip(val):
return max(min(val, 4.0), -4.0) | [
"def clamp(x='0.0', min='0.0', max='1.0'):\n\n pass",
"def clamp(inclusive_lower_bound: int,\n inclusive_upper_bound: int,\n value: int) -> int:\n if value <= inclusive_lower_bound:\n return inclusive_lower_bound\n elif value >= inclusive_upper_bound:\n return inclusive_upper_bound\n else:\n return value",
"def clamp( v, v_min, v_max ):\n if v < v_min: v = v_min\n if v > v_max: v = v_max\n return v",
"def clamp(num, min_val, max_val):\n return max(min_val, min(num, max_val))",
"def clamp(x, a, b):\n return min(max(x,a),b)",
"def clamp(value, parameter):\n value = parameter.min if value < parameter.min else value\n value = parameter.max if value > parameter.max else value\n return value",
"def clamp(x, lower, upper) -> torch.Tensor:\n x = torch.min(torch.max(x, lower), upper)\n x = torch.where(lower < upper, x, (lower + upper) / 2)\n return x",
"def clamp01(val):\n return clamp(val, 0.0, 1.0)",
"def _clamp(x):\n if x < -50:\n return -50\n if x > 50:\n return 50\n return x",
"def clamp(val, minimum, maximum):\n if minimum > maximum:\n tmp = minimum\n minimum = maximum\n maximum = tmp\n\n if minimum > val:\n return minimum\n elif maximum < val:\n return maximum\n else:\n return val",
"def clip_scalar(val, vmin, vmax):\n return vmin if val < vmin else vmax if val > vmax else val",
"def clip(min, val, max):\n return min if val < min else max if val > max else val",
"def maybe_clamp(x, x_range, ignored_if_non_positive):\n x_min, x_max = x_range\n if x_min is not None and x_max is not None and x_min > x_max:\n raise ValueError('Invalid range: %s.' % str(x_range))\n if (x_min is not None) and (not ignored_if_non_positive or x_min > 0.0):\n x = tf.math.maximum(x_min, x)\n if (x_max is not None) and (not ignored_if_non_positive or x_max > 0.0):\n x = tf.math.minimum(x_max, x)\n return x",
"def clip(value, min=None, max=None):\n if min is not None and value < min:\n value = min\n if max is not None and value > max:\n value = max\n return value",
"def clamp(input, min_value=None, max_value=None) -> torch.Tensor:\n return leaky_clamp(\n input, min_value=min_value, max_value=max_value, clamped_slope=0.0\n )",
"def clamp(val,low=None,high=None):\n if (low is not None) and (val < low):\n return low\n if (high is not None) and (val > high):\n return high\n return val",
"def clip(val, minval, maxval):\n return max(min(maxval, val), minval)",
"def _fit_to_range(self, val: float) -> float:\n return self.lo + (val * self.range)",
"def clamp(min_val, max_val):\n def decorator(func):\n @wraps(func)\n def wrapped(*arg, **kwargs):\n return sorted((min_val, func(*arg, **kwargs), max_val))[1]\n return wrapped\n return decorator"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
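The clip helper above hard-codes its range to [-4.0, 4.0]. As a small illustration, a generalized variant (the name clip_range and its default bounds are hypothetical, not from the original) makes the same min/max composition reusable.

def clip_range(val, lo=-4.0, hi=4.0):
    # Confine val to the closed interval [lo, hi].
    return max(min(val, hi), lo)

assert clip_range(5.2) == 4.0     # clamped to the upper bound
assert clip_range(-7.0) == -4.0   # clamped to the lower bound
assert clip_range(1.5) == 1.5     # already in range, returned unchanged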
returns a 2x2 confusion matrix assuming binary data values, as per [[true_negatives, false_positives], [false_negatives, true_positives]] | def confusion_matrix(actual, predictions):
if predictions.shape[0] != actual.shape[0]:
raise ValueError("predictions and actual must be the same length")
true_negatives = 0
false_negatives = 0
false_positives = 0
true_positives = 0
for actual_value, predicted_value in zip(actual,predictions):
if int(actual_value) == 0 and int(predicted_value) == 0:
true_negatives += 1
elif int(actual_value) == 1 and int(predicted_value) == 0:
false_negatives += 1
elif int(actual_value) == 0 and int(predicted_value) == 1:
false_positives += 1
else:
true_positives += 1
confusion_matrix = [[true_negatives, false_positives],[false_negatives, true_positives]]
return confusion_matrix | [
"def buildConfusionMatrix(predictedLabels, correctLabels):\n confusionMatrix = {}\n confusionMatrix['positive', 'positive'] = 0\n confusionMatrix['positive', 'neutral'] = 0\n confusionMatrix['positive', 'negative'] = 0\n confusionMatrix['neutral', 'positive'] = 0\n confusionMatrix['neutral', 'neutral'] = 0\n confusionMatrix['neutral', 'negative'] = 0\n confusionMatrix['negative', 'positive'] = 0\n confusionMatrix['negative', 'neutral'] = 0\n confusionMatrix['negative', 'negative'] = 0\n ##### START OF YOUR CODE HERE ######\n\n\n ##### END OF YOUR CODE HERE ######\n return confusionMatrix",
"def confusion_matrix(test_labels, pred_labels):\n\t# store the unique classes in this problem\n\tclasses = np.unique(test_labels)\n\t# initialize empty confusion matrix\n\tcon_matrix = np.zeros((np.size(classes), np.size(classes)), dtype=int)\n\t# for each test image, compare the predicted and true labels\n\tfor n in range(np.size(test_labels)):\n\t\t# if the labels match up\n\t\tif test_labels[n] == pred_labels[n]:\n\t\t\t# find the index of the class\n\t\t\ti = np.where(classes==test_labels[n])\n\t\t\t# and increment the count of the corresponding diagonal entry in the confusion matrix\n\t\t\tcon_matrix[i, i] += 1 \n\t\t# if the labels don't match up\n\t\telse:\n\t\t\t# find the index of the actual and predicted class\n\t\t\ti_actual = np.where(classes==test_labels[n])\n\t\t\ti_pred = np.where(classes==pred_labels[n])\n\t\t\t# and increment the count of the corresponding off-diagonal entry\n\t\t\tcon_matrix[i_actual, i_pred] += 1\n\treturn con_matrix",
"def test_confusion_matrix_from_data(self):\n\n #binary test\n exp = [1,1,1,0,0,0,1,1,1,0,1,0,1]\n obs = [1,1,0,1,0,0,1,1,1,1,1,1,0]\n exp_tp = 6.0\n exp_fp = 3.0\n exp_fn = 2.0\n exp_tn = 2.0\n \n tp,fp,fn,tn = confusion_matrix_from_data(obs,exp)\n self.assertEqual([tp,fp,fn,tn],\\\n [exp_tp,exp_fp,exp_fn,exp_tn])\n\n #quantitative test\n #results should be indentical to binary test\n #since only presence/absence is considered\n exp = [1,1,1,0,0,0,1,1,1,0,1,0,1]\n obs = [13.7,6.5,0,1,0,0,2.3,1,1.0,1.3,1.5,1,0]\n exp_tp = 6.0\n exp_fp = 3.0\n exp_fn = 2.0\n exp_tn = 2.0\n \n tp,fp,fn,tn = confusion_matrix_from_data(obs,exp)\n self.assertEqual([tp,fp,fn,tn],\\\n [exp_tp,exp_fp,exp_fn,exp_tn])",
"def confusion_matrix(self):\n if self.prediction is None:\n self.predict()\n return confusion_matrix(self.y_test, self.prediction)",
"def test(self, test_data):\n true_labels = test_data[\"label\"]\n pred_labels = pd.DataFrame()\n pred_labels[\"label\"] = self.get_pred(test_data.drop(\"label\", axis=1))[\"label\"]\n self.cm = confusion_matrix(true_labels, pred_labels)\n return self.cm",
"def confusion(prediction, truth):\r\n confusion_vector = prediction / truth\r\n # Element-wise division of the 2 tensors returns a new tensor which holds a\r\n # unique value for each case:\r\n # 1 where prediction and truth are 1 (True Positive)\r\n # inf where prediction is 1 and truth is 0 (False Positive)\r\n # nan where prediction and truth are 0 (True Negative)\r\n # 0 where prediction is 0 and truth is 1 (False Negative)\r\n\r\n true_positives = torch.sum(confusion_vector == 1).item()\r\n false_positives = torch.sum(confusion_vector == float('inf')).item()\r\n true_negatives = torch.sum(torch.isnan(confusion_vector)).item()\r\n false_negatives = torch.sum(confusion_vector == 0).item()\r\n\r\n return true_positives, false_positives, true_negatives, false_negatives",
"def confusion_matrix(self):\n for exp in self._array_raw_exps:\n self._all_confusion_matrix.append(self.get_confusion_matrix(self._num_classes, exp))\n # print(self._all_confusion_matrix[0])\n # os.system(\"pause\")",
"def confusion_matrix(ans, predicts):\n confusion_answer = pd.Series(ans, name='Answer')\n confusion_predicted = pd.Series(predicts, name='Predicted')\n return pd.crosstab(confusion_answer, confusion_predicted)",
"def get_cf_mat(df, match_col='Match', pred_col='p', cutoff=0.5):\n df['p_cutoff'] = np.where(\n df[pred_col] >= cutoff,\n 1,\n 0)\n cf = confusion_matrix(df[match_col], df['p_cutoff'])\n\n accuracy = np.trace(cf) / float(np.sum(cf))\n # Metrics for Binary Confusion Matrices\n precision = cf[1, 1] / sum(cf[:, 1])\n recall = cf[1, 1] / sum(cf[1, :])\n f1_score = 2 * precision * recall / (precision + recall)\n stats_text = \"\\n\\nAccuracy={:0.3f}\\nPrecision={:0.3f}\\nRecall={:0.3f}\\nF1 Score={:0.3f}\".format(\n accuracy, precision, recall, f1_score)\n\n group_names = ['True Neg', 'False Pos', 'False Neg', 'True Pos']\n group_counts = [\"{0:0.0f}\".format(value) for value in\n cf.flatten()]\n group_percentages = [\"{0:.2%}\".format(value) for value in\n cf.flatten() / np.sum(cf)]\n labels = []\n for i in zip(group_names, group_counts, group_percentages):\n labels.append(\"%s\" % i[0] + \"\\n%s\" % i[1] + \"\\n%s\" % i[2])\n labels = np.asarray(labels).reshape(2, 2)\n\n plt.figure(figsize=(8, 5))\n sns.heatmap(cf,\n annot=labels,\n fmt='',\n cmap='Blues',\n annot_kws={\"size\": 14})\n plt.xlabel(stats_text, fontsize=14)\n plt.title(\"Classifer Cutoff: {:.0%}\".format(cutoff), fontsize=20)",
"def confusion_matrix(self, name, labels_test, preds):\n print('{} Confusion Matrix ({} samples): '.format(name, len(labels_test)))\n print(confusion_matrix(labels_test, preds))",
"def confusion_matrix(model_ft, n_classe, dataloaders, device=default_device):\r\n\r\n # initialize the confusion matrix\r\n conf_matrix = torch.zeros(n_classe, n_classe)\r\n\r\n # enable the computation without training\r\n with torch.no_grad():\r\n for i, (inputs, label) in enumerate(dataloaders['val']):\r\n inputs = inputs.to(device)\r\n label = label.to(device)\r\n outputs = model_ft(inputs)\r\n _, preds = torch.max(outputs, 1)\r\n for t, p in zip(label.view(-1), preds.view(-1)):\r\n conf_matrix[t.long(), p.long()] += 1\r\n\r\n # percentage properly classified, from a class\r\n accuracy_from_class = conf_matrix.diag() / conf_matrix.sum(dim=1)\r\n\r\n # percentage classified in a class which are properly classified\r\n # TODO: change the 0 in the sum to sum on line instead of column\r\n accuracy_to_class = conf_matrix.diag() / conf_matrix.sum(dim=0)\r\n\r\n return np.array(conf_matrix), accuracy_from_class, accuracy_to_class",
"def calc_confusion_matrix(test_set, model):\n confusion_matrix = np.zeros((4, 4))\n predictions = model.predict(test_set)\n ground_truths = test_set[:, -1].astype(int)\n for i, j in zip(predictions, ground_truths):\n confusion_matrix[i - 1, j - 1] = confusion_matrix[i - 1, j - 1] + 1\n return confusion_matrix",
"def _get_conf_mat(self):\n conf_clean, conf_cat = read_confounds(self.confounds)\n conf_mat = pd.get_dummies(self.data[conf_clean], columns=conf_cat, \n drop_first=True)\n return conf_mat.to_numpy()",
"def generate_confusion_matrix(y_test, predictions, start=10, stop=90, steps=5):\n matrices = []\n for i in range(start, stop, steps):\n matrix = confusion_matrix(y_test, predictions[:, 1] > i/100)\n matrices.append(matrix)\n print(i, matrix)\n return steps, matrices",
"def tf_confusion_matrix(predictions, labels, classes):\r\n print(\"pred = %s, type = %s\\nlabels = %s, type = %s\\nclasses = %s, type = %s\" % \r\n (predictions[0:5], type(predictions), labels[0:5], type(labels), classes[0:5], type(classes)))\r\n\r\n y_true = []\r\n y_pred = []\r\n\r\n for p in predictions:\r\n pred = p[0]\r\n y_pred.append(classes[pred])\r\n\r\n for l in labels:\r\n label = l[0]\r\n y_true.append(classes[label])\r\n\r\n cm = metrics.confusion_matrix(y_true, y_pred, classes)\r\n\r\n return cm",
"def compute_confusion_matrix(y_true, y_pred, classes):\n y_pred = np.around(\n np.clip(\n y_pred,\n a_min=np.min(classes),\n a_max=np.max(classes)\n )\n )\n cm = confusion_matrix(y_true, y_pred, labels=classes)\n num_labels_vectorized = cm.sum(axis=1)[:, np.newaxis]\n cm = np.divide(cm.astype('float'),\n num_labels_vectorized)\n return cm",
"def compute_confuse_matrix(fname, classes):\n print('im in')\n y_true = []\n with codecs.open(fname, 'r', 'utf8') as f:\n for line in f:\n line = line.strip().split('\\t')[-1]\n y_true.append(line)\n\n checkpoint_dir = \"output/self_attention/multi_attention_0802/\"\n pred_path = \"tmp/eval_y_self_attention.txt\"\n if os.path.exists(checkpoint_dir + 'config.pkl'):\n config = pickle.load(open(checkpoint_dir+'config.pkl', 'rb'))\n else:\n config = Config()\n\n\n config.mode = 'inference'\n \n word2id, id2word = read_vocab(config.word_vocab_file)\n tag2id, id2tag = read_vocab(config.tag_vocab_file)\n\n with tf.Session(config=get_config_proto(log_device_placement=False)) as sess:\n model = get_model(config.model, config, sess)\n model.build()\n model.restore_model(checkpoint_dir)\n y_pred = infer_file(model, word2id, id2tag, fname, pred_path)\n\n cmatrix = confusion_matrix(y_true, y_pred, classes)\n print(cmatrix)\n correct = [x == y for x,y in list(zip(y_true, y_pred))]\n print(correct.count(True) / len(correct))\n return cmatrix",
"def confusion_matrix(self, test_x, test_y):\n\n # Create an empty dictionary of dictionary and initialize it to 0\n d = defaultdict(dict)\n for xx in range(10):\n for yy in range(10):\n d[xx][yy] = 0\n\n data_index = 0\n for xx, yy in zip(test_x, test_y):\n # classify the test example\n predicted = self.classify(xx)\n # populate the dictionary\n d[yy][predicted] += 1\n data_index += 1\n if data_index % 100 == 0:\n print(\"%i/%i for confusion matrix\" % (data_index, len(test_x)))\n return d",
"def confusion_matrix(m, pairs):\n for p in pairs:\n truth = p[1]\n pred = p[2]\n if truth not in m:\n m[truth] = {}\n if pred not in m[truth]:\n m[truth][pred] = 0\n m[truth][pred] = m[truth][pred] + 1"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
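A minimal usage sketch for the confusion_matrix function above, assuming 1-D numpy arrays of binary (0/1) labels; the sample data below is made up purely for illustration.

import numpy as np

actual      = np.array([0, 1, 1, 0, 1, 0, 1, 0])
predictions = np.array([0, 1, 0, 0, 1, 1, 1, 0])

cm = confusion_matrix(actual, predictions)
# Layout is [[TN, FP], [FN, TP]], so here cm == [[3, 1], [1, 3]]
print(cm)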
computes the accuracy by referencing the confusion_matrix function above; the two must be defined together | def accuracy(actual, predictions):
if predictions.shape[0] != actual.shape[0]:
raise ValueError("predictions and actual must be the same length!")
CM = confusion_matrix(actual, predictions)
acc = (CM[1][1]+CM[0][0])/(CM[1][1]+CM[0][0]+CM[1][0]+CM[0][1])
return acc | [
"def accuracy(confusion_matrix):\n\n total = 0\n correct = 0\n for ii in confusion_matrix:\n total += sum(confusion_matrix[ii].values())\n correct += confusion_matrix[ii].get(ii, 0)\n\n if total:\n return float(correct) / float(total)\n else:\n return 0.0",
"def accuracy(self,X_test,Y_test): #returns the accuracy of the model for a given testing data set(X_test,Y_test), both should be provided\n predictions=self.predict(X_test)\n temp=np.abs(Y_test-predictions)\n temp=float(np.squeeze(np.sum(temp)))\n m=X_test.shape[1]\n accuracy=100-((temp/m)*100)\n return accuracy",
"def accuracy(y_true, y_pred):\n return torch.mean(y_pred.eq(y_true).float())",
"def accuracy(labels, labels_true):\r\n # YOUR CODE HERE\r\n\r\n total_label = len(labels)\r\n correct_label = 0\r\n\r\n for i in range(total_label):\r\n if labels[i] == labels_true[i]:\r\n correct_label += 1\r\n\r\n return correct_label/total_label\r\n pass",
"def test_calculate_accuracy_stats_from_confusion_matrix(self):\n #This really only works well for qualitative data, since\n # most values will be somewhat off with quantitative data.\n #The method is generic, however, so either can be used.\n \n #test data from Wikipedia: http://en.wikipedia.org/wiki/Receiver_operating_characteristic\n \n #Example A\n tp,fp,fn,tn = 63,28,37,72\n result = calculate_accuracy_stats_from_confusion_matrix(tp,fp,fn,tn)\n self.assertFloatEqual(result['sensitivity'],0.63)\n self.assertFloatEqual(result['false_positive_rate'],0.28)\n self.assertFloatEqual(result['specificity'],0.72)\n self.assertFloatEqual(result['accuracy'],0.675)\n \n #Example B\n tp,fp,fn,tn = 77,77,23,23\n result = calculate_accuracy_stats_from_confusion_matrix(tp,fp,fn,tn)\n self.assertFloatEqual(result['sensitivity'],0.77)\n self.assertFloatEqual(result['false_positive_rate'],0.77)\n self.assertFloatEqual(result['specificity'],0.23)\n self.assertFloatEqual(result['accuracy'],0.50)\n\n #Example C\n tp,fp,fn,tn = 24,88,76,12\n result = calculate_accuracy_stats_from_confusion_matrix(tp,fp,fn,tn)\n self.assertFloatEqual(result['sensitivity'],0.24)\n self.assertFloatEqual(result['false_positive_rate'],0.88)\n self.assertFloatEqual(result['specificity'],0.12)\n self.assertFloatEqual(result['accuracy'],0.18)",
"def compute_accuracy(predictions, labels):\n predicted_labels = torch.argmax(predictions, dim=1)\n n_correct = torch.sum(predicted_labels == labels).item()\n batch_size = torch.numel(labels)\n acc = float(n_correct) / float(batch_size)\n return acc * 100",
"def accuracy(predictions, labels):\n return 100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1)) / predictions.shape[0]",
"def calculate_accuracy_score(self) -> float:\n return accuracy_score(self.labels, self.y_pred)",
"def _eval_classification_accuracy(self):\n batch_size = len(self._targets)\n\n pred = torch.cat(self._predictions, dim=1)\n target = torch.cat(self._targets)\n\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n results = {}\n for k in self._topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n results[f\"Top_{k} Acc\"] = correct_k.mul_(100.0 / batch_size).item()\n self._results[\"Accuracy\"] = results\n\n small_table = create_small_table(results)\n logger.info(\"Evaluation results for classification: \\n\" + small_table)\n\n if self._dump:\n dump_info_one_task = {\n \"task\": \"classification\",\n \"tables\": [small_table],\n }\n self._dump_infos.append(dump_info_one_task)",
"def accuracy(cft):\n # Sum the diagonal\n accuracy = (cft[tp] + cft[tn]) / float(np.sum(cft))\n return accuracy",
"def average_perceptron_accuracy(train_feature_matrix, val_feature_matrix, train_labels, val_labels, T):\r\n # Your code here\r\n theta, theta_0 = average_perceptron(train_feature_matrix, train_labels, T)\r\n\r\n train_predictions = classify(train_feature_matrix, theta, theta_0)\r\n val_predictions = classify(val_feature_matrix, theta, theta_0)\r\n\r\n train_accuracy = accuracy(train_predictions, train_labels)\r\n validation_accuracy = accuracy(val_predictions, val_labels)\r\n\r\n return (train_accuracy, validation_accuracy)",
"def calculate_accuracy(model, dataSet, device):\n \"\"\"Calculates the accuracy of the model towards the data\"\"\"\n model.eval() # put in evaluation mode\n total_correct = 0\n total_images = 0\n confusion_matrix = np.zeros([10, 10], int)\n # Doesn't calculate back prog\n with torch.no_grad():\n for data in dataSet:\n # Get data\n images, labels = data\n images = images.to(device)\n labels = labels.to(device)\n outputs = model(images) # Run through model\n _, predicted = torch.max(outputs.data, 1) # pick label\n total_images += labels.size(0)\n total_correct += (predicted == labels).sum().item()\n for i, l in enumerate(labels):\n confusion_matrix[l.item(), predicted[i].item()] += 1 # Correct prediction should be in the diagonal\n\n model_accuracy = total_correct / total_images * 100\n return model_accuracy, confusion_matrix",
"def perceptron_accuracy(train_feature_matrix, val_feature_matrix, train_labels, val_labels, T):\r\n # Your code here\r\n theta, theta_0 = perceptron(train_feature_matrix, train_labels, T)\r\n\r\n train_predictions = classify(train_feature_matrix, theta, theta_0)\r\n val_predictions = classify(val_feature_matrix, theta, theta_0)\r\n\r\n train_accuracy = accuracy(train_predictions, train_labels)\r\n validation_accuracy = accuracy(val_predictions, val_labels)\r\n\r\n return (train_accuracy, validation_accuracy)",
"def getAccuracy(self):\r\n \r\n \tif self.predictionError <= cons.epsilon_0:\r\n \t accuracy = 1.0\r\n \telse:\r\n \t accuracy = cons.alpha * ( (self.predictionError / cons.epsilon_0)**(-cons.nu) )\r\n\r\n \treturn accuracy",
"def accuracy(activations, fixations, gpu):\n\n ##Accuracy for one image\n\n #drop unnecessary first dimension of activations (there is only one channel)\n activations = activations.reshape(activations.size()[-2], activations.size()[-1])\n\n #how many fixations are there?\n num_fix = 0\n for i,j in fixations:\n if (i,j) == (-1000,-1000):\n break\n num_fix += 1\n #flatten\n activations_f = activations.view(-1)\n\n #find x largest values and their indices in flattened activation-tensor\n lar_val, lar_val_idx = torch.topk(activations_f, num_fix)\n\n idx_unfl = []\n for idx_fl in lar_val_idx:\n idx_unfl.append(map_idx(activations, idx_fl.item(), gpu))\n\n #see if they match with fixations indices\n hits = 0\n #does each fixation lead to one of the x biggest activation values?\n for fix in range(num_fix):\n for idx in idx_unfl:\n current = torch.all(torch.eq(idx,fixations[fix]))\n hits += current.item()\n\n #calcualte proportion of hits\n acc = hits / num_fix\n \n return acc, hits, num_fix",
"def compute_classification_accuracy(labels: List[int], predictions: List[int], num_classes: int = -1) -> float:\n assert len(labels) == len(predictions)\n\n correct = 0\n for a, b in zip(labels, predictions):\n if a == b:\n correct += 1\n\n return correct / len(labels)",
"def cal_classificationerror(y, y_pred):\n return 1-accuracy(y,y_pred)",
"def accuracy(data, labels, centroids):\n label_dict = update_assignment(data, labels, centroids)\n sum = 0\n for label_list in label_dict.values():\n sum += majority_count(label_list)\n accuracy = sum / len(labels)\n return accuracy",
"def compute_accuracy(predictions, labels):\n return labels[predictions.ravel() < 0.5].mean()\n # return np.mean(labels==(predictions.ravel() > 0.5))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
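Using the same made-up labels as in the sketch above, accuracy reduces to (TP + TN) / (TP + TN + FP + FN); a quick check against the function, which assumes confusion_matrix is defined in the same scope.

import numpy as np

actual      = np.array([0, 1, 1, 0, 1, 0, 1, 0])
predictions = np.array([0, 1, 0, 0, 1, 1, 1, 0])

acc = accuracy(actual, predictions)
# (TP + TN) / (TP + TN + FP + FN) = (3 + 3) / 8 = 0.75
assert abs(acc - 0.75) < 1e-12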
returns precision and recall using the confusion matrix above, as per the Wikipedia definitions | def precision_and_recall(actual, predictions):
if predictions.shape[0] != actual.shape[0]:
raise ValueError("predictions and actual must be the same length")
CM = confusion_matrix(actual, predictions)
#[[TN, FP],[FN, TP]]
if CM[1][1] != 0:
precision = CM[1][1]/(CM[1][1]+CM[0][1])
recall = CM[1][1]/(CM[1][1]+CM[1][0])
else:
precision = 0
recall = 0
"""
precision = (conf_matrix[1, 1])/(conf_matrix[1, 1] + conf_matrix[0, 1])
recall = (conf_matrix[1, 1])/(conf_matrix[1, 1] + conf_matrix[1, 0])
"""
return precision, recall | [
"def calc_precision(confusion_matrix):\n predicted = confusion_matrix.sum(1)\n correct = confusion_matrix.diagonal()\n return correct / predicted",
"def getPrecision(label, confusionMatrix):\n ##### START OF YOUR CODE HERE ######\n return 0\n ##### END OF YOUR CODE HERE ######",
"def calc_recall(confusion_matrix):\n predicted = confusion_matrix.sum(0)\n correct = confusion_matrix.diagonal()\n return correct / predicted",
"def calculate_recall(self):\n test_classes = [f[0] for f in self.test_set]\n correct_counts = {c: 0 for c in test_classes}\n total_counts = {c: 0 for c in test_classes}\n\n for feature_dict in self.test_set:\n actual_class = feature_dict[0]\n predicted_class = self.predict(feature_dict[1])\n\n if actual_class == predicted_class:\n correct_counts[actual_class] += 1\n total_counts[actual_class] += 1\n else:\n total_counts[actual_class] += 1\n\n print(\"=== Recall Statistics ===\")\n for c in correct_counts:\n if not total_counts[c] == 0:\n self.recall[c] = (correct_counts[c] * 1.0) / (total_counts[c] * 1.0)\n print(\"%s class recall:\" % (c.upper()), self.recall[c])\n else:\n print(\"%s class recall:\" % (c.upper()), \"N/A\")",
"def classification_metrics(confusion_matrix, depth):\r\n\r\n # Calculate and print the classification metrics from the confusion matrix and the depth\r\n accuracy = confusion_matrix.diagonal().sum()/confusion_matrix.sum()\r\n precision = confusion_matrix.diagonal()/confusion_matrix.sum(axis=0)\r\n recall = confusion_matrix.diagonal()/confusion_matrix.sum(axis=1)\r\n\r\n f1_measure = (2*precision*recall)/(precision+recall)\r\n\r\n print(f\"Tree depth: {depth} \\n\\n\",\r\n f\"Confusion matrix:\\n{confusion_matrix}\\n\\n\",\r\n f\"Accuracy: {accuracy} \\n\",\r\n f\"Class accuracies: {confusion_matrix.diagonal()} \\n\",\r\n f\"Class precisions: {precision}\\n\",\r\n f\"Class recalls: {recall}\\n\" ,\r\n f\"Class F1_measures: {f1_measure}\")",
"def compute_precision(confusion_matrix):\n\n true_positives = torch.diagonal(confusion_matrix)\n\n false_positives = torch.sum(confusion_matrix, dim=0) - true_positives\n\n unweighted_precision = torch.true_divide(true_positives, true_positives + false_positives)\n\n # to get the weighted precision, we use the label count as weights\n label_counts = torch.sum(confusion_matrix, dim=1)\n num_samples = torch.sum(label_counts)\n\n weighted_precision = torch.true_divide(unweighted_precision * label_counts, num_samples)\n\n return torch.sum(weighted_precision).item()",
"def confusion_matrix(self, name, labels_test, preds):\n print('{} Confusion Matrix ({} samples): '.format(name, len(labels_test)))\n print(confusion_matrix(labels_test, preds))",
"def calculate_precision(self):\n test_classes = [f[0] for f in self.test_set]\n correct_counts = {c: 0 for c in test_classes}\n total_counts = {c: 0 for c in test_classes}\n\n for feature_dict in self.test_set:\n actual_class = feature_dict[0]\n predicted_class = self.predict(feature_dict[1])\n\n if actual_class == predicted_class:\n correct_counts[actual_class] += 1\n total_counts[actual_class] += 1\n else:\n total_counts[predicted_class] += 1\n\n print(\"=== Precision Statistics ===\")\n for c in correct_counts:\n try:\n if not total_counts[c] == 0:\n self.precision[c] = (correct_counts[c] * 1.0) / (total_counts[c] * 1.0)\n print(\"%s class precision:\" % (c.upper()), self.precision[c])\n else:\n print(\"%s class precision:\" % (c.upper()), \"N/A\")\n except KeyError:\n continue # predicted class may be not int test_classes",
"def print_confusion_matrix(batch_predictions, labels):\n pred = np.argmax(batch_predictions, 1)\n lab = np.argmax(labels, 1)\n matrix = np.zeros((NUM_CLASSES, NUM_CLASSES))\n for x in xrange(batch_predictions.shape[0]):\n matrix[pred[x], lab[x]] += 1\n #print(matrix)\n print('\\n'.join([''.join(['{:4d}'.format(int(item)) for item in row]) for row in matrix]))\n correct_predictions = np.equal(pred, lab)\n tmp_accuracy = np.mean(correct_predictions)\n print(\"accuracy: \" + str(tmp_accuracy))",
"def calc_precision_recall(img_results):\r\n true_pos = 0; false_pos = 0; false_neg = 0\r\n for res in img_results:\r\n true_pos += res['true_pos']\r\n false_pos += res['false_pos']\r\n false_neg += res['false_neg']\r\n\r\n try:\r\n precision = true_pos/(true_pos + false_pos)\r\n except ZeroDivisionError:\r\n precision = 0.0\r\n try:\r\n recall = true_pos/(true_pos + false_neg)\r\n except ZeroDivisionError:\r\n recall = 0.0\r\n\r\n return (precision, recall)",
"def precision_recall(groundtruth, predictions):\n # filter NA groundtruth and sort by prediction scores\n filtered_predictions = [(p, g) for p, g in izip(predictions, groundtruth) if g != 0]\n sorted_predictions = sorted(filtered_predictions, key=lambda x: x[0])\n\n tp = len([x for x,g in sorted_predictions if g > 0]) # all positives\n fp = len([x for x,g in sorted_predictions if g < 0]) # all negatives\n # initially all examples are classified as positives\n # then we sweep through the data and adapt tp & fp accordingly\n tn = 0\n fn = 0\n precision = np.zeros(len(sorted_predictions)+1)\n recall = np.zeros(len(sorted_predictions)+1)\n # handle the special case of the first threshold value\n # where everything is classified positive -> perfect recall\n precision[0] = tp / float(fp + tp)\n recall[0] = tp / float(tp + fn)\n for idx, pg in enumerate(sorted_predictions):\n _, g = pg\n if g < 0:\n tn += 1\n fp -= 1\n elif g > 0:\n tp -= 1\n fn += 1\n\n if tp <= 0:\n precision[idx+1] = 0.\n recall[idx+1] = 0.\n else:\n precision[idx+1] = tp / float(fp + tp)\n recall[idx+1] = tp / float(tp + fn)\n return precision, recall",
"def tf_confusion_matrix(predictions, labels, classes):\r\n print(\"pred = %s, type = %s\\nlabels = %s, type = %s\\nclasses = %s, type = %s\" % \r\n (predictions[0:5], type(predictions), labels[0:5], type(labels), classes[0:5], type(classes)))\r\n\r\n y_true = []\r\n y_pred = []\r\n\r\n for p in predictions:\r\n pred = p[0]\r\n y_pred.append(classes[pred])\r\n\r\n for l in labels:\r\n label = l[0]\r\n y_true.append(classes[label])\r\n\r\n cm = metrics.confusion_matrix(y_true, y_pred, classes)\r\n\r\n return cm",
"def calculate_metrics(true_test_labels, pred_test_labels):\n weighted_f1 = precision_recall_fscore_support(true_test_labels,\n pred_test_labels,\n beta=3,\n average='binary')\n\n precision, recall, fbeta_score, _ = weighted_f1\n\n return precision, recall, fbeta_score",
"def getRecall(label, confusionMatrix):\n ##### START OF YOUR CODE HERE ######\n return 0\n ##### END OF YOUR CODE HERE ######",
"def calcPrecRecallF(self):\n totalRelevant = 0.0\n for doc in self.relevant:\n if doc in self.retrieved:\n totalRelevant += 1\n\n self.precision = totalRelevant/float(len(self.retrieved)) \n self.recall = float(totalRelevant)/self.numberRelevant\n self.fmseasure = 2.0*(self.precision*self.recall)/(self.precision+self.recall)\n\n print(\"##############################################\") \n print(\"Precision:\")\n print(self.precision)\n print(\"Recall:\")\n print(self.recall)\n print(\"F-measure:\")\n print(self.fmseasure)\n print(\"##############################################\")",
"def physical_precision_recall(self):\n gt_reader = self.gt_reader\n b3_reader = self.b3_reader\n\n gt_val = gt_reader.values\n b3_val = b3_reader.values\n\n # use nonzero since indices won't match\n #TODO this might require some preliminary data treatment to make sure nonactivities are counted properly\n #TODO seems to work for 'A','B', ... activity labels as well as number activity labels\n tp = np.count_nonzero((b3_val != 0) & (gt_val != 0))\n tn = np.count_nonzero((b3_val == 0) & (gt_val == 0))\n fp = np.count_nonzero((b3_val != 0) & (gt_val == 0))\n fn = np.count_nonzero((b3_val == 0) & (gt_val != 0))\n\n if (tp+fp <= 0 or tp+fn <= 0):\n raise Exception(\"Cannot compute precision and recall: div. by zero\")\n\n precision = float(tp) / (tp + fp)\n recall = float(tp) / (tp + fn)\n\n #DEBUG\n #print np.array_equal(gt_activityCounts.values,b3_activityCounts.values)\n #print np.array_equal(gt_activityStarts.values,b3_activityStarts.values)\n #print \"precision is: \", precision\n #print \"recall is: \", recall\n\n return precision, recall\n\n\n #for col in reader.transpose()[1:].ix:\n # # Get nonempty values TODO empty values are null, or 0?\n # #TODO do something nicer here\n # checks = dict()\n # for i in col[1:].ix:\n # activityFrames = i[i.notnull()][1:].index #indices of the frames in which activities occur\n # if activityFrames != []:\n # minIdx = min(activityFrames)\n # minVal = i[min(activityFrames)] # this is the val. of the first activity found in this col\n # checks[minIdx] = minVal\n # for j in col[1:]:\n # if j[minIdx] == minVal: activities.append(j[0]) # append individual number for matches\n\n ##now check rows for a match\n #for k in checks.iterkeys():\n # selected_rows = reader.ix[k]\n # selected_rows[selected_rows == selected_rows[k]]",
"def f1_score(confusion_matrix):\n # get TP, TN, FT, and FN\n tp = np.diag(confusion_matrix)\n fp = np.sum(confusion_matrix, axis=0) - tp\n fn = np.sum(confusion_matrix, axis=1) - tp\n\n # get precision and recall\n precision = tp / (tp + fp)\n recall = tp / (tp + fn)\n # get rid of nans if any\n precision[np.isnan(precision)] = 1\n recall[np.isnan(recall)] = 1\n # get f1 score for all classes\n f1_all_classes = np.array(2*(precision*recall)/(precision+recall))\n # get macrof1\n macro_f1 = f1_all_classes.mean()*100\n\n return macro_f1, f1_all_classes, precision, recall",
"def _evaluate(self):\n results = self.classifier.classify(self.inputs)\n self.confusion = [[0.0,0.0],[0.0,0.0]]\n for i in range(0,len(results)):\n #type(self.targets[i])\n #type(results[i])\n self.confusion[self.targets[i]][results[i]] += 1\n self.tpr = self.confusion[1][1] / sum(self.confusion[1])\n self.fpr = self.confusion[0][1] / sum(self.confusion[0])\n logging.debug(\"Confusion: %s, tpr %f, fpr %f\" % (str(self.confusion),self.tpr,self.fpr))",
"def precision_recall_fscore(y_test,y_pred):\r\n report = pd.DataFrame(columns=['Category', 'f_score', 'precision', 'recall'])\r\n\r\n for i,col in enumerate(y_test):\r\n precision, recall, f_score, support = precision_recall_fscore_support(y_test[col], y_pred[:,i], average='weighted')\r\n report.set_value(i, 'Category', col)\r\n report.set_value(i, 'f_score', f_score)\r\n report.set_value(i, 'precision', precision)\r\n report.set_value(i, 'recall', recall)\r\n print('simply the average results are:')\r\n print('Mean f_score:', report['f_score'].mean())\r\n print('Mean precision:', report['precision'].mean())\r\n print('Mean recall:', report['recall'].mean())\r\n return report"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
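With the same illustrative labels, precision = TP / (TP + FP) and recall = TP / (TP + FN); a quick consistency check against the function above (again assuming confusion_matrix is in scope).

import numpy as np

actual      = np.array([0, 1, 1, 0, 1, 0, 1, 0])
predictions = np.array([0, 1, 0, 0, 1, 1, 1, 0])

precision, recall = precision_and_recall(actual, predictions)
# precision = TP / (TP + FP) = 3 / (3 + 1) = 0.75
# recall    = TP / (TP + FN) = 3 / (3 + 1) = 0.75
assert abs(precision - 0.75) < 1e-12 and abs(recall - 0.75) < 1e-12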
Calculate the jitter in a set of actual zero crossings, given the ideal crossings and unit interval. | def calc_jitter(ui, nbits, pattern_len, ideal_xings, actual_xings):
jitter = []
t_jitter = []
i = 0
# Assemble the TIE track.
for actual_xing in actual_xings:
tie = actual_xing - ideal_xings[i]
# Check for multiple crossings and skip them.
if(tie < (-ui / 2.)):
continue
# Check for missed crossings and zero fill, as necessary.
#while(i < len(ideal_xings) and tie > (ui / 2.)):
while(i < len(ideal_xings) and tie > ui):
if(debug):
print "Just entered missed crossing detection with:"
print "i:", i
print "tie:", tie
                print "ideal_xing:", ideal_xings[i]
print "actual_xing:", actual_xing
print
for j in range(2): # If we missed one crossing, then we missed two.
if(i >= len(ideal_xings)):
if(debug):
print "Oops! Ran out of 'ideal_xings' entries while correcting for missed crossings."
break
jitter.append(0.)
t_jitter.append(ideal_xings[i])
i += 1
tie = actual_xing - ideal_xings[i]
assert tie >= (-ui / 2.)
if(i < len(ideal_xings)):
jitter.append(tie)
t_jitter.append(ideal_xings[i])
i += 1
if(i >= len(ideal_xings)):
if(debug):
print "Oops! Ran out of 'ideal_xings' entries. (i = %d, len(ideal_xings) = %d, len(jitter) = %d, len(actual_xings) = %d)" \
% (i, len(ideal_xings), len(jitter), len(actual_xings))
print "\tLast ideal xing: %e; last actual xing: %e." % (ideal_xings[-1], actual_xings[-1])
print "\tLast edge just processed occured at time, %e." % t_jitter[-1]
break
assert (len(jitter) == len(t_jitter)), "Error: Somehow, the lengths of the jitter vector and its time index are different!"
jitter = array(jitter)
# # DEBUG
# ixs = where(abs(jitter) > ui)[0]
# if(len(ixs)):
# ix = ixs[0]
# print "xings near large jitter:", actual_xings[ix - 5 : ix + 5]
if(debug):
print "mean(jitter):", mean(jitter)
print "len(jitter):", len(jitter)
jitter -= mean(jitter)
# Separate the rising and falling edges, shaped appropriately for averaging over the pattern period.
# - We have to be careful to keep the last crossing, in the case where there are an odd number of them,
# because we'll be assembling a "repeated average" vector, later, and subtracting it from the original
# jitter vector. So, we can't get sloppy, or we'll end up with misalignment between the two.
try:
xings_per_pattern = where(ideal_xings > pattern_len * ui)[0][0]
except:
print "ideal_xings:", ideal_xings
raise
assert not (xings_per_pattern % 2), "Odd number of crossings per pattern detected!"
risings_per_pattern = xings_per_pattern // 2
fallings_per_pattern = xings_per_pattern // 2
num_patterns = nbits // pattern_len - 1
jitter = jitter[xings_per_pattern:] # The first pattern period is problematic.
if(len(jitter) < xings_per_pattern * num_patterns):
jitter = np.append(jitter, zeros(xings_per_pattern * num_patterns - len(jitter)))
try:
t_jitter = t_jitter[:len(jitter)]
if(len(jitter) > len(t_jitter)):
jitter = jitter[:len(t_jitter)]
except:
print "jitter:", jitter
raise
try:
tie_risings = reshape(jitter.take(range(0, num_patterns * risings_per_pattern * 2, 2)), (num_patterns, risings_per_pattern))
tie_fallings = reshape(jitter.take(range(1, num_patterns * fallings_per_pattern * 2, 2)), (num_patterns, fallings_per_pattern))
except:
print "ideal_xings[xings_per_pattern - 1]:", ideal_xings[xings_per_pattern - 1], "ideal_xings[-1]:", ideal_xings[-1]
print "num_patterns:", num_patterns, "risings_per_pattern:", risings_per_pattern, "fallings_per_pattern:", fallings_per_pattern, "len(jitter):", len(jitter)
print "nbits:", nbits, "pattern_len:", pattern_len
raise
assert len(filter(lambda x: x == None, tie_risings)) == 0, "num_patterns: %d, risings_per_pattern: %d, len(jitter): %d" % \
(num_patterns, risings_per_pattern, len(jitter))
assert len(filter(lambda x: x == None, tie_fallings)) == 0, "num_patterns: %d, fallings_per_pattern: %d, len(jitter): %d" % \
(num_patterns, fallings_per_pattern, len(jitter))
# Do the jitter decomposition.
# - Use averaging to remove the uncorrelated components, before calculating data dependent components.
tie_risings_ave = tie_risings.mean(axis=0)
tie_fallings_ave = tie_fallings.mean(axis=0)
try:
isi = max(tie_risings_ave.ptp(), tie_fallings_ave.ptp())
except:
print "tie_risings_ave:", tie_risings_ave, "\ntie_fallings_ave:", tie_fallings_ave
raise
dcd = abs(mean(tie_risings_ave) - mean(tie_fallings_ave))
# - Subtract the data dependent jitter from the original TIE track.
tie_ave = concatenate(zip(tie_risings_ave, tie_fallings_ave))
tie_ave = resize(tie_ave, len(jitter))
try:
tie_ind = jitter - tie_ave
except:
print "tie_ave:", tie_ave
raise
if(debug):
plot(jitter, label="jitter", color="b")
plot(concatenate(zip(tie_risings_ave, tie_fallings_ave)), label="rise/fall aves.", color="r")
title("Original TIE track & Average over one pattern period")
legend()
show()
plot(t_jitter, tie_ind)
title("Data Independent Jitter")
show()
# - Use spectral analysis to help isolate the periodic components of the data independent jitter.
y = fft(make_uniform(t_jitter, tie_ind, ui, nbits))
y_mag = abs(y)
y_sigma = sqrt(mean((y_mag - mean(y_mag)) ** 2))
# - We'll call any spectral component with a magnitude > 6-sigma a "peak".
thresh = 6 * y_sigma
y_per = where(y_mag > thresh, y, zeros(len(y)))
if(debug):
print "# of spectral peaks detected:", len(where(y_per)[0])
print "thresh:", thresh, "max(y_mag):", max(y_mag)
tie_per = real(ifft(y_per))
pj = tie_per.ptp()
if(debug):
plot(tie_per)
title("Periodic Jitter")
show()
# - Subtract the periodic jitter and calculate the standard deviation of what's left.
tie_rnd = make_uniform(ideal_xings[:len(tie_ind)], tie_ind, ui, nbits) - tie_per
rj = sqrt(mean((tie_rnd - mean(tie_rnd)) ** 2))
return (jitter, t_jitter, isi, dcd, pj, rj, tie_ind) | [
"def calc_jitter(\n ui, nui, pattern_len, ideal_xings, actual_xings, rel_thresh=6, num_bins=99, zero_mean=True\n) -> JitterResults:\n\n def my_hist(x):\n \"\"\"\n Calculates the probability mass function (PMF) of the input vector,\n enforcing an output range of [-UI/2, +UI/2], sweeping everything in [-UI, -UI/2] into the first bin,\n and everything in [UI/2, UI] into the last bin.\n \"\"\"\n hist, bin_edges = np.histogram(\n x, [-ui] + [-ui / 2.0 + i * ui / (num_bins - 2) for i in range(num_bins - 1)] + [ui]\n )\n bin_centers = (\n [-ui / 2.0]\n + [np.mean([bin_edges[i + 1], bin_edges[i + 2]]) for i in range(len(bin_edges) - 3)]\n + [ui / 2.0]\n )\n\n return (np.array(list(map(float, hist))) / sum(hist), bin_centers)\n\n # Check inputs.\n if not ideal_xings.all():\n raise ValueError(\"calc_jitter(): zero length ideal crossings vector received!\")\n if not actual_xings.all():\n raise ValueError(\"calc_jitter(): zero length actual crossings vector received!\")\n\n # Line up first ideal/actual crossings, and count/validate crossings per pattern.\n ideal_xings = np.array(ideal_xings) - (ideal_xings[0] - ui / 2.0)\n actual_xings = np.array(actual_xings) - (actual_xings[0] - ui / 2.0)\n xings_per_pattern = np.where(ideal_xings > (pattern_len * ui))[0][0]\n if xings_per_pattern % 2 or not xings_per_pattern:\n log.debug(\"xings_per_pattern: %d\", xings_per_pattern)\n log.debug(\"len(ideal_xings): %d\", len(ideal_xings))\n log.debug(\"min(ideal_xings): %d\", min(ideal_xings))\n log.debug(\"max(ideal_xings): %d\", max(ideal_xings))\n raise AssertionError(\"utility.calc_jitter(): Odd number of (or, no) crossings per pattern detected!\")\n num_patterns = nui // pattern_len\n\n # Assemble the TIE track.\n i = 0\n jitter = []\n t_jitter = []\n skip_next_ideal_xing = False\n for ideal_xing in ideal_xings:\n if skip_next_ideal_xing:\n t_jitter.append(ideal_xing)\n skip_next_ideal_xing = False\n continue\n # Confine our attention to those actual crossings occuring\n # within the interval [-UI/2, +UI/2] centered around the\n # ideal crossing.\n min_t = ideal_xing - ui / 2.0\n max_t = ideal_xing + ui / 2.0\n while i < len(actual_xings) and actual_xings[i] < min_t:\n i += 1\n if i == len(actual_xings): # We've exhausted the list of actual crossings; we're done.\n break\n if actual_xings[i] > max_t: # Means the xing we're looking for didn't occur, in the actual signal.\n jitter.append(3.0 * ui / 4.0) # Pad the jitter w/ alternating +/- 3UI/4.\n jitter.append(-3.0 * ui / 4.0) # (Will get pulled into [-UI/2, UI/2], later.\n skip_next_ideal_xing = True # If we missed one, we missed two.\n else: # Noise may produce several crossings. 
We find all those\n xings = [] # within the interval [-UI/2, +UI/2] centered\n j = i # around the ideal crossing, and take the average.\n while j < len(actual_xings) and actual_xings[j] <= max_t:\n xings.append(actual_xings[j])\n j += 1\n tie = np.mean(xings) - ideal_xing\n jitter.append(tie)\n t_jitter.append(ideal_xing)\n jitter = np.array(jitter)\n\n log.debug(\"mean(jitter): %d\", np.mean(jitter))\n log.debug(\"len(jitter): %d\", len(jitter))\n\n if zero_mean:\n jitter -= np.mean(jitter)\n\n # Do the jitter decomposition.\n # - Separate the rising and falling edges, shaped appropriately for averaging over the pattern period.\n tie_risings = jitter.take(list(range(0, len(jitter), 2)))\n tie_fallings = jitter.take(list(range(1, len(jitter), 2)))\n tie_risings.resize(num_patterns * xings_per_pattern // 2)\n tie_fallings.resize(num_patterns * xings_per_pattern // 2)\n tie_risings = np.reshape(tie_risings, (num_patterns, xings_per_pattern // 2))\n tie_fallings = np.reshape(tie_fallings, (num_patterns, xings_per_pattern // 2))\n\n # - Use averaging to remove the uncorrelated components, before calculating data dependent components.\n try:\n tie_risings_ave = tie_risings.mean(axis=0)\n tie_fallings_ave = tie_fallings.mean(axis=0)\n isi = max(tie_risings_ave.ptp(), tie_fallings_ave.ptp())\n except:\n log.error(\"xings_per_pattern: %d\", xings_per_pattern)\n log.error(\"len(ideal_xings): %d\", len(ideal_xings))\n raise\n isi = min(isi, ui) # Cap the ISI at the unit interval.\n dcd = abs(np.mean(tie_risings_ave) - np.mean(tie_fallings_ave))\n\n # - Subtract the data dependent jitter from the original TIE track, in order to yield the data independent jitter.\n tie_ave = sum(list(zip(tie_risings_ave, tie_fallings_ave)), ())\n tie_ave = np.resize(tie_ave, len(jitter))\n tie_ind = jitter - tie_ave\n\n # - Use spectral analysis to help isolate the periodic components of the data independent jitter.\n # -- Calculate the total jitter spectrum, for display purposes only.\n # --- Make vector uniformly sampled in time, via zero padding np.where necessary.\n # --- (It's necessary to keep track of those elements in the resultant vector, which aren't paddings; hence, 'valid_ix'.)\n x, valid_ix = make_uniform(t_jitter, jitter, ui, nui)\n y = np.fft.fft(x)\n jitter_spectrum = abs(y[: len(y) // 2]) / np.sqrt(len(jitter)) # Normalized, in order to make power correct.\n f0 = 1.0 / (ui * nui)\n spectrum_freqs = [i * f0 for i in range(len(y) // 2)]\n\n # -- Use the data independent jitter spectrum for our calculations.\n tie_ind_uniform, valid_ix = make_uniform(t_jitter, tie_ind, ui, nui)\n\n # --- Normalized, in order to make power correct, since we grab Rj from the freq. 
domain.\n # --- (I'm using the length of the vector before zero padding, because zero padding doesn't add energy.)\n # --- (This has the effect of making our final Rj estimate more conservative.)\n y = np.fft.fft(tie_ind_uniform) / np.sqrt(len(tie_ind))\n y_mag = abs(y)\n y_mean = moving_average(y_mag, n=len(y_mag) // 10)\n y_var = moving_average((y_mag - y_mean) ** 2, n=len(y_mag) // 10)\n y_sigma = np.sqrt(y_var)\n thresh = y_mean + rel_thresh * y_sigma\n y_per = np.where(y_mag > thresh, y, np.zeros(len(y))) # Periodic components are those lying above the threshold.\n y_rnd = np.where(y_mag > thresh, np.zeros(len(y)), y) # Random components are those lying below.\n y_rnd = abs(y_rnd)\n rj = np.sqrt(np.mean((y_rnd - np.mean(y_rnd)) ** 2))\n tie_per = np.real(np.fft.ifft(y_per)).take(valid_ix) * np.sqrt(\n len(tie_ind)\n ) # Restoring shape of vector to its original,\n pj = tie_per.ptp() # non-uniformly sampled state.\n\n # --- Save the spectrum, for display purposes.\n tie_ind_spectrum = y_mag[: len(y_mag) // 2]\n\n # - Reassemble the jitter, excluding the Rj.\n # -- Here, we see why it was necessary to keep track of the non-padded elements with 'valid_ix':\n # -- It was so that we could add the average and periodic components back together,\n # -- maintaining correct alignment between them.\n if len(tie_per) > len(tie_ave):\n tie_per = tie_per[: len(tie_ave)]\n if len(tie_per) < len(tie_ave):\n tie_ave = tie_ave[: len(tie_per)]\n jitter_synth = tie_ave + tie_per\n\n # - Calculate the histogram of original, for comparison.\n hist, bin_centers = my_hist(jitter)\n\n # - Calculate the histogram of everything, except Rj.\n hist_synth, bin_centers = my_hist(jitter_synth)\n\n # - Extrapolate the tails by convolving w/ complete Gaussian.\n rv = norm(loc=0.0, scale=rj)\n rj_pdf = rv.pdf(bin_centers)\n rj_pmf = rj_pdf / sum(rj_pdf)\n hist_synth = np.convolve(hist_synth, rj_pmf)\n tail_len = (len(bin_centers) - 1) // 2\n hist_synth = (\n [sum(hist_synth[: tail_len + 1])]\n + list(hist_synth[tail_len + 1 : len(hist_synth) - tail_len - 1])\n + [sum(hist_synth[len(hist_synth) - tail_len - 1 :])]\n )\n\n return JitterResults(\n jitter,\n t_jitter,\n isi,\n dcd,\n pj,\n rj,\n tie_ind,\n thresh[: len(thresh) // 2],\n jitter_spectrum,\n tie_ind_spectrum,\n spectrum_freqs,\n hist,\n hist_synth,\n bin_centers,\n )",
"def jitter_variants(F0):\n \n # Compute jitter (absolute)\n jitt_absolute = np.sum(np.abs(np.diff(F0)))/np.float64(len(F0)-1.0)\n \n # Compute jitter (relative)\n jitt_relative = jitt_absolute/np.mean(F0)*100\n \n # Compute jitter (rap)\n inn_rap = 0.0\n for i in xrange(1, len(F0)-1):\n inn_rap += abs(F0[i]-sum(F0[i-1:i+1].copy())/3.0)\n jitt_rap = (np.sum(inn_rap)\\\n /np.float64(len(F0)-1.0))\\\n /np.mean(F0)*100\n \n # Compute jitter (ppq5)\n inn_ppq = 0.0\n for i in xrange(2, len(F0)-2):\n inn_ppq += abs(F0[i]-sum(F0[i-2:i+2].copy())/5.0)\n jitt_ppq = (np.sum(inn_ppq)\\\n /np.float64(len(F0)-1.0))\\\n /np.mean(F0)*100\n \n # Return jitter variants\n return (jitt_absolute, jitt_relative, jitt_rap, jitt_ppq)",
"def zero_crossings(x, y):\n n = len(x)\n x_zc = []\n for i in range(n-1):\n if y[i] == 0.0:\n x_zc.append(x[i])\n elif ( (y[i] > 0.0 and y[i+1] < 0.0)\n or (y[i] < 0.0 and y[i+1] > 0.0) ):\n x_zc.append(\n (y[i] * x[i+1] - y[i+1] * x[i]) / (y[i] - y[i+1]))\n return x_zc",
"def jitter(self, bb, curr_idx, jitter_amount=3, iou_threshold=0.5):\n\t\t\n\t\t# This array will contain all the jittered bb and the initial one\n\t\tjittered_bbs = [bb]\n\t\t# Randomly decide how many times we want to jitter the bb\n\t\tnb_jitter = np.random.randint(0, 10)\n\t\t\n\t\t# Array used to store the reference data\n\t\tidxs_y = [curr_idx]\n\t\tfor i in range(nb_jitter):\n\t\t\t# We generate a jittered bb\n\t\t\ttmp = np.concatenate((bb[:4] + np.random.randint(0, jitter_amount, (4)), [np.random.uniform(0.0, 0.9)]))\n\t\t\t# IoU < iou_threshold we add it to the ref array because it won't be removed later by nms\n\t\t\tif nms.get_iou(bb, tmp)\t< iou_threshold:\n\t\t\t\tidxs_y.append(i + curr_idx + 1)\n\t\t\tjittered_bbs.append(tmp)\n\n\t\t# Return the final jittered bb and the reference data\n\t\treturn np.array(jittered_bbs), np.array(idxs_y)",
"def full_jitter(value: float) -> float:\n return random.uniform(0, value)",
"def randn_jitter(img, stdval):\n translation = np.random.randn(2) * stdval[0]\n rotation = np.random.randn() * np.pi * stdval[1] / 180.\n scaling = np.random.randn() * stdval[2]\n return jitter(img, translation, rotation, scaling)",
"def _jitter(self, seconds, jitter_amount, random_func=None):\n if not random_func:\n random_func = self.m.random.random\n return seconds * (1 + random_func() * (jitter_amount * 2) - jitter_amount)",
"def rand_jitter(img, maxval):\n translation = (np.random.rand(2) * 2. - 1.) * maxval[0]\n rotation = (np.random.rand() * 2. - 1.) * np.pi * maxval[1] / 180.\n scaling = (np.random.rand() * 2. - 1.) * maxval[2]\n return jitter(img, translation, rotation, scaling)",
"def calculate_staggering():\n proposed_amounts[10] = amounts[10]\n proposed_amounts[11] = amounts[11]\n proposed_amounts[12] = amounts[12]\n for i in range(amounts[0]):\n if get_overlap() >= staggering[0]:\n proposed_amounts[0] += 1\n for i in range(amounts[1]):\n if get_overlap() >= staggering[1]:\n proposed_amounts[1] += 1\n for i in range(amounts[2]):\n if get_overlap() >= staggering[2]:\n proposed_amounts[2] += 1\n for i in range(amounts[3]):\n if get_overlap() >= staggering[3]:\n proposed_amounts[3] += 1\n for i in range(amounts[4]):\n if get_overlap() >= staggering[4]:\n proposed_amounts[4] += 1\n for i in range(amounts[5]):\n if get_overlap() >= staggering[5]:\n proposed_amounts[5] += 1\n for i in range(amounts[6]):\n if get_overlap() >= staggering[6]:\n proposed_amounts[6] += 1\n for i in range(amounts[7]):\n if get_overlap() >= staggering[7]:\n proposed_amounts[7] += 1\n for i in range(amounts[8]):\n if get_overlap() >= staggering[8]:\n proposed_amounts[8] += 1\n for i in range(amounts[9]):\n if get_overlap() >= staggering[9]:\n proposed_amounts[9] += 1",
"def find_crossing_times(\n t,\n x,\n min_delay: float = 0.0,\n rising_first: bool = True,\n min_init_dev: float = 0.1,\n thresh: float = 0.0,\n):\n\n if len(t) != len(x):\n raise ValueError(f\"len(t) ({len(t)}) and len(x) ({len(x)}) need to be the same.\")\n\n t = np.array(t)\n x = np.array(x)\n\n try:\n max_mag_x = max(abs(x))\n except:\n log.error(\"len(x): %d\", len(x))\n raise\n min_mag_x = min_init_dev * max_mag_x\n i = 0\n while abs(x[i]) < min_mag_x:\n i += 1\n assert i < len(x), \"Input signal minimum deviation not detected!\"\n x = x[i:] - thresh\n t = t[i:]\n\n sign_x = np.sign(x)\n sign_x = np.where(sign_x, sign_x, np.ones(len(sign_x))) # \"0\"s can produce duplicate xings.\n diff_sign_x = np.diff(sign_x)\n xing_ix = np.where(diff_sign_x)[0]\n xings = [t[i] + (t[i + 1] - t[i]) * x[i] / (x[i] - x[i + 1]) for i in xing_ix]\n\n if not xings:\n return np.array([])\n\n i = 0\n if min_delay:\n assert min_delay < xings[-1], f\"min_delay ({min_delay}) must be less than last crossing time ({xings[-1]}).\"\n while xings[i] < min_delay:\n i += 1\n\n log.debug(\"min_delay: %d\", min_delay)\n log.debug(\"rising_first: %d\", rising_first)\n log.debug(\"i: %d\", i)\n log.debug(\"max_mag_x: %d\", max_mag_x)\n log.debug(\"min_mag_x: %d\", min_mag_x)\n log.debug(\"xings[0]: %d\", xings[0])\n log.debug(\"xings[i]: %d\", xings[i])\n\n try:\n if rising_first and diff_sign_x[xing_ix[i]] < 0.0:\n i += 1\n except:\n log.error(\"len(diff_sign_x): %d\", len(diff_sign_x))\n log.error(\"len(xing_ix): %d\", len(xing_ix))\n log.error(\"i: %d\", i)\n raise\n\n return np.array(xings[i:])",
"def calc_95_ci(populations, t):\n mean = calc_pop_avg(populations, t)\n SEM = calc_pop_std(populations, t) / len(populations) ** .5\n return (mean, 1.96 * SEM)",
"def lagged_auto_cov(self, Xi, t=1):\n N = len(Xi)\n\n # use sample mean estimate from whole series\n Xs = np.mean(Xi)\n\n # construct copies of series shifted relative to each other,\n # with mean subtracted from values\n end_padded_series = np.zeros(N + t)\n end_padded_series[:N] = Xi - Xs\n start_padded_series = np.zeros(N + t)\n start_padded_series[t:] = Xi - Xs\n try:\n auto_cov = 1. / (N - 1) * np.sum(start_padded_series * end_padded_series)\n except:\n auto_cov = 0.0\n return auto_cov",
"def get_jitter_vect(xdelta, ydelta):\n return gnipMath.cVector2(\n random.randint(-xdelta, xdelta),\n random.randint(-ydelta, ydelta)\n )",
"def jitter(img, translation, rotation, scaling):\n # we write the program in a general fashion assuming that the image is \n # multi channel.\n img = np.atleast_3d(img)\n img_size = np.array(img.shape[:2])\n center = img_size / 2. - 0.5\n hh, ww = np.meshgrid(np.arange(img_size[0]), np.arange(img_size[1]))\n old_coor = np.hstack((hh.reshape(hh.size, 1), ww.reshape(ww.size, 1)))\\\n - center\n \n rotation_matrix = np.asarray([[ np.cos(rotation), -np.sin(rotation)],\n [ np.sin(rotation), np.cos(rotation)]])\n new_coor = np.dot(old_coor, rotation_matrix)\n new_coor -= translation\n new_coor *= 2. ** (- scaling)\n new_coor += center\n img_jittered = np.empty_like(img)\n # we use linear interpolation to create the image for better quality, and\n # use the nearest values for pixels outside the image\n for i in range(img.shape[2]):\n model = interpolate.RectBivariateSpline(np.arange(img_size[0]),\n np.arange(img_size[1]),\n img[:,:,i])\n out = model.ev(new_coor[:,0], new_coor[:,1])\n img_jittered[:,:,i] = out.reshape(img_size[1],img_size[0]).T\n # finally, if it's a single channel image, we will just return a single\n # channel image\n if img_jittered.shape[2] == 1:\n img_jittered.reshape(img_size)\n return img_jittered",
"def random_jitter(value: float) -> float:\n return value + random.random()",
"def overlap(self,mu):\r\n return 1./self.N*sum(self.pattern[mu]*self.x)",
"def zero_crossings(x):\n\tzero_cross = np.where(np.diff(np.signbit(x)))[0]\n\n\treturn zero_cross.size",
"def _create_multiplicity_knots(self):\n return [0, 0, 0, 0,\n 0.12, 0.25, 0.37,\n 0.5, 0.5,\n 0.62, 0.75, 0.87,\n 1, 1, 1, 1]",
"def find_crossings(\n t,\n x,\n amplitude=1.0,\n min_delay: float = 0.0,\n rising_first: bool = True,\n min_init_dev=0.1,\n mod_type=0,\n):\n\n assert mod_type >= 0 and mod_type <= 2, f\"ERROR: utility.find_crossings(): Unknown modulation type: {mod_type}\"\n\n xings = []\n if mod_type == 0: # NRZ\n xings.append(\n find_crossing_times(t, x, min_delay=min_delay, rising_first=rising_first, min_init_dev=min_init_dev)\n )\n elif mod_type == 1: # Duo-binary\n xings.append(\n find_crossing_times(\n t,\n x,\n min_delay=min_delay,\n rising_first=rising_first,\n min_init_dev=min_init_dev,\n thresh=(-0.5 * amplitude),\n )\n )\n xings.append(\n find_crossing_times(\n t,\n x,\n min_delay=min_delay,\n rising_first=rising_first,\n min_init_dev=min_init_dev,\n thresh=(0.5 * amplitude),\n )\n )\n elif mod_type == 2: # PAM-4 (Enabling the +/-0.67 cases yields multiple ideal crossings at the same edge.)\n xings.append(\n find_crossing_times(\n t,\n x,\n min_delay=min_delay,\n rising_first=rising_first,\n min_init_dev=min_init_dev,\n thresh=(0.0 * amplitude),\n )\n )\n else:\n raise ValueError(f\"Unknown modulation type: {mod_type}\")\n\n return np.sort(np.concatenate(xings))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the next rise/set after a given time. Fetches date by searching the chart of rise/set times as obtained from | def get_next_event_after_dt(self, start_dt, body="moon", event="rise"):
# Other methods, such as using astropy's astroplan, were too slow.
for day in self.charts[body]:
if not event in day:
continue # no rise/set that day
event_dt = self._get_datetime_from_iso(day[event]["time"])
# Found the first event after the current time.
# Assumes sequential order in the chart
if event_dt > start_dt:
azimuth = day[event]["azimuth"]
return (event_dt, azimuth) | [
"def find_next_closest():\n global month, day, year, time\n global available\n page_text = \"\"\n try:\n while page_text != available:\n increment_time()\n dateField = browser.find_element_by_class_name(\"ng_cal_input_field\")\n dateField.send_keys(month + \" \" + str(day) + \" \" + str(year))\n timeSelect = Select(browser.find_element_by_id(\"requestedTime\"))\n timeSelect.select_by_value(time)\n click_by_name(\"checkAvail\")\n page_text = browser.find_element_by_class_name(\"alert\").text\n next_date_text = browser.find_elements_by_class_name(\"alert\")[1].text\n print(\"The next available time is:\\n\" + next_date_text)\n except:\n print(\"An error occurred while finding the next closest time.\")\n sys.exit()",
"def find_next_in_rule(self, start_dt):\n\t\trule = rrule.rrule(self.repeat_period, dtstart=self.start_date, interval=self.repeat_every)\n\t\tnext_start = rule.after(start_dt)\n\t\tskip = 0\n\t\twhile get_timestamp(next_start.date()) in self.exceptions:\n\t\t\tnext_start = rule.after(next_start)\n\t\t\tskip += 1\n\t\treturn next_start.date(), skip",
"def _find_next_episode(self, episodes):\n today = date.today()\n rw = None\n timespan = None\n\n # Search for the episode which airs next (air date is the closest to now)\n for episode in episodes:\n try:\n airdate = datetime.strptime(episode['firstAired'], '%Y-%m-%d')\n airdate = airdate.date()\n if airdate >= today:\n ctimespan = airdate - today\n if timespan is None or ctimespan < timespan:\n rw = episode\n timespan = ctimespan\n except:\n continue\n return rw",
"def next_equinox(date):\n return holiday(date, pi, 0)",
"def get_next_kinko_date(cl, typ):\n # avoiding circular import\n from kinko.models import ClientPricing\n kinko_date, start_date, end_date = None, None, None\n try:\n cp = ClientPricing.objects.get(client__name=cl, active=True)\n\n freq = None\n days = None\n if typ == 'inv':\n freq = cp.invoice_frequency\n days = cp.invoice_days\n elif typ == 'rem':\n freq = cp.remittance_frequency\n days = cp.remittance_days\n delay= cp.remittance_delay\n today = datetime.datetime.now().replace(\n hour=0, minute=0, second=0, microsecond=0)\n if freq:\n # giving dtstart so that we can\n # calculate start date and end date of a kinko.\n # (using \"before\" and \"after\" functions of rrule)\n # start and end dates are used just for reference.\n rrule_obj = params_to_rrule(freq, days,\n dt_start=datetime.datetime(2013,1,1))\n if rrule_obj:\n kinko_date = rrule_obj.after(today)\n if typ == 'rem':\n # In case of remittance there is a one day delay\n # business_date_operation takes care of Sun/Sat\n # So in case of saturday remittance cannot happen on monday\n # business_date_operation will return \"monday\"\n # and so the kinko date will be the next occurence after monday\n kinko_date = rrule_obj.after(business_date_operation(today,delay))\n start_date, end_date = get_date_range(rrule_obj, kinko_date, typ)\n\n except (ClientPricing.DoesNotExist,\n ClientPricing.MultipleObjectsReturned), e:\n LOGGER.warning('ClientPricing for client %s not defined' % cl)\n return kinko_date, start_date, end_date",
"def getTomorrow():\n return getAfterXDay(1, getToday())",
"def next_draw_date(date=datetime.now()):\n wed_weekday = 2\n sat_weekday = 5\n draw_hour = 20\n days_diff = 0\n\n if date.weekday() in [wed_weekday, sat_weekday] and date.hour < draw_hour:\n # provided date is next valid lottery date\n days_diff = 0\n elif date.weekday() < wed_weekday or date.weekday() >= sat_weekday:\n days_diff = (wed_weekday-date.weekday()) % 7\n elif date.weekday() > wed_weekday or date.weekday() < sat_weekday:\n days_diff = (sat_weekday-date.weekday()) % 7\n\n new_date = date+timedelta(days=days_diff)\n return new_date.replace(hour=20, minute=00, second=00, microsecond=00)",
"def get_next_alarm_event(self):\n http = self.credentials.authorize(httplib2.Http())\n service = discovery.build('calendar', 'v3', http=http)\n\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n WebData.put('alarm_time_check', now)\n events_result = service.events().list(\n calendarId='primary', timeMin=now, maxResults=10, singleEvents=True,\n orderBy='startTime').execute()\n events = events_result.get('items', [])\n\n if events:\n for event in events:\n start = event['start'].get('dateTime', event['start'].get('date'))\n if event['summary'] == 'Alarm':\n alarm_time = dateutil.parser.parse(start)\n if alarm_time > datetime.datetime.now(pytz.utc):\n logging.info('Got alarm time %s from Google API', start)\n WebData.put('alarm_time', alarm_time.isoformat() + 'Z')\n return alarm_time\n\n logging.info('No future alarms found from Google API')\n WebData.put('alarm_time', '')",
"def next_day(self):\n for day in self.days:\n if not day.is_past:\n return day",
"def next_rising(self, body, start=None, use_center=False):\n return self._find_rise_or_set(body, start, use_center, +1, True)",
"def soq(self, date: datetime.date) -> datetime.date:\n for i in range(self.index(date), -1, -1):\n if (\n utils.quarter(self.__dates__[i]) == utils.quarter(date)\n and self.__dates__[i].year == date.year\n ):\n continue\n return self.__dates__[i + 1]\n return self.__dates__[i]",
"def get_next_work_day(self, division=None, date=None):\n date = date or datetime.date.today()\n one_day = datetime.timedelta(days=1)\n while True:\n date += one_day\n if self.is_work_day(date, division=division):\n return date",
"def get_previous_spike(spike_times, current_time, spike_type):\n\n spike_times = spike_times[::-1]\n for i in range(len(spike_times)):\n if spike_times[i][1] == current_time:\n for j in range(i + 1, len(spike_times)):\n if spike_times[j][0] == spike_type:\n if spike_times[j][1] != current_time:\n return spike_times[j][1]\n else:\n continue\n return -1",
"def pick_the_right_tle(target_date_time_object, dictionary):\n datetime_corrected = target_date_time_object.strftime(\"%Y-%m-%d\")\n try:\n picked_tle = dictionary[datetime_corrected]\n return(picked_tle)\n except KeyError as error:\n try:\n datetime_corrected_plus_1 = target_date_time_object + datetime.timedelta(1)\n target_date_time_object_plus_1 = datetime_corrected_plus_1.strftime(\"%Y-%m-%d\")\n picked_tle = dictionary[target_date_time_object_plus_1]\n return(picked_tle)\n except KeyError as error:\n try:\n datetime_corrected_plus_2 = datetime_corrected_plus_1 + datetime.timedelta(1)\n target_date_time_object_plus_2 = datetime_corrected_plus_1.strftime(\"%Y-%m-%d\")\n picked_tle = dictionary[target_date_time_object_plus_2]\n return(picked_tle)\n except KeyError as error:\n warnings.warn(\n \"I can't find a matching TLE less than 2 days from the requested date. This discrepancy is too large, please find a new TLE\")\n warnings.warn(\"The TLE that failed is {}\".format(target_date_time_object))",
"def next_run(self) -> datetime:\n\n return self.next_run_after(datetime.now().astimezone())",
"def advance(self, date, num, term):\r\n qDate = QuantLib.Date(date.dayOfMonth(), date.month(), date.year())\r\n #need to map timePeriod to ql()\r\n nqDate = super(Target,self).advance(qDate, num, term.ql())\r\n return Date.Date(nqDate.dayOfMonth(), nqDate.month(), nqDate.year())",
"def next_pass(self, body, singlepass=True):\n if not isinstance(body, EarthSatellite):\n raise TypeError(\n 'the next_pass() method is only for use with'\n ' EarthSatellite objects because of their high speed'\n )\n\n result = _libastro._next_pass(self, body)\n # _libastro behavior is singlepass=False\n if ((not singlepass)\n or (None in result)\n or (result[4] >= result[0])):\n return result\n # retry starting just before next_rising\n obscopy = self.copy()\n # Almost always 1 minute before next_rising except\n # in pathological case where set came immediately before rise\n obscopy.date = result[0] - min(1.0/1440,\n (result[0] - result[4])/2)\n result = _libastro._next_pass(obscopy, body)\n if result[0] <= result[2] <= result[4]:\n return result\n raise ValueError(\"this software is having trouble with those satellite parameters\")",
"def advance(self, date, num, term):\r\n qDate = QuantLib.Date(date.dayOfMonth(), date.month(), date.year())\r\n #need to map timePeriod to ql()\r\n nqDate = super(US,self).advance(qDate, num, term.ql())\r\n return Date.Date(nqDate.dayOfMonth(), nqDate.month(), nqDate.year())",
"def find_next_open_time(hours_dict):\n # Assumes all days are defined\n open_hours = hours_dict[0][\"open\"]\n if len(open_hours) < 7:\n return \"\", \"\"\n today = datetime.datetime.today()\n weekday = today.weekday()\n current_time_string = \"{}{}\".format(today.hour, today.minute)\n # If already closed, return next day's opening time\n if open_hours[weekday][\"end\"] < current_time_string:\n opening_time_string = open_hours[(weekday+1) % 7][\"start\"]\n else:\n opening_time_string = open_hours[weekday][\"start\"]\n return opening_time_string[:2], opening_time_string[2:]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the current moon phase and waxing/waning information. | def get_moon_phase(self):
# These numbers are just guesses.
phases = {
"new": (0, 0.005),
"crescent": (0.005, 0.47),
"quarter": (0.47, 0.53),
"gibbous": (0.53, 0.9925),
"full": (0.9925, 1),
}
now_dt = datetime.datetime.now()
illumination = moon.moon_illumination(Time(now_dt))
for phase, (lower, upper) in phases.items():
if lower < illumination <= upper:
current_phase = phase
break
yesterday = Time(now_dt - datetime.timedelta(hours=1))
trend = (
"waning" if moon.moon_illumination(yesterday) > illumination else "waxing"
)
return (trend, current_phase, illumination) | [
"def moon(self) -> int:\n return self._moon",
"def moon_phase(self):\n # ('new', 'waxing crescent', 'first quarter', 'waxing gibbous', 'full ', 'waning gibbous', 'third quarter', 'waning crescent')\n # 'new', 'first quarter', 'full ', 'third quarter'\n days = (self.year * self.length_of_year) + self.day_of_year\n day = (days % self.lunar_cycle) + 1\n if day == self.lunar_cycle / 4:\n if day == (days % ((self.lunar_cycle * 4) + 3)) + 1 and random.choice((True, False)):\n return 'solar'\n return 'new'\n if day == (self.lunar_cycle / 4) * 2:\n return '1st'\n if day == (self.lunar_cycle / 4) * 3:\n if day == (days % ((self.lunar_cycle * 5) + 4)) + 1 and random.choice((True, False, False)):\n return 'lunar'\n return 'full'\n if day == self.lunar_cycle:\n return '3rd'\n return ''",
"def get_terror_waves_info(self):",
"def getMission (self):\n return self.mission.getValue ()",
"def MOR_VOC(self):\n return self.__Morning_VOC",
"def get_current_mood(sim_info: SimInfo) -> Mood:\n return sim_info.get_mood()",
"def read_moisture(self) -> float:\n return self.instr.read_register(self.readings_map['soil_moist'], 1)",
"def status(self):\n print(\"My power is at \"+ str(volt()) + \" volts\")\n print('Left speed set to: '+str(self.LEFT_SPEED)+' // Right set to: '+str(self.RIGHT_SPEED))\n print('My MIDPOINT is set to: '+ str(self.MIDPOINT))\n print('My safe stop distance is ' + str(self.SAFE_STOP_DIST) + 'cm')\n print('My hard stop distance is ' + str(self.HARD_STOP_DIST) + 'cm')",
"def moy(self):\n return self.intHOY * 60 + self.minute # minute of the year",
"def measure_p(self):\n self._ser.write('MEAS:POW?')\n __value = float(self._ser.read()[:-1])\n print(f'IT6861A OUT Power: {__value}W')\n return __value",
"def MOR_TOT(self):\n return self.__Morning_Total",
"def measure_p(self):\n self._ser.write('MEAS:POW?')\n __value = float(self._ser.read()[:-1])\n print(f'C62012P OUT Power: {__value}W')\n return __value",
"def moon_phase_angle(self):\n # Trigger calculation if necessary.\n _ = self.alt_az_frame\n elongation = self._sun_radec.separation(self._moon_radec)\n return np.arctan2(\n self._sun_radec.distance*np.sin(elongation),\n self._moon_radec.distance -\n self._sun_radec.distance * np.cos(elongation)).to(u.deg).value",
"def phase_data(self):\n self.instrument.write(\"PHAS\") # Set display to phase format\n # time.sleep(5)\n try:\n start_time = time.perf_counter_ns()\n data_degree = self.instrument.query(\"OUTPFORM\") # Output format is a list of form (degrees, 0)\n end_time = time.perf_counter_ns()\n total_time = (end_time - start_time)/(10 ** 9)\n # print(f\"Phase data time in s: {total_time}\")\n except Exception as e:\n print(e)\n return False\n return data_degree",
"def get_wind_directions(self):\n return self.data.get('WindDirection', None)",
"def next_full_moon(date):\n return _find_moon_phase(date, twopi, pi)",
"def info(self):\n return self.integration.info",
"def phase(self):\n return self.data",
"def moon_shower(self):\n days = (self.year * self.length_of_year) + self.day_of_year\n day = (days % self.lunar_cycle) + 1 # + 1 cause?\n full = int((self.lunar_cycle / 4) * 3) + 1\n near = range(full - 2, full + 3) # 2 days before and after full moon.\n try:\n chance = (3, 9, 27, 9, 3)[near.index(day)]\n if self._lunar_meteors:\n chance *= 6\n if chance >= random.randint(1, 100):\n self._lunar_meteors = True\n return 'lunar meteors'\n except ValueError:\n self._lunar_meteors = False\n return ''"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test Gauss-Legendre quadrature nodes and weights | def test_gaussLeg():
sc = simc.SimClass()
n,w = sc.gaussLeg(16,-1,1)
w_corr = np.array([0.027152459411754, 0.062253523938648, 0.095158511682493, \
0.124628971255534, 0.149595988816577, 0.169156519395003, 0.182603415044924, \
0.189450610455069, 0.189450610455069, 0.182603415044924, 0.169156519395003, \
0.149595988816577, 0.124628971255534, 0.095158511682493, 0.062253523938648, \
0.027152459411754])
n_corr = np.array([-0.989400934991650, -0.944575023073233, -0.865631202387832,\
-0.755404408355003, -0.617876244402644, -0.458016777657227, -0.281603550779259,\
-0.095012509837637, 0.095012509837638, 0.281603550779259, 0.458016777657228, \
0.617876244402644, 0.755404408355003, 0.865631202387831, 0.944575023073233, \
0.989400934991650])
assert np.abs(sum(w)-2) < 1e-13
assert max(np.abs(w-w_corr)) < 1e-13
assert max(np.abs(n-n_corr)) < 1e-13 | [
"def evaluate_basis_gauss(self):\n phi = np.zeros((len(self.x), self.N_s))\n dphi_w = np.zeros((len(self.x), self.N_s))\n\n for n in range(self.N_s):\n\n # Get the Legendre polynomial of order n and its gradient\n l = L.basis(n)\n dl = l.deriv()\n\n # Evaluate the basis at the Gaussian nodes\n phi[:, n] = leg.legval(self.x, l.coef)\n\n # Evaluate the gradient at the Gaussian nodes and multiply by the\n # weights\n dphi_w[n, :] = leg.legval(self.x, dl.coef) * self.w\n\n return phi, dphi_w",
"def GaussLegendre(num_points):\n n = num_points\n points = np.zeros(n)\n weights = np.zeros(n)\n\n if n > 1:\n try:\n x[n] # x is defined below (global variable)\n except KeyError:\n raise ValueError(\n 'Gauss-Legendre rule with %d points not available' % n)\n\n if n == 1:\n points[0] = 0\n weights[0] = 2\n elif n % 2 == 0:\n for i in range(len(x[n])):\n points[n//2+i] = x[n][i]\n points[n//2-1-i] = -x[n][i]\n weights[n//2+i] = w[n][i]\n weights[n//2-1-i] = w[n][i]\n else:\n for i in range(len(x[n])):\n points[n//2+i] = x[n][i]\n points[n//2-i] = -x[n][i]\n weights[n//2+i] = w[n][i]\n weights[n//2-i] = w[n][i]\n return points, weights",
"def test_gaussian_node(self):\n means = [0.0, 0.5, 1.0]\n stds = [1.0, 2.0, 3.0]\n gauss0 = GaussianNode(mean=means[0], std=stds[0], scope=0)\n gauss1 = GaussianNode(mean=means[1], std=stds[1], scope=1)\n gauss2 = GaussianNode(mean=means[2], std=stds[2], scope=2)\n sample1 = np.array([1, 2, 3])\n sample2 = np.array([10, 20, 30])\n x = torch.Tensor([sample1, sample2])\n\n # Get results\n res_gauss0 = gauss0(x)\n res_gauss1 = gauss1(x)\n res_gauss2 = gauss2(x)\n\n # Expect results from normal distributions\n normal0 = torch.distributions.Normal(loc=means[0], scale=stds[0])\n normal1 = torch.distributions.Normal(loc=means[1], scale=stds[1])\n normal2 = torch.distributions.Normal(loc=means[2], scale=stds[2])\n\n exp_gauss0 = normal0.log_prob(torch.Tensor([1, 10]))\n exp_gauss1 = normal1.log_prob(torch.Tensor([2, 20]))\n exp_gauss2 = normal2.log_prob(torch.Tensor([3, 30]))\n\n # Assertions\n self.assertEqual(len(res_gauss0.tolist()), 2)\n self.assertEqual(len(res_gauss1.tolist()), 2)\n self.assertEqual(len(res_gauss2.tolist()), 2)\n\n # Assert that results are numerically equal\n self.assertTrue(np.isclose(res_gauss0.tolist(), exp_gauss0, atol=DELTA).all())\n self.assertTrue(np.isclose(res_gauss1.tolist(), exp_gauss1, atol=DELTA).all())\n self.assertTrue(np.isclose(res_gauss2.tolist(), exp_gauss2, atol=DELTA).all())",
"def test_calc_hg(self):\n W = np.array([[1, 0, 0], [-1, 0, 0], [1, 0, 0]])\n gnn.W = W\n result = gnn.calc_hg(graph)\n expected = expected = np.array([18, 0, 18])\n assert_array_equal(result, expected)",
"def gauss_points(el_type, n):\n\n if el_type == 'Tri6':\n # one point gaussian integration\n if n == 1:\n weights = [1]\n gps = np.array([[1.0 / 3, 1.0 / 3, 1.0 / 3]])\n\n # three point gaussian integration\n elif n == 3:\n weights = [1.0 / 3, 1.0 / 3, 1.0 / 3]\n gps = np.array([\n [2.0 / 3, 1.0 / 6, 1.0 / 6],\n [1.0 / 6, 2.0 / 3, 1.0 / 6],\n [1.0 / 6, 1.0 / 6, 2.0 / 3]\n ])\n\n # six point gaussian integration\n elif n == 6:\n g1 = 1.0 / 18 * (8 - np.sqrt(10) + np.sqrt(38 - 44 * np.sqrt(2.0 / 5)))\n g2 = 1.0 / 18 * (8 - np.sqrt(10) - np.sqrt(38 - 44 * np.sqrt(2.0 / 5)))\n w1 = (620 + np.sqrt(213125 - 53320 * np.sqrt(10))) / 3720\n w2 = (620 - np.sqrt(213125 - 53320 * np.sqrt(10))) / 3720\n\n weights = [w2, w2, w2, w1, w1, w1]\n gps = np.array([\n [1 - 2 * g2, g2, g2],\n [g2, 1 - 2 * g2, g2],\n [g2, g2, 1 - 2 * g2],\n [g1, g1, 1 - 2 * g1],\n [1 - 2 * g1, g1, g1],\n [g1, 1 - 2 * g1, g1]\n ])\n\n return (weights, gps)",
"def test_2():\n d = 3\n x = np.array([1, 1.5, 2])\n\n grad_val = mt_obj.griewank_grad(x, d)\n assert(np.all(np.round(grad_val, 6) == np.array([0.166577,\n 0.135511,\n 0.140324])))",
"def test_analytic_weighted_nlls(self):\n e = np.array([1, 2, 1, 3, 1])\n self.fitting_problem.data_e = e\n self.cost_func = WeightedNLLSCostFunc(self.fitting_problem)\n self.cost_func.jacobian = self.jacobian\n self.cost_func.hessian = self.hessian\n eval_result, _ = self.cost_func.hes_res(params=self.params)\n actual_hessian = grad2_r_weighted_nlls(\n self.fitting_problem.data_x, e, self.params)\n\n self.assertTrue(np.isclose(actual_hessian, eval_result).all())",
"def gauss(sigma):\n\n return Gx, x",
"def test_3():\n d = 3\n x = np.zeros((d))\n func_val = mt_obj.griewank_func(x, d)\n assert(func_val == 0)\n assert(np.all(mt_obj.griewank_grad(x, d) == np.zeros((d))))",
"def test_sum_hessian(problem):\n problem.set_up()\n skip_BCEWithLogitsLoss(problem) # TODO Implement _sum_hessian for BCEWithLogitsLoss\n\n backpack_res = BackpackDerivatives(problem).sum_hessian()\n autograd_res = AutogradDerivatives(problem).sum_hessian()\n\n check_sizes_and_values(autograd_res, backpack_res)\n problem.tear_down()",
"def Gauss_Legendre(N):\n\n\t\n\tL = np.zeros((N, N))\n\tweights = np.zeros(N)\n\tmesh_points = np.zeros(N)\n\n\t# Roots of Legendre polynomial of degree N\n\tpoly_roots = scipy.special.roots_legendre(N)\n\t\t\n\n\tfor i in range(N):\n\t\tmesh_points[i] = poly_roots[0][i]\n\n\t\t\n\t# Compute matrix L\n\tfor k in range(N):\n\t\tL_poly = scipy.special.legendre(k)\n\t\n\t\tfor j in range(N):\t\t\n\t\t\tx = mesh_points[j]\n\t\n\t\t\tL[j, k] = L_poly(x)\n\n\t# Inverse of L\n\tL_inv = np.linalg.inv(L)\n\n\t# Compute weights\n\tfor k in range(N):\n\t\tweights[k] = 2*L_inv[0,k]\n\n\n\treturn weights, mesh_points",
"def testAddition(self):\n pp = 1000\n dd = 10\n kk = 15\n tol = 1.0e-4\n\n data = np.zeros((pp,dd),dtype = float)\n fn = np.zeros((pp),dtype = float)\n test = np.zeros((dd),dtype = float)\n sigma = np.zeros((1),dtype = float)\n\n xx=gp.SquaredExpCovariogramD()\n xx.setEllSquared(5.0)\n\n f = open(\"tests/data/gp_additive_test_root.sav\")\n ff = f.readlines()\n f.close()\n\n\n for i in range(len(ff)):\n s=ff[i].split()\n fn[i] = float(s[10])\n for j in range(10):\n data[i][j] = float(s[j])\n\n #establish the Gaussian Process\n try:\n gg = gp.GaussianProcessD(data,fn,xx)\n except pex.LsstCppException, e:\n print e.args[0].what()\n\n gg.setLambda(0.002)\n\n #now add new points to it and see if GaussianProcess.interpolate performs\n #correctly\n f = open(\"tests/data/gp_additive_test_data.sav\")\n ff = f.readlines()\n f.close()\n for z in range(len(ff)):\n s = ff[z].split()\n for i in range(dd):\n test[i] = float(s[i])\n mushld = float(s[dd])\n try:\n gg.addPoint(test,mushld)\n except pex.LsstCppException,e:\n print e.args[0].what()\n\n\n f = open(\"tests/data/gp_additive_test_solutions.sav\")\n ff = f.readlines()\n f.close()\n\n worstMuErr = -1.0\n worstSigErr = -1.0\n\n for z in range(len(ff)):\n s = ff[z].split()\n for i in range(dd):\n test[i] = float(s[i])\n\n mushld = float(s[dd + kk])\n sigshld = float(s[dd + kk + 1])\n\n mu = gg.interpolate(sigma,test,kk)\n\n err = (mu - mushld)\n if mushld != 0:\n err = err/mushld\n if err < 0.0:\n err = -1.0 * err\n if z == 0 or err > worstMuErr:\n worstMuErr = err\n\n err = (sigma[0] - sigshld)\n if sigshld != 0:\n err = err/sigshld\n if err < 0.0:\n err = -1.0 * err\n if z == 0 or err > worstSigErr:\n worstSigErr = err\n\n\n print \"\\nThe errors for the test of adding points to the Gaussian process\\n\"\n print \"worst mu error \",worstMuErr\n print \"worst sig2 error \",worstSigErr\n\n\n self.assertTrue(worstMuErr < tol)\n self.assertTrue(worstSigErr < tol)",
"def gradient_200(weights, dev):\n\n @qml.qnode(dev, interface=None, diff_method=\"parameter-shift\")\n def circuit(w):\n for i in range(3):\n qml.RX(w[i], wires=i)\n\n qml.CNOT(wires=[0, 1])\n qml.CNOT(wires=[1, 2])\n qml.CNOT(wires=[2, 0])\n\n qml.RY(w[3], wires=1)\n\n qml.CNOT(wires=[0, 1])\n qml.CNOT(wires=[1, 2])\n qml.CNOT(wires=[2, 0])\n\n qml.RX(w[4], wires=2)\n\n return qml.expval(qml.PauliZ(0) @ qml.PauliZ(2))\n\n gradient = np.zeros([5], dtype=np.float64)\n hessian = np.zeros([5, 5], dtype=np.float64)\n\n # QHACK #\n def apply_hessian_parameter_shift(qnode, params, i, j, unshifted, shift=np.pi/4):\n if i != j:\n shifted_plus = params.copy()\n shifted_plus[i] += shift\n shifted_plus[j] += shift\n forward = qnode(shifted_plus) # forward evaluation\n\n shifted_min_plu = params.copy()\n shifted_min_plu[i] -= shift\n shifted_min_plu[j] += shift\n min_plu = qnode(shifted_min_plu)\n\n shifted_plu_min = params.copy()\n shifted_plu_min[i] += shift\n shifted_plu_min[j] -= shift\n plu_min = qnode(shifted_plu_min)\n\n shifted_minus = params.copy()\n shifted_minus[i] -= shift\n shifted_minus[j] -= shift\n backward = qnode(shifted_minus) # backward evaluation\n\n return (forward - min_plu - plu_min + backward) / ((2*np.sin(shift))**2), None\n else:\n shifted_plus = params.copy()\n shifted_plus[i] += np.pi/2\n forward = qnode(shifted_plus)\n\n shifted_minus = params.copy()\n shifted_minus[i] -= np.pi/2\n backward = qnode(shifted_minus)\n result_hessian = (forward - 2*unshifted + backward) / 2\n result_gradient = (forward - backward) / 2\n\n return result_hessian, result_gradient\n\n unshifted = circuit(weights)\n for i in range(5):\n for j in range(5):\n if i <= j:\n hess, grad = apply_hessian_parameter_shift(circuit, weights, i, j, unshifted, shift=np.pi / 4)\n if i == j:\n gradient[i] = grad\n\n hessian[i][j] = hess\n hessian[j][i] = hess\n\n # QHACK #\n\n return gradient, hessian, circuit.diff_options[\"method\"]",
"def test_qnode_gradient_repeated_gate_parameters(self, tol):\n par = [0.8, 1.3]\n\n def qf(x, y):\n qml.RX(np.pi / 4, wires=[0])\n qml.Rot(y, x, 2 * x, wires=[0])\n return qml.expval(qml.PauliX(0))\n\n dev = qml.device(\"default.qubit\", wires=1)\n q = qml.QNode(qf, dev)\n grad_A = q.jacobian(par, method=\"A\")\n grad_F = q.jacobian(par, method=\"F\")\n\n # the different methods agree\n assert np.allclose(grad_A, grad_F, atol=tol, rtol=0)",
"def build_quadrature(self) :\n\n# Compute the Gauss-Legendre quadrature\n [self.polar_nodes,self.polar_weight] = scipy.special.orthogonal.p_roots(self.sn) \n\n# Compute the Chebyshev quadrature\n [self.azith_nodes,self.azith_weight] = self.chebyshev()\n\n self.cos_theta = np.zeros((self.sn/2,1))\n for i in xrange(0,self.sn/2) :\n self.cos_theta[i] = np.real(self.polar_nodes[self.sn/2+i])\n self.sin_theta = np.sqrt(1-self.cos_theta**2)\n\n# Compute omega on one octant\n self.build_octant()\n\n# Compute omega by deploying the octant \n self.deploy_octant()\n\n# Compute the spherical harmonics\n self.compute_harmonics()\n\n# Compute D\n if self.galerkin == True :\n self.D = scipy.linalg.inv(self.M)\n else :\n self.D = np.dot(self.M.transpose(),np.diag(self.weight))",
"def gaussLegQuadSet(sNords):\n legWeights = np.zeros(sNords + 1)\n pnp1s = np.zeros(sNords)\n pprimes = np.zeros(sNords)\n legWeights[sNords] = 1.\n mus = np.polynomial.legendre.legroots(legWeights)\n for i, mu in enumerate(mus):\n pprimes[i] = spc.lpn(sNords, mu)[1][-1]\n pnp1s[i] = spc.lpn(sNords + 1, mu)[0][-1]\n weights = -2. / ((sNords + 1) * pnp1s * pprimes)\n #\n ordinateSet = np.array([mus[::-1], weights])\n return ordinateSet",
"def random_geometric_gauss(N, dim=2, sigma=1, grad=0, torus=0):\n G = nx.Graph()\n G.name = \"Random Geometric Graph\"\n G.add_nodes_from(list(range(N)))\n\n # sample node position uniformly\n for n in G:\n G.node[n]['pos'] = rnd.random(dim)\n if dim == 3:\n for n in range(len(G.nodes())):\n G.node[n]['pos'][2] = (1 - G.node[n]['pos'][2] ** grad) * .25\n nodes = G.nodes(data=True)\n # create the connections\n dmax = 0\n i = 0\n s = .5\n prob = rnd.random(N * N / 2).tolist()\n while nodes:\n\n u, du = nodes.pop()\n print(u)\n pu = du['pos']\n for v, dv in nodes:\n i += 1\n pv = dv['pos']\n d = sum(((a - b) ** 2 for a, b in zip(pu, pv)))\n if dim == 3:\n dxy = sum(((a - b) ** 2 for a, b in zip(pu[:-1], pv[:-1])))\n dz = (pu[-1] - pv[-1]) ** 2\n d = (s * dxy + (1 - s) * dz) * 1. / s\n if torus:\n d = sum(((min(abs(a - b), 1 - abs(a - b))) ** 2 for a, b in zip(pu, pv)))\n if d < .5 ** 2:\n p = scipy.stats.chi2(1).cdf(d / sigma)\n\n if p <= prob.pop():\n G.add_edge(u, v)\n dmax = max(d, dmax)\n return G",
"def test_gaussian(self):\n self.logTestName()\n res = self.H.is_gaussian()\n self.assertTrue(res)",
"def _getWeights(self, a, b):\n assert a <= b, 'Interval boundaries are corrupt, got %f and %f' % (a, b)\n M = self.num_nodes\n weights = np.zeros(M)\n\n # Define temporary integration method using built-in Gauss-Legendre\n # -> will need this to compute the integral from a to b over the Lagrangian polynomials\n [nodes_m, weights_m] = self._GaussLegendre(np.ceil(M / 2), a, b)\n\n # for each node, build Lagrangian polynomial in Newton base, evaluate at temp. integration nodes and integrate\n for j in np.arange(M):\n coeff = np.zeros(M)\n coeff[j] = 1.0\n poly = self._poly_newton(coeff)\n eval_pj = self._evaluate_horner(nodes_m, poly)\n weights[j] = self.evaluate(weights_m, eval_pj)\n\n return weights"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test computation of the rhs function. | def test_rhsf():
sc = simc.SimClass()
sc.createInterface()
r = sc.rhsf(0)
assert np.abs(r - np.real((-1/(3+3j) - 1/(-2.5-2.5j)))) < 1e-13
r = sc.rhsf()
assert max(np.abs(r-np.real(1/(sc.zDrops-(3+3j)) + 1/(sc.zDrops-(-2.5-2.5j))))) < 1e-13 | [
"def test(self, x, y):\n return self.model.evaluate(x, y)",
"def test_scipy_eval(self):\n for method in ['2-point',\n '3-point',\n 'cs']:\n hes = Scipy(self.cost_func.problem, self.cost_func.jacobian)\n hes.method = method\n self.cost_func.hessian = hes\n eval_result = self.cost_func.hes_cost(params=self.params)\n self.assertTrue(np.isclose(self.actual, eval_result).all())",
"def test_scipy_eval(self):\n for method in ['2-point',\n '3-point',\n 'cs']:\n hes = Scipy(self.cost_func.problem, self.jacobian)\n hes.method = method\n eval_result = hes.eval(params=self.params)\n self.assertTrue(np.isclose(self.actual_hessian, eval_result).all())",
"def test_evaluate(self):\n\t\tpass",
"def test_math(self):\n self.assertTrue((1 + 1) == 2)",
"def test_exam_lsolve2a(self):\n\n a = symbol('a');\n b = symbol('b');\n x = symbol('x');\n y = symbol('y');\n eqns = [a*x + b*y == 3, x-y==b];\n solution = lsolve(eqns, [x,y]);\n solx = solution[0].rhs();\n soly = solution[1].rhs();\n realx = (3+pow(b,2))/(a+b);\n realy = (3-a*b)/(a+b);\n result = (solx-realx).normal().is_zero() and (soly-realy).normal().is_zero() \n self.assertEqual(result,1)",
"def test_solve(self):\n # Make sure vecs are initialized to zero\n self.zero_tacs_vecs()\n\n # solve\n func_vals = self.run_solve()\n\n # Test that linear solver residual is sufficiently small\n linSolveRes = np.real(self.gmres.getResidualNorm())\n converged = (\n linSolveRes < self.linSolveAtol\n or linSolveRes < self.linSolveRtol * np.real(self.res0.norm())\n )\n self.assertTrue(converged, \"Linear solver did not converge\")\n\n # Test that linear solver took between 1 and subspce * restarts iterations\n numIters = self.gmres.getIterCount()\n self.assertTrue(numIters > 0 and numIters <= self.linSolveIterLimit)\n\n # Test functions values against historical values\n np.testing.assert_allclose(\n func_vals, self.func_ref, rtol=self.rtol, atol=self.atol\n )",
"def testIf(self):\n input_data = {\n \"x\": constant_op.constant([1., 2.], shape=[1, 2]),\n \"b\": constant_op.constant(True)\n }\n\n weights = variables.Variable([[0.1, 0.2], [0.3, 0.4]], dtype=dtypes.float32)\n\n def true_fn(x):\n return math_ops.matmul(x, weights)\n\n def false_fn(x):\n return math_ops.add(x, weights)\n\n @def_function.function(input_signature=[\n tensor_spec.TensorSpec(shape=[1, 2], dtype=dtypes.float32),\n tensor_spec.TensorSpec(shape=(), dtype=dtypes.bool)\n ])\n def model(x, b):\n return cond.cond(\n b, true_fn=lambda: true_fn(x), false_fn=lambda: false_fn(x))\n\n root, output_func = self._freezeModel(model)\n self._testConvertedFunction(root, root.f, output_func, input_data)",
"def testIf(self):\n with ops.Graph().as_default():\n with session_lib.Session() as sess:\n input_data = {\n \"x\": constant_op.constant([1., 2.], shape=[1, 2]),\n \"b\": constant_op.constant(True)\n }\n\n weights = variables.Variable([[0.1, 0.2], [0.3, 0.4]],\n dtype=dtypes.float32)\n\n def true_fn(x):\n return math_ops.matmul(x, weights)\n\n def false_fn(x):\n return math_ops.add(x, weights)\n\n @def_function.function(input_signature=[\n tensor_spec.TensorSpec(shape=[1, 2], dtype=dtypes.float32),\n tensor_spec.TensorSpec(shape=(), dtype=dtypes.bool)\n ])\n def model(x, b):\n return cond.cond(\n b, true_fn=lambda: true_fn(x), false_fn=lambda: false_fn(x))\n\n root, output_func = self._freezeModel(model)\n self._testConvertedFunction(sess, root, root.f, output_func, input_data)",
"def f_tests(Y, X_f, X_r):\n B_r = npl.pinv(X_r).dot(Y)\n rank_r = npl.matrix_rank(X_r)\n E_r = Y - X_r.dot(B_r)\n B_f = npl.pinv(X_f).dot(Y)\n rank_f = npl.matrix_rank(X_f)\n E_f = Y - X_f.dot(B_f)\n SSR_r = np.sum(E_r ** 2, axis=0)\n SSR_f = np.sum(E_f ** 2, axis=0)\n nu_1 = rank_f - rank_r\n nu_2 = X_f.shape[0] - rank_f\n return (SSR_r - SSR_f) / nu_1 / (SSR_f / nu_2), nu_1, nu_2",
"def test_right_hand_side_operations(self):\n operators = (\n (\"__add__\", operator.add, True),\n (\"__sub__\", operator.sub, False),\n (\"__mul__\", operator.mul, True),\n (\"__truediv__\", operator.truediv, False),\n (\"__floordiv__\", operator.floordiv, False),\n (\"__mod__\", operator.mod, False),\n (\"__pow__\", operator.pow, False),\n )\n tensor = ht.float32([[1, 4], [2, 3]])\n num = 3\n for attr, op, commutative in operators:\n try:\n func = tensor.__getattribute__(attr)\n except AttributeError:\n continue\n self.assertTrue(callable(func))\n res_1 = op(tensor, num)\n res_2 = op(num, tensor)\n if commutative:\n self.assertTrue(ht.equal(res_1, res_2))\n # TODO: Test with split tensors when binary operations are working properly for split tensors",
"def test_RESCAL():\n testing_function('rescal')",
"def test_compute(self):\n # Setup\n real_data = pd.Series(['a', 'b', 'c', 'a', 'a', 'b'])\n synthetic_data = pd.Series(['a', 'b', 'c', 'a', 'b', 'c'])\n\n metric = TVComplement()\n\n # Run\n result = metric.compute(real_data, synthetic_data)\n\n # Assert\n assert result == 0.8333333333333333",
"def test_evalF1_2(self):\n r = evalF1([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 0.135)\n ref = 0.640058615996223\n self.assertTrue(np.allclose((r,), (ref,)))",
"def evaluate(model, x_test, y_test):\n scores = model.evaluate(x_test, y_test, verbose=0)\n return scores",
"def test_DistMult():\n testing_function('distmult')",
"def test_compare_SphericalSLD_OnionExpShell(self):\r\n note = \"\\n*****Note: This test was passes since Nov. 1st, 2010...\"\r\n print note\r\n # set params\r\n self.model.setParam(\"npts_inter\", 35)\r\n self.model.setParam(\"rad_core0\", 100)\r\n self.model.setParam(\"thick_inter0\", 200)\r\n self.model.setParam(\"nu_inter0\", 4)\r\n # Rexp func\r\n self.model.setParam(\"func_inter0\", 3)\r\n self.model.setParam(\"thick_inter1\", 200)\r\n self.model.setParam(\"nu_inter1\", 4)\r\n self.model.setParam(\"func_inter1\", 3)\r\n # set A_shell=1\r\n self.model2.setParam(\"sld_core0\", 2.07e-006)\r\n # change the function to flat function\r\n self.model2.setParam(\"rad_core0\", 100)\r\n self.model2.setParam(\"thick_shell1\", 200)\r\n self.model2.setParam(\"sld_out_shell1\", 4e-006)\r\n self.model2.setParam(\"sld_in_shell1\", 2.07e-006)\r\n self.model2.setParam(\"A_shell1\", -4)\r\n self.model2.setParam(\"thick_shell2\", 100)\r\n self.model2.setParam(\"sld_out_shell2\", 4e-006)\r\n self.model2.setParam(\"sld_in_shell2\", 4e-006)\r\n self.model2.setParam(\"A_shell2\", 0)\r\n self.model2.setParam(\"thick_shell3\", 200)\r\n self.model2.setParam(\"sld_out_shell3\", 1e-006)\r\n self.model2.setParam(\"sld_in_shell3\", 4e-006)\r\n self.model2.setParam(\"A_shell3\", -4)\r\n self.model2.setParam(\"sld_solv\", 1e-006)\r\n \r\n #sphericalsld model runs\r\n model_run_0_1 = self.model.run(0.1)\r\n model_run_0_01 = self.model.run(0.01)\r\n model_run_0_001 = self.model.run(0.001)\r\n #onionexp model runs\r\n model2_run_0_1 = self.model2.run(0.1)\r\n model2_run_0_01 = self.model2.run(0.01)\r\n model2_run_0_001 = self.model2.run(0.001)\r\n import time\r\n st = time.time()\r\n qs = []\r\n qs = [i/10000 for i in range(1,1000)]\r\n out = map(self.model.run,qs)\r\n print time.time()-st\r\n #Compare exp(A=0) to flat (where A_shell is null) function\r\n self.assertAlmostEqual(self.model.run(0.1),self.model2.run(0.1),4)\r\n self.assertAlmostEqual(self.model.run(0.01),self.model2.run(0.01),0)\r\n self.assertAlmostEqual(self.model.run(0.001),self.model2.run(0.001),-3)",
"def test_exam_lsolve2b(self):\n result = 0\n x = symbol('x')\n y = symbol('y')\n eqns = [3*x+y==7, 2*x-5*y==8]\n solution = lsolve(eqns, [x,y])\n solx = solution[0].rhs()\n soly = solution[1].rhs()\n # It should have returned x==43/17 and y==-10/17\n if(solx != numeric(43,17) or soly != numeric(-10,17)):\n result = 1\n print \"solution of the system \", [str(item) for item in eqns], \" for [x,y] \"\n print \"erronously returned \", [str(item) for item in solution]\n self.assertEqual(result,0)",
"def rhs(self):\n return self.M.solve(self.F)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Print out a 4x4 grid in 5-character-wide columns within a box | def print_grid (grid):
print("+--------------------+")
for i in range(4):
print("|",end='')
for j in range(4):
if(grid[i][j]==0):
print("{:<5}".format(" "),end='')
else:
print("{:<5}".format(grid[i][j]),end='')
print("|",end='')
print()
print("+--------------------+") | [
"def print_grid (grid):\r\n print(\"+--------------------+\")\r\n for y in range(4):\r\n print(\"|\", end=\"\")\r\n for x in range(4):\r\n if grid[y][x]==0:\r\n print(\" \"*5, end=\"\")\r\n else:\r\n print(\"{0:<5}\".format(grid[y][x]), end=\"\")\r\n print(\"|\") \r\n print(\"+--------------------+\")",
"def display_grid():\n\n print(f\"{grid[0]} {grid[1]} {grid[2]}\")\n print(f\"{grid[3]} {grid[4]} {grid[5]}\")\n print(f\"{grid[6]} {grid[7]} {grid[8]}\")",
"def print_grid(self):\n for i in range(0,6):\n print('[%s]' % ' , '.join(map(str,self.grid_row[i])))",
"def print_board():\n \n print \"\"\n print \" | | \"\n print \" \" + grid_status[(1,1)] + \" | \" + grid_status[(1,2)] + \" | \" + grid_status[(1,3)]\n print \"___|___|___\"\n print \" | | \"\n print \" \" + grid_status[(2,1)] + \" | \" + grid_status[(2,2)] + \" | \" + grid_status[(2,3)]\n print \"___|___|___\"\n print \" | | \"\n print \" \" + grid_status[(3,1)] + \" | \" + grid_status[(3,2)] + \" | \" + grid_status[(3,3)]\n print \" | | \"\n print \"\"",
"def box(N):\n print()\n for i in range(N):\n for j in range(N):\n print('*', end='')\n print()",
"def show_grid(self):\n print grid_text(self.grid)\n print \"\"",
"def print_grid(self):\n\n print('\\n'.join([' '.join(it) for it in self.game_grid]))",
"def printboard(self) -> None:\r\n print(' ', end=' ') # print out column guides at the top\r\n for i in range(self.boardcols):\r\n print(i + 1, end=' ')\r\n print()\r\n\r\n cnt = 1\r\n for row in self.board:\r\n print('{} '.format(cnt), end='') # print out row guides at the left\r\n cnt += 1\r\n for col in row:\r\n if col == ' ':\r\n print('.', end=' ')\r\n else:\r\n print(col, end=' ')\r\n print()\r\n\r\n print('Black: {} White: {}'.format(self.black_score, self.white_score))",
"def starbox(width, height):\n print(\"*\" * width) #print top edge of box\n\n # print sides of box\n for _ in range(height-2):\n print(\"*\" + \" \" * (width-2) + \"*\") \n\n print(\"*\" * width) #print bottom edge of box",
"def output(self):\n self.numList.reverse()\n def lengthFinder(columnNumber):\n currentLength=0\n longestLength=0\n for i in range(columnNumber, len(self.numList),5):\n currentLength=len(self.numList[i])\n if currentLength>longestLength:\n longestLength=currentLength\n return longestLength+1\n columnWidth=[]\n for i in range(5):\n columnWidth.append(lengthFinder(i))\n for i in range(len(self.numList)):\n print('{0:>{width}}'.format(self.numList[i], width=columnWidth[i%5]), end=' ')\n if i%5==4:\n print()\n print()",
"def printSudoku(grid):\n\titeration = 0\n\tfor i in grid:\n\t\tprint(i[0], i[1], i[2], \"||\", i[3], i[4], i[5], \"||\", i[6], i[7], i[8])\n\t\titeration += 1\n\t\tif (iteration == 3 or iteration == 6):\n\t\t\tprint(\"=======================\")\n\tprint(\"\")",
"def create_grid(grid):\r\n grid.append([0]*4)\r\n grid.append([0]*4)\r\n grid.append([0]*4)\r\n grid.append([0]*4)",
"def box(self):\n self._write_pos(0, 0, None, \"┌\", None) # ┌╭\n for x in range(1, self.window_size[1]):\n self._write_pos(0, x, None, \"─\", None)\n\n self._write_pos(0, self.window_size[1] - 1, None, \"┐\", None) # ┐╮\n for y in range(1, self.window_size[0]):\n self._write_pos(y, self.window_size[1] - 1, None, \"│\", None)\n\n self._write_pos(\n self.window_size[0], self.window_size[1] - 1, None, \"┘\", None\n ) # ┘╯\n\n for x in range(self.window_size[1] - 2, 0, -1):\n self._write_pos(self.window_size[0], x, None, \"─\", None)\n\n self._write_pos(self.window_size[0], 0, None, \"└\", None) # └╰\n\n for y in range(self.window_size[0] - 1, 0, -1):\n self._write_pos(y, 0, None, \"│\", None)",
"def debug_print_board(grid, debug):\n\tif debug:\n\t\tprint(grid)\n\t\tprint(\"\\n\\n\")\n\t\t# time.sleep(.5)",
"def create_grid(grid):\r\n for t in range(4):\r\n grid.append([0,0,0,0])",
"def print_rooms(self):\n # Add top border\n str = \"# \" * ((3 + self.width * 5) // 2) + \"\\n\"\n # The console prints top to bottom but our array is arranged\n # bottom to top.\n #\n # We reverse it so it draws in the right direction.\n reverse_grid = list(self.grid) # make a copy of the list\n reverse_grid.reverse()\n for row in reverse_grid:\n # PRINT NORTH CONNECTION ROW\n str += \"#\"\n for room in row:\n if room is not None and room.n_to is not None:\n str += \" | \"\n else:\n str += \" \"\n str += \"#\\n\"\n # PRINT ROOM ROW\n str += \"#\"\n for room in row:\n if room is not None and room.w_to is not None:\n str += \"-\"\n else:\n str += \" \"\n if room is not None:\n str += f\"{room.id}\".zfill(3)\n else:\n str += \" \"\n if room is not None and room.e_to is not None:\n str += \"-\"\n else:\n str += \" \"\n str += \"#\\n\"\n # PRINT SOUTH CONNECTION ROW\n str += \"#\"\n for room in row:\n if room is not None and room.s_to is not None:\n str += \" | \"\n else:\n str += \" \"\n str += \"#\\n\"\n # Add bottom border\n str += \"# \" * ((3 + self.width * 5) // 2) + \"\\n\"\n # Print string\n print(str)",
"def displayBoard(board):\n print(\"\\n\\t\",board[0],\"|\",board[1],\"|\",board[2])\n print(\"\\t ----------\")\n print(\"\\n\\t\",board[3],\"|\",board[4],\"|\",board[5])\n print(\"\\t---------\")\n print(\"\\n\\t\",board[6],\"|\",board[7],\"|\",board[8])",
"def visual(self,boxlength, file):\r\n print_tex(self.matrix, boxlength, file)",
"def print_board(board):\n assert isinstance(board, Board)\n print_person(board.person)\n for grid in board.grids:\n print_grid(grid)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Check if 2 grids are equal; return a boolean value | def grid_equal (grid1, grid2):
if(grid1==grid2):
return True
else:
return False | [
"def grid_equal (grid1, grid2):\r\n for x in range(4):\r\n for y in range(4):\r\n if grid1[x][y]!=grid2[x][y]:\r\n return False\r\n return True",
"def equals(tile1, tile2) -> bool:\n\n # compare y dim\n if len(tile1) != len(tile2):\n return False\n\n # compare x dim\n if len(tile1[0]) != len(tile2[0]):\n return False\n\n # compare square by square\n for i in range(len(tile1)):\n for j in range(len(tile1[0])):\n if tile1[i][j] != tile2[i][j]:\n return False\n\n return True",
"def same_row(self, value1: int, value2: int):\n return (value1 - 1) // self.grid_size == (value2 - 1) // self.grid_size",
"def positionsInSameCell(self, pos1, pos2):\n x1,y1,z1 = pos1\n x2,y2,z2 = pos2\n if int(x1) == int(x2):\n if int(y1) == int(y2):\n if int(z1) == int(z2):\n return True",
"def same_col(self, value1: int, value2: int):\n return (value1 - 1) % self.grid_size == (value2 - 1) % self.grid_size",
"def is_same(self, board):\n return board == self.board",
"def compare_tiles(tile_row_list1, tile_row_list2):\n for row in range(0, len(tile_row_list1)):\n for col in range(0, len(tile_row_list1[row])):\n if tile_row_list1[row][col] is not tile_row_list2[row][col]:\n return False\n\n return True",
"def match_grids(grid1, grid2):\n matches = 0\n for row1, row2 in zip(grid1, grid2):\n for ch1, ch2 in zip(row1, row2):\n if ch1 == ch2:\n matches += 1\n return matches",
"def check_lost (grid):\r\n height=4\r\n #check for 0 value in grid \r\n for row in range(height):\r\n for col in range(height):\r\n if 0 in grid[row]:\r\n return False\r\n #check for equal adjacent values horizontally \r\n for row in range(height):\r\n for col in range(height-1): \r\n if grid[row][col] == grid[row][col+1]:\r\n return False\r\n \r\n #check for equal adjacent values vertically \r\n for row in range(height-1):\r\n for col in range(height): \r\n if grid[row][col] == grid[row+1][col]:\r\n return False \r\n else:\r\n return True",
"def __eq__(self, tile2):\r\n return self.x == tile2.x and self.y == tile2.y and self.zoom == tile2.zoom",
"def check_sample_map_equals_sample_grid(self):\n return (\n self.grid.x_size == self.sample_map.x_size\n and self.grid.y_size == self.sample_map.x_size\n and self.grid.x_offset == 0\n and self.grid.y_offset == 0\n )",
"def set_equals(t1: Tensor, t2: Tensor) -> bool:\n t1 = t1.unique(dim=0)\n t2 = t2.unique(dim=0)\n if t1.shape != t2.shape:\n return False\n equals_sum = (t1.unsqueeze(-2) == t2).all(dim=-1).sum(dim=-1)\n return torch.equal(equals_sum, torch.ones_like(equals_sum))",
"def equals(first_img, second_img): \n\n if first_img is None or second_img is None: \n return False \n diff = ImageChops.difference(first_img, second_img)\n if diff.getbbox() != None: \n return False\n else: \n diff = None\n return True",
"def equal_contents(self, other):\n for col, count in self.stones.items():\n if other.stones.get(col, 0) != count:\n return False\n for col, count in other.stones.items():\n if self.stones.get(col, 0) != count:\n return False\n return True",
"def equals(point1, point2):\n return point1[0] == point2[0] and point1[1] == point2[1]",
"def __eq__(self, ret_mat):\n for i in range(0,8):\n if self.data[i] != ret_mat[i]:\n return False\n return True",
"def members_are_equal(cls, point_1, point_2):\n return point_1 == point_2",
"def _equal(a, b):\n return type(a) != np.ndarray and a == b",
"def __eq__(self, other):\n return (isinstance(other, GridPegSolitairePuzzle) and\n self._marker == other._marker and\n self._marker_set == other._marker_set)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Parses a flat tree to a Tree instance | def parse_flat_tree(self, flat_tree):
import copy
s = copy.copy(flat_tree)
words = deque(s.split(" "))
root = words.popleft()
if words.popleft() == '(': # if the root has children
self.parse_flat_tree_helper(self.t, root, words)
return self.t | [
"def from_file(cls, fPath):\n with open(fPath) as f:\n s = ' '.join(line for line in f).replace('\\n', ' ')\n s = s.replace('(', ' ( ')\n s = s.replace(')', ' ) ')\n nodes = s.split()\n\n trees = []\n # stack holds the ancestors of the given node\n stack = [Tree(nodes[2], None, [])]\n idx = 3 # position within the nodes list\n while idx < len(nodes):\n n = nodes[idx]\n if n == '(':\n chlds = stack[-1].children # children of the parent node\n if nodes[idx+2] == '(':\n # internal node:\n newN = Tree(nodes[idx+1], stack[-1], [])\n chlds.append(newN)\n stack.append(newN)\n idx += 2\n else:\n # leaf and its POS tag:\n newN = Tree(nodes[idx+1], stack[-1], [Tree(nodes[idx+2])])\n newN.children[0].parent = newN\n chlds.append(newN)\n idx += 4\n elif n == ')':\n if len(stack) == 1:\n trees.append(cls(stack[0].val, None, stack[0].children))\n stack.pop()\n if idx+5 < len(nodes):\n stack.append(Tree(nodes[idx+4], None, []))\n idx += 5\n continue\n else:\n break\n else:\n stack.pop()\n idx += 1\n else:\n raise NameError(\"ill-formed tree\")\n\n return trees",
"def make_tree(dataset):\n\treturn make_tree_helper(dataset)",
"def build_tree(t):\n root = ParseTree(None)\n\n if isinstance(t, str):\n root = ParseTree(t)\n\n elif t is not None:\n root = ParseTree(None)\n for c in t:\n if c is '[':\n node = build_tree(range_to_id(t))\n root.children.append(node)\n break\n else:\n node = build_tree(c)\n root.children.append(node)\n\n return root",
"def from_string(cls, s):\n nodes = cls.__splitAndCheckTreeString(s)\n order_nb = 0 # numbering the leaves (i.e. word order position)\n # stack holds the ancestors of the given node\n stack = [Tree(nodes[1], None, [])]\n idx = 2 # position within the nodes list\n while len(stack) > 0:\n if idx >= len(nodes):\n raise NameError(\"ill-formed tree: didn't finish\")\n n = nodes[idx]\n if n == '(':\n chlds = stack[-1].children # children of the parent node\n if nodes[idx+2] == '(':\n # internal node:\n newN = Tree(nodes[idx+1], stack[-1], [])\n chlds.append(newN)\n stack.append(newN)\n idx += 2\n else:\n # leaf and its POS tag:\n newN = Tree(nodes[idx+1], stack[-1], [Tree(nodes[idx+2])], order_nb=order_nb)\n newN.children[0].parent = newN\n chlds.append(newN)\n idx += 4\n order_nb += 1\n elif n == ')':\n if len(stack) == 1:\n break\n else:\n stack.pop()\n idx += 1\n else:\n print(s)\n print([node.val for node in stack])\n print(n)\n raise NameError(\"ill-formed tree\")\n\n return cls(stack[0].val, None, stack[0].children)",
"def deserialize(self, data):\n\n\n # Your Codec object will be instantiated and called as such:\n # ser = Codec()\n # deser = Codec()\n # ans = deser.deserialize(ser.serialize(root))\n tree = data.split()\n print(tree)\n if(tree[0] == \"n\"):\n return None\n queue = []\n root = TreeNode(tree[0])\n\n queue.append(root)\n i = 1\n while queue:\n cur = queue.pop(0)\n if cur == None:\n break\n cur.left = TreeNode(int(tree[i])) if tree[i] != \"n\" else None\n cur.right = TreeNode(int(tree[i+1])) if tree[i+1] != \"n\" else None\n i += 2\n queue.append(cur.left)\n queue.append(cur.right)\n\n return root",
"def get_tree(self):\n return self.parse_tree",
"def parse_input():\n\n with open('input.txt', 'r') as txt:\n tree = txt.read().strip().split(' ')\n\n return tree",
"def tree_from_tuples(tuple_tree, parent=None):\n node = BinaryTree(tuple_tree[0], None, None, parent)\n left = tree_from_tuples(tuple_tree[1], node) if tuple_tree[1] else None\n right = tree_from_tuples(tuple_tree[2], node) if tuple_tree[2] else None\n node.left = left\n node.right = right\n return node",
"def str2tree(s, binarize=False):\n if not s.startswith('('):\n s = \"( {} )\".format(s)\n if binarize:\n s = s.replace(\"(\", \"(X\")\n return Tree.fromstring(s)",
"def tree_from_lines(lines):\n\n tree = []\n current_stack = []\n for line in lines:\n asl_indents = line[0]\n node = ([], line[1])\n if asl_indents == 0:\n tree += [node]\n current_stack = [node]\n else:\n while len(current_stack) > asl_indents:\n current_stack = current_stack[:-1]\n current_stack[-1][0].append(node)\n current_stack += [node]\n return tree",
"def disfile2tree(dis_filepath):\n with open(dis_filepath) as f:\n rst_tree_str = f.read().strip()\n rst_tree_str = fix_rst_treebank_tree_str(rst_tree_str)\n rst_tree_str = convert_parens_in_rst_tree_str(rst_tree_str)\n return ParentedTree.fromstring(rst_tree_str)",
"def parse(line):\n\n document = Document()\n root = document.createElement('tree')\n current_element = root\n rest = line\n\n while True:\n element, separator, rest = parse_element(rest, document)\n\n if isinstance(current_element.lastChild, Text) and \\\n current_element.lastChild.data == '':\n current_element.removeChild(current_element.lastChild)\n\n current_element.appendChild(element)\n\n if rest is None:\n break\n\n if separator == '<':\n current_element = current_element.parentNode\n elif separator == '+':\n current_element = current_element\n elif separator == '>':\n current_element = element\n\n expand_multipliers(root)\n\n return root",
"def test_write_tree(self):\n\n newick = '''(\n (\n a:1.000000,\n b:2.000000\n )x:3.000000,\n (\n c:4.000000,\n d:5.000000\n )y:6.000000\n)rra:0.000000;\n'''\n infile = StringIO(newick)\n tree = read_tree(infile)\n\n out = StringIO()\n tree.write(out, rootData=True)\n self.assertEqual(newick, out.getvalue())",
"def parse_tree(lines):\n regex = re.compile(r'^(?P<indent>(?: {4})*)(?P<name>\\S.*)')\n stack = []\n for line in lines:\n match = regex.match(line)\n if not match:\n raise ValueError(\n 'Indentation not a multiple of 4 spaces: \"{0}\"'.format(line)\n )\n level = len(match.group('indent')) // 4\n if level > len(stack):\n raise ValueError('Indentation too deep: \"{0}\"'.format(line))\n stack[level:] = [match.group('name')]\n yield level, match.group('name'), (stack[level - 1] if level else None)",
"def binary_tree_parse(matchObj, argv):\n nodes = [list(x) for x in re.findall(\n r'([A-Z]) \"([^\"]*?)\"', matchObj.group('tree'))]\n l = len(nodes)\n out = ''\n out += \"\\n\\\\medskip\\n\\\\begin{tikzpicture}[nodes={circle, draw}]\"\n out += \"\\n\\\\graph[binary tree layout, fresh nodes]{\\n\"\n # The package used to draw trees is TikZ and that requires LuaLaTeX\n # to compile (the algorithm that computes distance\n # between elements is written in Lua)\n # The traversal is a pre-order traversal\n # If you don't understand that code you should go to math spé in Lycée\n # Henri IV and ask E. T.\n\n def get_tree(argv):\n def aux(i, depth):\n if nodes[i][0] == 'L':\n f = nodes[i][1]\n return (('\"' + block_parse(f, argv) + '\"') if f != '()' else '', i + 1)\n else:\n (g, r1) = aux(i + 1, depth + 1)\n (d, r2) = aux(r1, depth + 1)\n return ('\"' + block_parse(nodes[i][1], argv) +\n '\"' + \" -- {\" + g + \",\" + d + \"}\", r2)\n (ans, r) = aux(0, 1)\n if r != l:\n return \"\"\n else:\n return re.sub(r\"\\n ?\\n\", r\"\\n\", ans) + \"};\\n\"\n\n out += get_tree(argv) + \"\\\\end{tikzpicture}\\n\\\\medskip\\n\"\n return out",
"def read_tree(tree_file):\n\ttree = Phylo.read(tree_file, 'newick')\n\ttree.root_at_midpoint()\n\treturn tree",
"def tree2biotree_converter(tree): \n def build_tree(children):\n #print \"Entering children=\",str(children)\n if not type(children) == list:\n return Bio.Phylo.BaseTree.Clade(name=str(children), clades=[]) \n children_clades = list( build_tree(child) for child in children ) \n return Bio.Phylo.BaseTree.Clade(name=\"\", clades=children_clades)\n return Bio.Phylo.BaseTree.Tree(root=build_tree(tree), rooted=True)",
"def create_tree(markdown):\n global blocks, pos\n # parse markdown\n blocks = parse_markdown(markdown)\n if config.DEBUG_MODE:\n print('[DEBUG]: Parsed markdown')\n print(blocks)\n\n # create root node\n title = blocks[0].content.get_clean()\n root = Node(title)\n\n # recursively generate children\n pos = 1\n while pos < len(blocks):\n c = recurse()\n if c:\n root.add_child(c)\n \n\n # clean up tree\n root = root.retract()\n return root",
"def traverse_tree(tree, return_flat_tree):\n tokens = []\n if (type(tree) == nltk.tree.Tree) and (tree.label() == NONE_NODE_LABEL):\n return tokens\n for subtree in tree:\n if type(subtree) == nltk.tree.Tree:\n #if subtree.label() == \"NP\":\n if subtree.label().startswith(\"NP\"):\n if tree.label() != NONE_NODE_LABEL:\n # If the subtree does not contain additional NP's, the leaves constitute a 'base NP'\n if not subtree_contains_np(subtree):\n if subtree.leaves() and len(subtree.leaves()) > 0: \n # Filter the base-NP tree (as it may contain -NONE- labels):\n filtered_leaves = filter_base_np(subtree)\n \n if len(filtered_leaves) > 0:\n if return_flat_tree:\n # use extend if a flat tree is desired:\n tokens.extend( [SONP_SYM] + filtered_leaves + [EONP_SYM])\n else:\n # base-NPs will be in a nested list\n tokens.append( [SONP_SYM] + filtered_leaves + [EONP_SYM])\n \n else:\n assert(False)\n \n else:\n # If the subtree contains NPs, continuing traversing in search of the base NP\n child_tokens = traverse_tree(subtree, return_flat_tree)\n if len(child_tokens) > 0:\n tokens.extend(child_tokens)\n else:\n child_tokens = traverse_tree(subtree, return_flat_tree)\n if len(child_tokens) > 0:\n tokens.extend(child_tokens)\n #tokens.append(traverse_tree(subtree))\n else:\n tokens.append(str(subtree))\n return tokens"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Finds the count of matches for the matching_to query passed | def find_query_count(self, matching_to):
node, sep, query = matching_to.partition(" ")
q = deque()
q.append(node)
count = 0
while len(q) > 0:
c = q.popleft()
q.extend(self.t.treemap[c].children)
count += self.t.query(c, query)
return count | [
"def num_exact_matches(self, possible_matches):\n count = 0\n\n for score, request in possible_matches:\n if score.is_exact_match():\n count += 1\n\n return count",
"def number_of_matches(self):\n return len(self.matches)",
"def count(self, where_dict={}):\n return len(self.find(where_dict))",
"def count(self, where_dict):\n\n # return len(self.find(where_dict))\n count = 0\n for document in self.documents:\n if self.check_document(document, where_dict):\n count += 1\n return count",
"def get_number_of_subjects_where(survey, subjects_qs, match_df, match_value):\n facts = _get_facts(survey, subjects_qs=subjects_qs, \n desired_facts=match_df, value=match_value)\n return len(_get_subject_ids_for_facts(facts))",
"def count(self, **request_params):\n es_query = self._generate_es_query(count_query=True)\n return self.search_model_class.count(es_query, **request_params)",
"def count(self, value: str, *, exact_match: bool = False) -> int:\n return len(list(self.search(value, exact_match=exact_match)))",
"def _count_matches(regex, s):\n return len(regex.findall(s))",
"def nmatches(self):\n return self.__nmatches",
"def num_unigram_matches(entity1, entity2, recipe):\n pass",
"def CountMatches(self,filters=False):\n self.matchcount = 0\n for key, matches in self.matches.items():\n for match in matches:\n if filters:\n #Apply a filter:\n match.WillBeProcessed = True\n for attribute, value in filters.items():\n try:\n if getattr(match,attribute) != value:\n match.WillBeProcessed = False\n except AttributeError:\n #If the match object DOESNT have the attribute, accept it\n pass\n if match.WillBeProcessed:\n self.matchcount += 1\n else:\n self.matchcount += 1",
"def countSubStringMatch(target,key):\r\n \r\n target0 = target\r\n instances = 0\r\n x = 0\r\n y = 0\r\n while(x!=-1):\r\n x=find(target,key,y)\r\n if(x==-1):\r\n print 'Number of times that ', key,' appears in ',target0, 'is:',instances\r\n return instances\r\n\r\n else:\r\n instances+=1\r\n y=x\r\n\r\n return None",
"def searchCount(self,\n query,\n approxMatches=None,\n includeCancelled=None,\n misStatus=None,\n attributes=None):\n path = \"api/v1/person/search-count\"\n path_params = {}\n query_params = {\"query\": query,\n \"approxMatches\": approxMatches,\n \"includeCancelled\": includeCancelled,\n \"misStatus\": misStatus,\n \"attributes\": attributes}\n form_params = {}\n result = self.conn.invoke_method(\"GET\", path, path_params,\n query_params, form_params)\n if result.error:\n raise IbisException(result.error)\n return int(result.value)",
"def topic_match_count( query_topics_dict, document_topics_dict ):\r\n counter = 0\r\n\r\n if query_topics_dict is not None and document_topics_dict is not None:\r\n query_topics = list( query_topics_dict.keys() )\r\n document_topics = list( document_topics_dict.keys() )\r\n for topic in query_topics:\r\n if topic in document_topics:\r\n counter += 1\r\n\r\n return counter",
"def _amount_of_answers(self, tupla):\n\n if self._search_values(tupla):\n return 0\n else:\n return _count_answers(tupla)",
"def count_findings(self):\n return Finding.objects.filter(finding_type=self).count()",
"def pbh_match_count(db, table, key, data):\n\n field_map = db.get_entry(table, key)\n\n match_total = 0\n match_count = 0\n\n if PBH_RULE_GRE_KEY in field_map:\n if PBH_RULE_GRE_KEY in data:\n match_count += 1\n match_total += 1\n if PBH_RULE_ETHER_TYPE in field_map:\n if PBH_RULE_ETHER_TYPE in data:\n match_count += 1\n match_total += 1\n if PBH_RULE_IP_PROTOCOL in field_map:\n if PBH_RULE_IP_PROTOCOL in data:\n match_count += 1\n match_total += 1\n if PBH_RULE_IPV6_NEXT_HEADER in field_map:\n if PBH_RULE_IPV6_NEXT_HEADER in data:\n match_count += 1\n match_total += 1\n if PBH_RULE_L4_DST_PORT in field_map:\n if PBH_RULE_L4_DST_PORT in data:\n match_count += 1\n match_total += 1\n if PBH_RULE_INNER_ETHER_TYPE in field_map:\n if PBH_RULE_INNER_ETHER_TYPE in data:\n match_count += 1\n match_total += 1\n\n return match_total, match_count",
"def count_findings(self):\n return Finding.objects.filter(severity=self).count()",
"def result_count(self):\n return sum([len(m.results) for m in self.models.values()])"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Retrieve root folder contents | def retrieve_folder():
root_folder_contents = account.get('storage/folders/root/contents')
for resource in root_folder_contents.get_paging_iterator():
print(resource.data) | [
"def get_root(self):\n return self.get_obj(self._root_path)",
"def getLocalFolderListing(self,root=None):\n root = 1 and root or self.options.path\n fileList = []\n forders = []\n\n for root, dirnames, files in os.walk(self.options.path):\n for subdirname in dirnames:\n url = os.path.join(root, subdirname)\n urlparts = url.split('/')\n urlparts = urlparts[urlparts.index(self.options.rootfoldername):]\n forders.append('/'.join(urlparts))\n for file in files:\n url = os.path.join(root,file)\n urlparts = url.split('/')\n urlparts = urlparts[urlparts.index(self.options.rootfoldername):]\n fileList.append('/'.join(urlparts))\n\n return {\"folders\":forders, \"files\":fileList}",
"def __root_directory__(config) :\n path_config = config.get('ContentPaths', {})\n return os.path.realpath(path_config.get('PService', os.path.join(os.environ['HOME'], '.toxaway')))",
"def _get_all_folder(self, root):\n\n result = []\n children_list = self._get_all_children(root)\n for child in children_list:\n if child.tagName == \"TESTFOLDER\":\n result.append(child)\n return result",
"def get_contents(self, folder: Folder):\n log.debug(\"Listing Contents of %s/%s\" % (folder.course.id, folder.id))\n if isinstance(folder, Course):\n response = json.loads(self._get('/api/documents/%s/folder' % folder.course.id).text)\n else:\n response = json.loads(self._get('/api/documents/%s/folder/%s' % (folder.course.id, folder.id)).text)\n log.debug(\"Got response: %s\" % response)\n\n documents = [Document.from_response(response, folder) for response in response[\"documents\"]]\n\n folders = [Folder.from_response(response, folder) for response in response[\"folders\"]]\n\n return documents + folders",
"def _load_folders_structure(self):\n root_id = self._get_root_id()\n #print(\"found root id = \",root_id)\n dom_root = self._do_soap_request('getFolderXML', '<folder id=\"%s\"/>' % root_id)\n root_folder_element = self._find_root_element(dom_root, root_id)\n #print(\"found root folder element = \",root_folder_element)\n root_folder = CMFolder(root_id, 'root')\n self._scan_folder_level(root_folder_element, root_folder)\n return root_folder",
"def get_root_path(self):\n mock_cmd = self._mock_cmd('--print-root-path')\n output = check_output(mock_cmd)\n return output.rstrip()",
"def _get_folder_readme_content():\n readme_path = os.path.join(SETTINGS[\"template_path\"], \"FOLDER_README.txt\")\n with open(readme_path, \"r\") as f:\n readme_content = f.read()\n return readme_content",
"def root_path():\n return Root()",
"def __get_file_root_location(self):\n\n return self.main_location",
"def folder_contents(self):\n url = self.absolute_url()\n self.REQUEST.RESPONSE.redirect(url)",
"def listFolderContents(contentFilter=None):",
"def load_folder_contents( self, trans, folder ):\n current_user_roles = trans.get_current_user_roles()\n is_admin = trans.user_is_admin()\n content_items = []\n for subfolder in folder.active_folders:\n if not is_admin:\n can_access, folder_ids = trans.app.security_agent.check_folder_contents( trans.user, current_user_roles, subfolder )\n if (is_admin or can_access) and not subfolder.deleted:\n subfolder.api_type = 'folder'\n content_items.append( subfolder )\n for dataset in folder.datasets:\n if not is_admin:\n can_access = trans.app.security_agent.can_access_dataset( current_user_roles, dataset.library_dataset_dataset_association.dataset )\n if (is_admin or can_access) and not dataset.deleted:\n dataset.api_type = 'file'\n content_items.append( dataset )\n return content_items",
"def get_package_data():\n filenames = []\n # The root dir.\n root_dir = os.path.join(os.path.dirname(os.path.abspath(\n inspect.getfile(inspect.currentframe()))), \"llnl_db_client\")\n # Recursively include all files in these folders:\n folders = [os.path.join(root_dir, \"tests\", \"data\")]\n for folder in folders:\n for directory, _, files in os.walk(folder):\n for filename in files:\n # Exclude hidden files.\n if filename.startswith(\".\"):\n continue\n filenames.append(os.path.relpath(\n os.path.join(directory, filename),\n root_dir))\n return filenames",
"def _get_root_component(self, context):\n\n components_dict = {}\n\n self._build_root_directory(context, components_dict)\n self._build_client_directory(context, components_dict)\n self._build_sample_directory(context, components_dict)\n self._build_docs_directory(context, components_dict)\n\n return components_dict[\"root\"]",
"def svn_fs_root_fs(root: \"svn_fs_root_t\") -> \"svn_fs_t *\":\n return _fs.svn_fs_root_fs(root)",
"def get_inner_fileserver_root():\n\n return seahub.settings.INNER_FILE_SERVER_ROOT",
"def test_traverse_folder(self):\n folder = self.api.traverse('soap')\n self.assertTrue(verifyObject(ICMISFolder, folder))\n self.assertEqual(aq_parent(folder), self.api.root)\n self.assertEqual(folder.getId(), 'soap')\n self.assertEqual(\n folder.getPhysicalPath(),\n ('', 'plone', 'browser', 'soap'))\n self.assertEqual(\n folder.absolute_url(),\n 'http://nohost/plone/browser/soap')\n self.assertEqual(\n folder.Identifier(),\n 'http://nohost/plone/browser/soap')\n self.assertEqual(folder.Type(), 'CMIS Folder')\n self.assertEqual(folder.Format(), 'text/html')\n\n # A Folder can list its own content\n contents = folder.getFolderContents()\n self.assertEqual(len(contents), 2)\n contents = sorted(contents, key=lambda c: c.getId())\n self.assertEqual(\n map(lambda c: c.getId(), contents),\n ['info', 'specs.txt'])\n self.assertEqual(\n map(lambda c: c.Type(), contents),\n ['CMIS Folder', 'CMIS Document'])\n self.assertEqual(\n map(lambda c: c.Format(), contents),\n ['text/html', 'text/plain'])\n self.assertEqual(\n map(lambda c: c.absolute_url(), contents),\n ['http://nohost/plone/browser/soap/info',\n 'http://nohost/plone/browser/soap/specs.txt'])",
"def testfolder_list(self):\n\n tcfolder_list = self._get_all_folder(self.root)\n for item in tcfolder_list:\n yield item"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get ratings similarity between movies in the movie recommendation pool and the target movie. | def get_ratings_similarity(self):
# Get average rating of the target movie
query_1 = "SELECT AVG(rating) FROM ratings WHERE movie_id=%i" % self.target_movie.movie_id
res = self.db.execute(query_1).fetchall()
target_movie_average_rating = res[0][0]
pmids = []
for rm in self.recommendation_pool:
pmids.append(rm[0].movie_id)
# rating_similarity dict contains movie_ids as keys and difference in rating as value
self.rating_similarity = {}
query_2 = """
SELECT movie_id, ABS(({tmr} - AVG(rating))) as rating_difference
FROM ratings r
WHERE movie_id IN ({pool_movie_ids})
GROUP BY movie_id
""".format(
tmr=target_movie_average_rating,
pool_movie_ids=str(pmids)[1:-1]
)
res = self.db.execute(query_2).fetchall()
for rec in res:
self.rating_similarity[rec[0]] = rec[1] | [
"def recommend(self, target_movie_id, num_recommendations):\n\n\n print(\" - Getting target movie record\")\n self.target_movie = self.db.query(Movie).filter_by(movie_id=target_movie_id).first()\n assert self.target_movie is not None\n\n self.get_movie_recommendation_pool(num_recommendations * 10)\n self.get_ratings_similarity()\n tags_similarity = self.get_tags_similarity()\n print(\" ** TAGS SIMILARITY **\")\n print(tags_similarity)\n\n self.final_ratings = {}\n for r in self.recommendation_pool:\n # r[0] is the movie object, so r[0].movie_id gives you the movie ID\n # r[1] contains the rating similarity value\n pool_movie_id = r[0].movie_id\n similarity = r[1]\n\n # self.rating_similarity[pool_movie_id]\n self.final_ratings[pool_movie_id] = similarity - (self.rating_similarity.get(pool_movie_id, 2.5) * self.RATING_SIMILARITY_WEIGHT)\n\n # tags similarity addition to final ratings\n for m_id, tag_similarity in tags_similarity.items():\n if m_id not in self.final_ratings:\n self.final_ratings[m_id] = 0.0\n\n self.final_ratings[m_id] += tag_similarity * self.TAGS_SIMILARITY_WEIGHT",
"def similarity(self, other):\n\n user_ratings = {}\n paired_ratings = []\n\n for rating in self.ratings:\n user_ratings[rating.movie_id] = rating\n\n for r in other.ratings:\n u_r = user_ratings.get(r.movie_id)\n\n if u_r is not None:\n paired_ratings.append((u_r.score, r.score))\n\n if paired_ratings:\n return pearson(paired_ratings)\n else:\n return 0.0",
"def predict_rating(self, movie):\n\n other_ratings = movie.ratings\n\n similarities = [\n (self.similarity(r.user), r)\n for r in other_ratings\n ]\n\n similarities.sort(reverse=True)\n\n similarities = [(sim, r) for sim, r in similarities if sim > 0]\n\n if not similarities:\n return None\n\n numerator = sum([r.score * sim for sim, r in similarities])\n denominator = sum([sim for sim, r in similarities])\n\n return numerator/denominator\n\n\n #this is the one we wrote",
"def get_similar_users(target_rating: Rating,\n user_ratings: UserRatingDict,\n movie_users: MovieUserDict) -> Dict[int, float]:\n\n # Your code here\n mov_list = []\n similar_p = {}\n for mov in target_rating:\n mov_list.append(mov)\n remove_unknown_movies(user_ratings, movie_users)\n p_watched = get_users_who_watched(mov_list, movie_users)\n for p in p_watched:\n if p in user_ratings:\n similarity = get_similarity(target_rating, user_ratings[p])\n similar_p[p] = similarity\n return similar_p",
"def similar(self):\n if \"similar\" in self.links:\n reviews_json = self._mapi._retrieve_rt_json(self.links[\"similar\"])\n movies = reviews_json.get(\"movies\", None)\n # build a similarity graph\n return (Movie(jdata[\"id\"], self._mapi, jdata) for jdata in movies) if movies else None\n return None",
"def scaled_dot_product(self, movie_id1, movie_id2, verbose=False):\n ratings1 = self.csv_data.movie_ratings[movie_id1]\n ratings2 = self.csv_data.movie_ratings[movie_id2]\n\n # have ratings1 be the movie with fewer reviews\n if len(ratings1) > len(ratings2):\n ratings1, ratings2 = ratings2, ratings1\n\n # look for common users first\n r1 = []\n r2 = []\n\n for user_id in ratings1:\n if user_id in ratings2:\n r1.append(ratings1[user_id])\n r2.append(ratings2[user_id])\n\n # if there are too few common users, return 0 (no similarity)\n if len(r1) < 3: return 0.0\n\n r1 = numpy.array(r1)\n r2 = numpy.array(r2)\n\n norm1 = numpy.linalg.norm(r1)\n norm2 = numpy.linalg.norm(r2)\n\n similarity = r1.dot(r2) / (norm1 * norm2)\n\n # Scale output due to number of common users.\n # The settings below buff score by 21.6% for having 162 users in common\n buff_limit = 0.216\n buff_point = 162.0\n n = len(r1) # number of common users\n\n # for tuning the above parameters, you need to know common users \"n\"\n if verbose:\n print(\"similarity =\", similarity,\n \"common reviewers =\", n)\n\n x_limit = 3 * math.exp(buff_limit)\n x = 3 + (x_limit - 3) * (n - 3) / (buff_point - 3)\n buff = math.log(x) - math.log(3)\n\n if buff > buff_limit: buff = buff_limit # for input > buff_point\n if buff < 0: buff = 0 # for input < 3, which shouldn't happen\n\n return similarity * (1.0 + buff)",
"def recommend_movies(target_rating: Rating,\n movies: MovieDict, \n user_ratings: UserRatingDict,\n movie_users: MovieUserDict,\n num_movies: int) -> List[int]:\n\n # Your code here\n \n movie_score = {}\n \n ## First step = 'we will need to find users similar'\n similar_user = get_similar_users(target_rating, user_ratings, movie_users) \n \n ## Second step = 'This will be our list of candidate movies'\n ## get_candidate_mov created\n candidate_mov = get_candidate_mov(similar_user, user_ratings, target_rating)\n \n ## Third step = 'track a \"score\" for each movie'\n ## get_mov_score created\n for mov in candidate_mov:\n movie_score[mov] = get_mov_score(mov, \n user_ratings, \n similar_user, \n candidate_mov) \n \n ## Forth step = 'The return list should contain movie ids with the highest scores'\n ## sort_score_list created\n sorted_list = sort_score_list(movie_score)\n \n ## Last step = ' list should be no longer than the value of this parameter'\n final_list = sorted_list[:num_movies]\n \n return final_list",
"def similarity(dataframe):\r\n main = dataframe\r\n \r\n dataframe = feature_selection(dataframe)\r\n train_size = round((len(dataframe)*0.9))\r\n train = dataframe[:train_size]\r\n test = dataframe[train_size:]\r\n \r\n test_value = test.iloc[np.random.randint(0,10),:]\r\n \r\n #compute cosine similarity\r\n neighbors = {}\r\n for i, r in train.iterrows():\r\n similarity = np.dot(test_value,r)/(np.linalg.norm(test_value)*np.linalg.norm(r))\r\n neighbors[i] = similarity\r\n \r\n #get similary movies in descending order\r\n neighbors = {k: v for k, v in sorted(neighbors.items(), key=lambda item: item[1], reverse=True)}\r\n \r\n test_final = pd.concat([test, main], axis=1, sort=False)\r\n train_final = pd.concat([train, main], axis=1, sort=False)\r\n \r\n test_movie = test_final.loc[test_value.name,['Title', 'Rated', 'Genre', 'imdbRating']]\r\n similar_movies = train_final.loc[list(neighbors.keys())[:5],['Title','Rated', 'Genre', 'Released', 'imdbRating']]\r\n \r\n return test_movie, similar_movies",
"def get_tags_similarity(self):\n\n target_movie_tags = self.get_tags_count_(self.target_movie.movie_id)\n print(\"get_tags_similarity: target_movie_tags: %r\" % target_movie_tags)\n\n tags_similarity = {}\n\n users_query = \"select distinct user_id from tags where movie_id=%i\" % \\\n self.target_movie.movie_id\n user_records = self.db.execute(users_query).fetchall()\n print(\"get_tags_similarity: %i users have tagged this movie\"\n % len(user_records))\n\n for urec in user_records:\n user_id = urec[0]\n print(\"get_tags_similarity: Processing user: %i\" % user_id)\n\n movie_ids_query = \"\"\"\n SELECT distinct movie_id\n FROM tags\n WHERE movie_id != %i\n AND user_id=%i\n \"\"\" % (self.target_movie.movie_id, user_id)\n res = self.db.execute(movie_ids_query).fetchall()\n\n print(\"get_tags_similarity: User has tagget %i movies\" % len(res))\n if res:\n for mid_rec in res:\n movie_id = mid_rec[0]\n print(\n \"get_tags_similarity: -> Processing movie: %i\" %\n movie_id\n )\n\n movie_tags = self.get_tags_count_(movie_id, user_id)\n tags_similarity[movie_id] = self.tags_jaccard_index(\n target_movie_tags, movie_tags)\n\n return tags_similarity",
"def _scaled_dot_product(self, movie_id1_index, movie_id2_index):\n ratings1 = self.movie_ratings[movie_id1_index][1]\n ratings2 = self.movie_ratings[movie_id2_index][1]\n\n # have ratings1 be the movie with fewer reviews\n if len(ratings1) > len(ratings2):\n ratings1, ratings2 = ratings2, ratings1\n\n # look for common users first\n r1 = []\n r2 = []\n\n for user_id in ratings1:\n if user_id in ratings2:\n r1.append(ratings1[user_id])\n r2.append(ratings2[user_id])\n\n # if there are too few common users, return 0 (no similarity)\n if len(r1) < 3: return 0.0, len(r1), 0.0\n\n r1 = numpy.array(r1)\n r2 = numpy.array(r2)\n\n norm1 = numpy.linalg.norm(r1)\n norm2 = numpy.linalg.norm(r2)\n\n similarity = r1.dot(r2) / (norm1 * norm2)\n\n # Scale output due to number of common users.\n buff_limit = self.buff_limit\n buff_point = self.buff_point\n n = len(r1) # number of common users\n\n x_limit = 3 * math.exp(buff_limit)\n x = 3 + (x_limit - 3) * (n - 3) / (buff_point - 3)\n buff = math.log(x) - math.log(3)\n\n if buff > buff_limit: buff = buff_limit # for input > buff_point\n if buff < 0: buff = 0 # for input < 3, which shouldn't happen\n\n return similarity * (1.0 + buff), n, similarity",
"def find_similar_movie(self, movie_id, num_results = 10):\n\n sim_scores = []\n\n for id2 in self.csv_data.movie_ratings:\n if id2 != movie_id:\n score = self.compare_two_movies(movie_id, id2)\n\n if score > 0.3:\n sim_scores.append((id2, score))\n\n sim_scores.sort(key = lambda e: e[1], reverse=True)\n\n return sim_scores[:num_results]",
"def _find_similar_movies(request):\n movie_ratings = _process_data[\"movie_ratings\"]\n movie_genres = _process_data[\"movie_genres\"]\n buff_point = _process_data[\"buff_point\"]\n buff_limit = _process_data[\"buff_limit\"]\n\n movies_finder = build_similar_movies_db.SimilarMovieFinder(\n movie_genres, movie_ratings,buff_limit, buff_point)\n\n similar_movies = {} # {movie_id: [similar_movie_ids]}\n\n start = _process_data[\"movie_ratings_start\"]\n length = _process_data[\"movie_ratings_length\"]\n start_time = time.time()\n\n for i in range(start, start + length):\n similar_movie_ids, _ = movies_finder.find_similar_movie(i)\n\n if len(similar_movie_ids) > 0:\n movie_id = movies_finder.movie_ratings[i][0]\n similar_movies[movie_id] = similar_movie_ids\n\n # progress estimation\n if i == start + 200:\n t_so_far = time.time() - start_time\n seconds_left = t_so_far * (length - 200) / 200\n finish_time = datetime.datetime.now() + datetime.timedelta(seconds=seconds_left)\n _lock.acquire()\n print(\"Process estimated completion time\", finish_time)\n _lock.release()\n\n _process_data[\"similar_movies\"] = similar_movies",
"def similarity(candidate, user):\n candidate_rating_vector = []\n user_rating_vector = []\n for i in candidate:\n if i in user:\n candidate_rating_vector.append(candidate[i])\n user_rating_vector.append(user[i])\n\n ratio = math.log(30 + len(user_rating_vector), 64)\n return [candidate['user_id'], candidate['target_rating'],\n custom_distance(candidate_rating_vector, user_rating_vector) / ratio,\n len(user_rating_vector)]",
"def compute_similarity():\n movie_data = pd.read_csv(\"movie_recsys/datasets/movie_data.csv\")\n\n # Compute TF-IDF representation.\n tfidf = TfidfVectorizer(stop_words=\"english\")\n tfidf_matrix = tfidf.fit_transform(movie_data[\"story\"])\n\n # Compute Cosine Similarity.\n cosine_sim_scores = linear_kernel(tfidf_matrix, tfidf_matrix)\n\n # Saving.\n file_path = Path.cwd() / \"movie_recsys/datasets/cosine_sim_scores.csv\"\n savetxt(file_path, cosine_sim_scores)\n return",
"def get_movie_ratings(movie):\n movie = (movie.lower()).replace(\" \", \"_\")\n URL = \"https://www.rottentomatoes.com/m/\" + movie\n try:\n page = requests.get(URL)\n if not page:\n raise Exception(page.status_code)\n except Exception as e:\n print(\"Cannot Find Movie!\" + str(e))\n sys.exit(0)\n soup = BeautifulSoup(page.content, \"html.parser\")\n\n ratings = soup.find_all(\"span\", class_=\"mop-ratings-wrap__percentage\")\n critic = soup.find_all(\n \"p\", class_=\"mop-ratings-wrap__text mop-ratings-wrap__text--concensus\"\n )\n\n print(\"Critic Consensus: \", (critic[0].get_text()).strip())\n print()\n print(\"TOMATOMETER: \", (ratings[0].get_text()).strip())\n print(\"AUDIENCE SCORE: \", (ratings[1].get_text()).strip())\n\n return 1",
"def get_candidate_mov(similar_user: Dict[int, float],\n user_ratings: UserRatingDict,\n target_rating: Rating,) -> List[int]: \n candidate_mov = []\n for p in similar_user:\n for mov in user_ratings[p]:\n if (mov not in candidate_mov) and (mov not in target_rating):\n if user_ratings[p][mov] >= 3.5:\n candidate_mov.append(mov)\n return candidate_mov",
"def fast_similarity(self, ratings, kind='user', epsilon=1e-9):\n\n if kind == 'user':\n sim = ratings.dot(ratings.T) + epsilon\n elif kind == 'item':\n sim = ratings.T.dot(ratings) + epsilon\n norms = np.array([np.sqrt(np.diagonal(sim))])\n return (sim / norms / norms.T)",
"def _extract_movie_ratings(request):\n user_ratings_train = _process_data[\"user_ratings_train\"]\n\n movie_ratings = [] # [(movie_id, [ratings list])]\n movie_id_to_index = {} # {movie_id : list index}\n\n for user_entry in user_ratings_train:\n for movie_id, rating in user_entry[1]:\n\n if movie_id in movie_id_to_index:\n # append \"rating\" to existing entry\n index = movie_id_to_index[movie_id]\n movie_ratings[index][1].append(rating)\n\n else:\n # new (movie_id, [ratings list]) entry\n index = len(movie_ratings)\n movie_id_to_index[movie_id] = index\n movie_ratings.append((movie_id, [rating]))\n\n\n movie_ratings.sort() # so to merge with other movie_ratings\n\n _process_data[\"movie_ratings\"] = movie_ratings",
"def compute_similarities(self):\n\n construction_func = {'cosine': sims.cosine,\n 'msd': sims.msd,\n 'pearson': sims.pearson,\n 'pearson_baseline': sims.pearson_baseline}\n\n if self.sim_options['user_based']:\n n_x, yr = self.trainset.n_users, self.trainset.ir\n else:\n n_x, yr = self.trainset.n_items, self.trainset.ur\n\n min_support = self.sim_options.get('min_support', 1)\n\n args = [n_x, yr, min_support]\n\n name = self.sim_options.get('name', 'msd').lower()\n if name == 'pearson_baseline':\n shrinkage = self.sim_options.get('shrinkage', 100)\n bu, bi = self.compute_baselines()\n if self.sim_options['user_based']:\n bx, by = bu, bi\n else:\n bx, by = bi, bu\n\n args += [self.trainset.global_mean, bx, by, shrinkage]\n\n try:\n if getattr(self, 'verbose', False):\n print('Computing the {0} similarity matrix...'.format(name))\n sim = construction_func[name](*args)\n if getattr(self, 'verbose', False):\n print('Done computing similarity matrix.')\n return sim\n except KeyError:\n raise NameError('Wrong sim name ' + name + '. Allowed values ' +\n 'are ' + ', '.join(construction_func.keys()) + '.')"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get tags similarity between movies in the movie recommendation pool and the target movie. | def get_tags_similarity(self):
target_movie_tags = self.get_tags_count_(self.target_movie.movie_id)
print("get_tags_similarity: target_movie_tags: %r" % target_movie_tags)
tags_similarity = {}
users_query = "select distinct user_id from tags where movie_id=%i" % \
self.target_movie.movie_id
user_records = self.db.execute(users_query).fetchall()
print("get_tags_similarity: %i users have tagged this movie"
% len(user_records))
for urec in user_records:
user_id = urec[0]
print("get_tags_similarity: Processing user: %i" % user_id)
movie_ids_query = """
SELECT distinct movie_id
FROM tags
WHERE movie_id != %i
AND user_id=%i
""" % (self.target_movie.movie_id, user_id)
res = self.db.execute(movie_ids_query).fetchall()
print("get_tags_similarity: User has tagget %i movies" % len(res))
if res:
for mid_rec in res:
movie_id = mid_rec[0]
print(
"get_tags_similarity: -> Processing movie: %i" %
movie_id
)
movie_tags = self.get_tags_count_(movie_id, user_id)
tags_similarity[movie_id] = self.tags_jaccard_index(
target_movie_tags, movie_tags)
return tags_similarity | [
"def get_ratings_similarity(self):\n\n # Get average rating of the target movie\n query_1 = \"SELECT AVG(rating) FROM ratings WHERE movie_id=%i\" % self.target_movie.movie_id\n res = self.db.execute(query_1).fetchall()\n target_movie_average_rating = res[0][0]\n\n pmids = []\n for rm in self.recommendation_pool:\n pmids.append(rm[0].movie_id)\n\n # rating_similarity dict contains movie_ids as keys and difference in rating as value\n self.rating_similarity = {}\n query_2 = \"\"\"\n SELECT movie_id, ABS(({tmr} - AVG(rating))) as rating_difference\n FROM ratings r\n WHERE movie_id IN ({pool_movie_ids})\n GROUP BY movie_id\n \"\"\".format(\n tmr=target_movie_average_rating,\n pool_movie_ids=str(pmids)[1:-1]\n )\n\n res = self.db.execute(query_2).fetchall()\n for rec in res:\n self.rating_similarity[rec[0]] = rec[1]",
"def recommend(self, target_movie_id, num_recommendations):\n\n\n print(\" - Getting target movie record\")\n self.target_movie = self.db.query(Movie).filter_by(movie_id=target_movie_id).first()\n assert self.target_movie is not None\n\n self.get_movie_recommendation_pool(num_recommendations * 10)\n self.get_ratings_similarity()\n tags_similarity = self.get_tags_similarity()\n print(\" ** TAGS SIMILARITY **\")\n print(tags_similarity)\n\n self.final_ratings = {}\n for r in self.recommendation_pool:\n # r[0] is the movie object, so r[0].movie_id gives you the movie ID\n # r[1] contains the rating similarity value\n pool_movie_id = r[0].movie_id\n similarity = r[1]\n\n # self.rating_similarity[pool_movie_id]\n self.final_ratings[pool_movie_id] = similarity - (self.rating_similarity.get(pool_movie_id, 2.5) * self.RATING_SIMILARITY_WEIGHT)\n\n # tags similarity addition to final ratings\n for m_id, tag_similarity in tags_similarity.items():\n if m_id not in self.final_ratings:\n self.final_ratings[m_id] = 0.0\n\n self.final_ratings[m_id] += tag_similarity * self.TAGS_SIMILARITY_WEIGHT",
"def get_movie_tag_matrix(self):\n tag_df = self.genre_data\n unique_tags = tag_df.tag_string.unique()\n idf_data = tag_df.groupby(['movieid'])['tag_string'].apply(set)\n tf_df = tag_df.groupby(['movieid'])['tag_string'].apply(list).reset_index()\n movie_tag_dict = dict(zip(tf_df.movieid, tf_df.tag_string))\n tf_weight_dict = {movie: self.genre_tag.assign_tf_weight(tags) for movie, tags in\n list(movie_tag_dict.items())}\n idf_weight_dict = self.genre_tag.assign_idf_weight(idf_data, unique_tags)\n tag_df = self.genre_tag.get_model_weight(tf_weight_dict, idf_weight_dict, tag_df, 'tfidf')\n tag_df[\"total\"] = tag_df.groupby(['movieid','tag_string'])['value'].transform('sum')\n temp_df = tag_df[[\"movieid\", \"tag_string\", \"total\"]].drop_duplicates().reset_index()\n genre_tag_tfidf_df = temp_df.pivot_table('total', 'movieid', 'tag_string')\n genre_tag_tfidf_df = genre_tag_tfidf_df.fillna(0)\n\n return genre_tag_tfidf_df",
"def similar(self):\n if \"similar\" in self.links:\n reviews_json = self._mapi._retrieve_rt_json(self.links[\"similar\"])\n movies = reviews_json.get(\"movies\", None)\n # build a similarity graph\n return (Movie(jdata[\"id\"], self._mapi, jdata) for jdata in movies) if movies else None\n return None",
"def similarity(self, other):\n\n user_ratings = {}\n paired_ratings = []\n\n for rating in self.ratings:\n user_ratings[rating.movie_id] = rating\n\n for r in other.ratings:\n u_r = user_ratings.get(r.movie_id)\n\n if u_r is not None:\n paired_ratings.append((u_r.score, r.score))\n\n if paired_ratings:\n return pearson(paired_ratings)\n else:\n return 0.0",
"def scaled_dot_product(self, movie_id1, movie_id2, verbose=False):\n ratings1 = self.csv_data.movie_ratings[movie_id1]\n ratings2 = self.csv_data.movie_ratings[movie_id2]\n\n # have ratings1 be the movie with fewer reviews\n if len(ratings1) > len(ratings2):\n ratings1, ratings2 = ratings2, ratings1\n\n # look for common users first\n r1 = []\n r2 = []\n\n for user_id in ratings1:\n if user_id in ratings2:\n r1.append(ratings1[user_id])\n r2.append(ratings2[user_id])\n\n # if there are too few common users, return 0 (no similarity)\n if len(r1) < 3: return 0.0\n\n r1 = numpy.array(r1)\n r2 = numpy.array(r2)\n\n norm1 = numpy.linalg.norm(r1)\n norm2 = numpy.linalg.norm(r2)\n\n similarity = r1.dot(r2) / (norm1 * norm2)\n\n # Scale output due to number of common users.\n # The settings below buff score by 21.6% for having 162 users in common\n buff_limit = 0.216\n buff_point = 162.0\n n = len(r1) # number of common users\n\n # for tuning the above parameters, you need to know common users \"n\"\n if verbose:\n print(\"similarity =\", similarity,\n \"common reviewers =\", n)\n\n x_limit = 3 * math.exp(buff_limit)\n x = 3 + (x_limit - 3) * (n - 3) / (buff_point - 3)\n buff = math.log(x) - math.log(3)\n\n if buff > buff_limit: buff = buff_limit # for input > buff_point\n if buff < 0: buff = 0 # for input < 3, which shouldn't happen\n\n return similarity * (1.0 + buff)",
"def get_similarities(tags):\n simtags3 = {}\n for i in tags:\n prodtags3 = list(product([i,''], tags))\n for j in prodtags3:\n seqtags3 = SequenceMatcher(None, j[0].lower(), j[1].lower())\n if seqtags3.ratio() != 0.0 and seqtags3.ratio() >= SIMILAR and seqtags3.ratio() != 1.0:\n if j[0] not in simtags3 and j[0] not in simtags3.values():\n simtags3[j[0]] = j[1]\n return simtags3",
"def similarity(dataframe):\r\n main = dataframe\r\n \r\n dataframe = feature_selection(dataframe)\r\n train_size = round((len(dataframe)*0.9))\r\n train = dataframe[:train_size]\r\n test = dataframe[train_size:]\r\n \r\n test_value = test.iloc[np.random.randint(0,10),:]\r\n \r\n #compute cosine similarity\r\n neighbors = {}\r\n for i, r in train.iterrows():\r\n similarity = np.dot(test_value,r)/(np.linalg.norm(test_value)*np.linalg.norm(r))\r\n neighbors[i] = similarity\r\n \r\n #get similary movies in descending order\r\n neighbors = {k: v for k, v in sorted(neighbors.items(), key=lambda item: item[1], reverse=True)}\r\n \r\n test_final = pd.concat([test, main], axis=1, sort=False)\r\n train_final = pd.concat([train, main], axis=1, sort=False)\r\n \r\n test_movie = test_final.loc[test_value.name,['Title', 'Rated', 'Genre', 'imdbRating']]\r\n similar_movies = train_final.loc[list(neighbors.keys())[:5],['Title','Rated', 'Genre', 'Released', 'imdbRating']]\r\n \r\n return test_movie, similar_movies",
"def get_similar_users(target_rating: Rating,\n user_ratings: UserRatingDict,\n movie_users: MovieUserDict) -> Dict[int, float]:\n\n # Your code here\n mov_list = []\n similar_p = {}\n for mov in target_rating:\n mov_list.append(mov)\n remove_unknown_movies(user_ratings, movie_users)\n p_watched = get_users_who_watched(mov_list, movie_users)\n for p in p_watched:\n if p in user_ratings:\n similarity = get_similarity(target_rating, user_ratings[p])\n similar_p[p] = similarity\n return similar_p",
"def predict_rating(self, movie):\n\n other_ratings = movie.ratings\n\n similarities = [\n (self.similarity(r.user), r)\n for r in other_ratings\n ]\n\n similarities.sort(reverse=True)\n\n similarities = [(sim, r) for sim, r in similarities if sim > 0]\n\n if not similarities:\n return None\n\n numerator = sum([r.score * sim for sim, r in similarities])\n denominator = sum([sim for sim, r in similarities])\n\n return numerator/denominator\n\n\n #this is the one we wrote",
"def compute_semantic_similarity(predictions_list, output_path, dataset, perplexity=40):\n nlp = spacy.load(\"en_core_web_md\")\n labels = dataset.labels\n\n gt_labels = {e for pl in predictions_list for e in pl['groundTruth']['labels']}\n pred_labels = {e for pl in predictions_list for e in pl['predictions']['labels']}\n used_labels = list(gt_labels.union(pred_labels))\n\n #embeddings = np.array([nlp(label).vector for label in [labels[l] for l in used_labels]])\n embeddings = np.array([nlp(label).vector for label in labels])\n tsne_embedding = TSNE(n_components=2, perplexity=perplexity).fit_transform(embeddings)\n\n for index, image in enumerate(predictions_list):\n generate_semantic_map(index, image, output_path, tsne_embedding, labels, used_labels)",
"def _scaled_dot_product(self, movie_id1_index, movie_id2_index):\n ratings1 = self.movie_ratings[movie_id1_index][1]\n ratings2 = self.movie_ratings[movie_id2_index][1]\n\n # have ratings1 be the movie with fewer reviews\n if len(ratings1) > len(ratings2):\n ratings1, ratings2 = ratings2, ratings1\n\n # look for common users first\n r1 = []\n r2 = []\n\n for user_id in ratings1:\n if user_id in ratings2:\n r1.append(ratings1[user_id])\n r2.append(ratings2[user_id])\n\n # if there are too few common users, return 0 (no similarity)\n if len(r1) < 3: return 0.0, len(r1), 0.0\n\n r1 = numpy.array(r1)\n r2 = numpy.array(r2)\n\n norm1 = numpy.linalg.norm(r1)\n norm2 = numpy.linalg.norm(r2)\n\n similarity = r1.dot(r2) / (norm1 * norm2)\n\n # Scale output due to number of common users.\n buff_limit = self.buff_limit\n buff_point = self.buff_point\n n = len(r1) # number of common users\n\n x_limit = 3 * math.exp(buff_limit)\n x = 3 + (x_limit - 3) * (n - 3) / (buff_point - 3)\n buff = math.log(x) - math.log(3)\n\n if buff > buff_limit: buff = buff_limit # for input > buff_point\n if buff < 0: buff = 0 # for input < 3, which shouldn't happen\n\n return similarity * (1.0 + buff), n, similarity",
"def get_similarity(concept1='dog',concept2='dog'):\n query_args = {\"filter\": '/c/' + settings.LANGUAGE + \"/\" + concept2}\n enc_query_args = urllib.parse.urlencode(query_args)\n url = ''.join(['%s/c/%s/%s?' % (settings.BASE_ASSOCIATION_URL, settings.LANGUAGE,concept1)]) + enc_query_args\n json_data = make_http_request(url)\n parsed = parse_similar_concepts(json_data)\n if parsed:\n return parsed[0][1]\n else:\n return 0",
"def get_similarity(user_embeddings, group_embeddings):\n similarities = cosine_similarity(user_embeddings, group_embeddings)\n \n return similarities",
"def get_similarities(tags):\n similar_tags = []\n s_tags = set(tags)\n for tag in s_tags:\n for compare_tag in s_tags:\n if tag == compare_tag:\n continue\n else:\n compare = SequenceMatcher(None, tag, compare_tag).ratio()\n if compare > SIMILAR:\n if (compare_tag, tag) not in similar_tags:\n if len(tag) < len(compare_tag):\n similar_tags.append((tag, compare_tag))\n else:\n similar_tags.append((compare_tag, tag))\n return similar_tags",
"def weight_by_tags(video1, video2):\n\n tags_set_1 = set()\n tags_set_2 = set()\n\n if video1 in vid_to_tags:\n tags_set_1 = vid_to_tags[video1]\n if video2 in vid_to_tags:\n tags_set_2 = vid_to_tags[video2]\n\n # weight is the length of the intersection (set) of the two video's tags sets\n weight = len(tags_set_1.intersection(tags_set_2))\n\n return weight",
"def recommend(target_text, N, tfv, word_vector, database):\n clean_target = process_text([target_text])\n target_vector = tfv.transform(clean_target).toarray()\n similarity = (1 - pairwise_distances(target_vector, word_vector, metric = 'cosine')).reshape(-1)\n topN_idx = similarity.argsort()[-N:][::-1]\n # res = []\n # for idx in topN_idx:\n # res.append(database[idx])\n # return res\n return topN_idx",
"def concept_tags_similarity(method1, method2, nl_dict, nl_model):\n # nl_sim = gensim_lang_cossim(method1, method2, nl_dict, nl_model)\n jaccard_sim, info_dict = counter_cossim(method1.concepts, method2.concepts)\n # avg_sim = (jaccard_sim + nl_sim) / 2\n # if len(info_dict) > 1:\n # print(method2)\n return jaccard_sim, info_dict",
"def find_similar_movie(self, movie_id, num_results = 10):\n\n sim_scores = []\n\n for id2 in self.csv_data.movie_ratings:\n if id2 != movie_id:\n score = self.compare_two_movies(movie_id, id2)\n\n if score > 0.3:\n sim_scores.append((id2, score))\n\n sim_scores.sort(key = lambda e: e[1], reverse=True)\n\n return sim_scores[:num_results]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Recommend movies that are similar to target_movie_id. | def recommend(self, target_movie_id, num_recommendations):
print(" - Getting target movie record")
self.target_movie = self.db.query(Movie).filter_by(movie_id=target_movie_id).first()
assert self.target_movie is not None
self.get_movie_recommendation_pool(num_recommendations * 10)
self.get_ratings_similarity()
tags_similarity = self.get_tags_similarity()
print(" ** TAGS SIMILARITY **")
print(tags_similarity)
self.final_ratings = {}
for r in self.recommendation_pool:
# r[0] is the movie object, so r[0].movie_id gives you the movie ID
# r[1] contains the rating similarity value
pool_movie_id = r[0].movie_id
similarity = r[1]
# self.rating_similarity[pool_movie_id]
self.final_ratings[pool_movie_id] = similarity - (self.rating_similarity.get(pool_movie_id, 2.5) * self.RATING_SIMILARITY_WEIGHT)
# tags similarity addition to final ratings
for m_id, tag_similarity in tags_similarity.items():
if m_id not in self.final_ratings:
self.final_ratings[m_id] = 0.0
self.final_ratings[m_id] += tag_similarity * self.TAGS_SIMILARITY_WEIGHT | [
"def _find_similar_movies(request):\n movie_ratings = _process_data[\"movie_ratings\"]\n movie_genres = _process_data[\"movie_genres\"]\n buff_point = _process_data[\"buff_point\"]\n buff_limit = _process_data[\"buff_limit\"]\n\n movies_finder = build_similar_movies_db.SimilarMovieFinder(\n movie_genres, movie_ratings,buff_limit, buff_point)\n\n similar_movies = {} # {movie_id: [similar_movie_ids]}\n\n start = _process_data[\"movie_ratings_start\"]\n length = _process_data[\"movie_ratings_length\"]\n start_time = time.time()\n\n for i in range(start, start + length):\n similar_movie_ids, _ = movies_finder.find_similar_movie(i)\n\n if len(similar_movie_ids) > 0:\n movie_id = movies_finder.movie_ratings[i][0]\n similar_movies[movie_id] = similar_movie_ids\n\n # progress estimation\n if i == start + 200:\n t_so_far = time.time() - start_time\n seconds_left = t_so_far * (length - 200) / 200\n finish_time = datetime.datetime.now() + datetime.timedelta(seconds=seconds_left)\n _lock.acquire()\n print(\"Process estimated completion time\", finish_time)\n _lock.release()\n\n _process_data[\"similar_movies\"] = similar_movies",
"def metadata_recommender_with_keywords(self, movie_id):\n logging.debug(\n f'[{self.metadata_recommender_with_keywords.__name__}] - start function with movie id: {movie_id}')\n if movie_id not in self.movie_metadata:\n return []\n genres = self.movie_metadata[movie_id][GENRES_COL]\n keywords = self.movie_metadata[movie_id][KEYWORDS_COL]\n\n movie_scores_ref = list()\n Recommender.add_score_to_list(genres, 2, movie_scores_ref)\n Recommender.add_score_to_list(keywords, 10, movie_scores_ref)\n\n movie_points_jaccard = dict()\n\n for key, movie in self.movie_metadata.items():\n if key == movie_id:\n continue\n movie_scores = list()\n Recommender.match_with_bias(movie[GENRES_COL], genres, 2, 0, movie_scores)\n Recommender.match_with_bias(movie[KEYWORDS_COL], keywords, 10, 5, movie_scores)\n\n movie_points_jaccard[key] = float(sm.jaccard_similarity(movie_scores_ref, movie_scores))\n recommendation = sorted(movie_points_jaccard, key=lambda x: movie_points_jaccard[x], reverse=True)\n return recommendation[:5]",
"def recommend_movies(target_rating: Rating,\n movies: MovieDict, \n user_ratings: UserRatingDict,\n movie_users: MovieUserDict,\n num_movies: int) -> List[int]:\n\n # Your code here\n \n movie_score = {}\n \n ## First step = 'we will need to find users similar'\n similar_user = get_similar_users(target_rating, user_ratings, movie_users) \n \n ## Second step = 'This will be our list of candidate movies'\n ## get_candidate_mov created\n candidate_mov = get_candidate_mov(similar_user, user_ratings, target_rating)\n \n ## Third step = 'track a \"score\" for each movie'\n ## get_mov_score created\n for mov in candidate_mov:\n movie_score[mov] = get_mov_score(mov, \n user_ratings, \n similar_user, \n candidate_mov) \n \n ## Forth step = 'The return list should contain movie ids with the highest scores'\n ## sort_score_list created\n sorted_list = sort_score_list(movie_score)\n \n ## Last step = ' list should be no longer than the value of this parameter'\n final_list = sorted_list[:num_movies]\n \n return final_list",
"def model_find_similar(book_id):\n max_pred = 100\n book_id = book_data[book_data.book_id==book_id].index.values.tolist()[0]\n similarity = list(enumerate(books_similarity[book_id]))\n similarity = sorted(similarity, key=lambda x: x[1], reverse=True)\n similarity = similarity[1:max_pred]\n movie_indices = [i[0] for i in similarity]\n return movie_indices",
"def find_similar_movie(self, movie_id, num_results = 10):\n\n sim_scores = []\n\n for id2 in self.csv_data.movie_ratings:\n if id2 != movie_id:\n score = self.compare_two_movies(movie_id, id2)\n\n if score > 0.3:\n sim_scores.append((id2, score))\n\n sim_scores.sort(key = lambda e: e[1], reverse=True)\n\n return sim_scores[:num_results]",
"def get_candidate_mov(similar_user: Dict[int, float],\n user_ratings: UserRatingDict,\n target_rating: Rating,) -> List[int]: \n candidate_mov = []\n for p in similar_user:\n for mov in user_ratings[p]:\n if (mov not in candidate_mov) and (mov not in target_rating):\n if user_ratings[p][mov] >= 3.5:\n candidate_mov.append(mov)\n return candidate_mov",
"def get_similar_users(target_rating: Rating,\n user_ratings: UserRatingDict,\n movie_users: MovieUserDict) -> Dict[int, float]:\n\n # Your code here\n mov_list = []\n similar_p = {}\n for mov in target_rating:\n mov_list.append(mov)\n remove_unknown_movies(user_ratings, movie_users)\n p_watched = get_users_who_watched(mov_list, movie_users)\n for p in p_watched:\n if p in user_ratings:\n similarity = get_similarity(target_rating, user_ratings[p])\n similar_p[p] = similarity\n return similar_p",
"def get_ratings_similarity(self):\n\n # Get average rating of the target movie\n query_1 = \"SELECT AVG(rating) FROM ratings WHERE movie_id=%i\" % self.target_movie.movie_id\n res = self.db.execute(query_1).fetchall()\n target_movie_average_rating = res[0][0]\n\n pmids = []\n for rm in self.recommendation_pool:\n pmids.append(rm[0].movie_id)\n\n # rating_similarity dict contains movie_ids as keys and difference in rating as value\n self.rating_similarity = {}\n query_2 = \"\"\"\n SELECT movie_id, ABS(({tmr} - AVG(rating))) as rating_difference\n FROM ratings r\n WHERE movie_id IN ({pool_movie_ids})\n GROUP BY movie_id\n \"\"\".format(\n tmr=target_movie_average_rating,\n pool_movie_ids=str(pmids)[1:-1]\n )\n\n res = self.db.execute(query_2).fetchall()\n for rec in res:\n self.rating_similarity[rec[0]] = rec[1]",
"def get_tags_similarity(self):\n\n target_movie_tags = self.get_tags_count_(self.target_movie.movie_id)\n print(\"get_tags_similarity: target_movie_tags: %r\" % target_movie_tags)\n\n tags_similarity = {}\n\n users_query = \"select distinct user_id from tags where movie_id=%i\" % \\\n self.target_movie.movie_id\n user_records = self.db.execute(users_query).fetchall()\n print(\"get_tags_similarity: %i users have tagged this movie\"\n % len(user_records))\n\n for urec in user_records:\n user_id = urec[0]\n print(\"get_tags_similarity: Processing user: %i\" % user_id)\n\n movie_ids_query = \"\"\"\n SELECT distinct movie_id\n FROM tags\n WHERE movie_id != %i\n AND user_id=%i\n \"\"\" % (self.target_movie.movie_id, user_id)\n res = self.db.execute(movie_ids_query).fetchall()\n\n print(\"get_tags_similarity: User has tagget %i movies\" % len(res))\n if res:\n for mid_rec in res:\n movie_id = mid_rec[0]\n print(\n \"get_tags_similarity: -> Processing movie: %i\" %\n movie_id\n )\n\n movie_tags = self.get_tags_count_(movie_id, user_id)\n tags_similarity[movie_id] = self.tags_jaccard_index(\n target_movie_tags, movie_tags)\n\n return tags_similarity",
"def related(self, movie_id):\n url = \"https://yts.ag/api/v2/movie_suggestions.json?movie_id=%s\" % movie_id\n res = requests.get(url)\n dic = res.json()\n return dic['data']['movies']",
"def tune(self, movie_id1, movie_id2, top_n, expected_search_size):\n\n # buff_point is the number of reviewers common to id1 and id2\n index1 = self.find_movie_index(movie_id1)\n index2 = self.find_movie_index(movie_id2)\n\n final_score, common_reviewers, pre_boost_score = \\\n self._scaled_dot_product(index1, index2)\n\n self.buff_point = common_reviewers\n\n # reset buff_limit, then increase it until it \"movie_id2\" is in \"top_n\"\n self.buff_limit = 0\n\n while self.buff_limit < 2:\n movie_ids, scores = self.find_similar_movie(\n index1, num_results = expected_search_size * 2)\n\n movie_ids = movie_ids[:top_n]\n\n # look for movie_id2 in results, exit if found\n for movie_id in movie_ids:\n if movie_id == movie_id2: return\n\n # movie_id2 not found in results; boost movie_id2 relative to\n # the top result\n top_score = scores[0]\n\n score, common_reviewers, pre_boost_score = \\\n self._scaled_dot_product(index1, index2)\n\n self.buff_limit = self.buff_limit * top_score / score + 0.01",
"def _fuzzy_match(movie_title:str) -> str:\n with open('resources/final_movies.csv', newline='') as movies_list:\n reader = csv.DictReader(movies_list)\n movies = [movie['Movie_Titles'].strip() for movie in reader]\n match, confidence = process.extract(movie_title, movies, limit=1, scorer=fuzz.token_sort_ratio)[0]\n \n if confidence >= 70:\n movie_title = match\n \n \n movies_list.close()\n \n return movie_title",
"def movie_suggestions(self, movie_id):\n self.endpoint = 'movie_suggestions.json'\n self.payload = {'movie_id': movie_id}\n return self.__make_request()",
"def similarity(self, other):\n\n user_ratings = {}\n paired_ratings = []\n\n for rating in self.ratings:\n user_ratings[rating.movie_id] = rating\n\n for r in other.ratings:\n u_r = user_ratings.get(r.movie_id)\n\n if u_r is not None:\n paired_ratings.append((u_r.score, r.score))\n\n if paired_ratings:\n return pearson(paired_ratings)\n else:\n return 0.0",
"def similarity(dataframe):\r\n main = dataframe\r\n \r\n dataframe = feature_selection(dataframe)\r\n train_size = round((len(dataframe)*0.9))\r\n train = dataframe[:train_size]\r\n test = dataframe[train_size:]\r\n \r\n test_value = test.iloc[np.random.randint(0,10),:]\r\n \r\n #compute cosine similarity\r\n neighbors = {}\r\n for i, r in train.iterrows():\r\n similarity = np.dot(test_value,r)/(np.linalg.norm(test_value)*np.linalg.norm(r))\r\n neighbors[i] = similarity\r\n \r\n #get similary movies in descending order\r\n neighbors = {k: v for k, v in sorted(neighbors.items(), key=lambda item: item[1], reverse=True)}\r\n \r\n test_final = pd.concat([test, main], axis=1, sort=False)\r\n train_final = pd.concat([train, main], axis=1, sort=False)\r\n \r\n test_movie = test_final.loc[test_value.name,['Title', 'Rated', 'Genre', 'imdbRating']]\r\n similar_movies = train_final.loc[list(neighbors.keys())[:5],['Title','Rated', 'Genre', 'Released', 'imdbRating']]\r\n \r\n return test_movie, similar_movies",
"def recommend(target_text, N, tfv, word_vector, database):\n clean_target = process_text([target_text])\n target_vector = tfv.transform(clean_target).toarray()\n similarity = (1 - pairwise_distances(target_vector, word_vector, metric = 'cosine')).reshape(-1)\n topN_idx = similarity.argsort()[-N:][::-1]\n # res = []\n # for idx in topN_idx:\n # res.append(database[idx])\n # return res\n return topN_idx",
"def answer_checker(tweet_id, movie_name):\n\n quote_data = db.get_quote_data_from_tweet(tweet_id)\n quote = quote_data['Quote']\n correct_movie = quote_data['Movie']\n movie_list = get_all_movies_from_quote(quote)\n correct_movie_check = correct_movie.lower().capitalize().translate(str.maketrans('', '', punctuation))\n movie_name_check = movie_name.lower().capitalize().translate(str.maketrans('', '', punctuation))\n correctness = fuzz.ratio(correct_movie_check, movie_name_check)\n if correct_movie == movie_name:\n return 1\n elif correctness >= 70:\n return 2 # , correct_movie\n elif movie_matcher(movie_list, movie_name):\n return 3\n elif 40 < correctness < 70:\n return 4\n else:\n return 0",
"def supports_find_similar (self):\n \n raise NotImplementedError",
"def similar(self):\n if \"similar\" in self.links:\n reviews_json = self._mapi._retrieve_rt_json(self.links[\"similar\"])\n movies = reviews_json.get(\"movies\", None)\n # build a similarity graph\n return (Movie(jdata[\"id\"], self._mapi, jdata) for jdata in movies) if movies else None\n return None"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Print the top n recommended movies nicely | def print_recommendations(self, n=10):
print("Title: {}, Genres: {}".format(self.target_movie.title, self.target_movie.genres))
print("="*120)
r_count = 0
print('{} {} {}'.format('Similarity'.ljust(12), 'Movie'.ljust(60), 'Genres'))
print('-'*120)
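    # final_ratings is assumed to map movie_id -> similarity score, presumably already ordered best-first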
for k, v in self.final_ratings.items():
m = self.db.query(Movie).filter_by(movie_id=k).first()
print('{} {} {}'.format(str(round(v, 5)).ljust(12), m.title.ljust(60), m.genres))
r_count += 1
        if r_count >= n:  # stop once exactly n movies have been printed
break | [
"def less_criticized():\n reader = initialize_reader()\n movies_less = []\n for row in reader:\n if(row[2]):\n movies_less.append({\"name\": row[11], \"num_critic_for_users\": int(row[2])}) \n new_list = sorted(movies_less, key=lambda i: i['num_critic_for_users'])\n topTenList = new_list[:10]\n top = 0\n print(\"Top 10 Movies less criticized \\n\")\n for movie in topTenList:\n top = top + 1\n print(f\"Top {top} is {movie.get('name')} with {movie.get('num_critic_for_users')}\")",
"def top_by_num_of_ratings(self, n):\n top_movies = collections.Counter([record[1] for record in self.ratings.data]).most_common(n)\n ordered_top_movies = []\n for x in top_movies:\n try:\n ordered_top_movies.append((self.movies[x[0]], x[1]))\n except KeyError:\n continue\n #top_movies = collections.OrderedDict(map(lambda x: (self.movies[x[0]], x[1]), top_movies))\n return collections.OrderedDict(ordered_top_movies)",
"def display_top_games(collection, count, detailed):\n if detailed:\n print(f\"{'Rank':<6}{'Rating':<8}{'Weighted':<11}{'Plays':<7}\" \\\n f\"{'Last Played':<13}{'Game':<100}\")\n else:\n print(f\"{'Rank':<5}{'Game':<100}\")\n rank = 1\n rgx = re.compile('[%s]' % 'b\\'\\\"')\n for game in collection:\n if detailed:\n print(f\"{rank:<6d}{float(game['player_rate']):<8.1f}\" \\\n f\"{float(game['rating']):<11.4f}\" \\\n f\"{game['plays']:<7}{game['last_played']:<13}{rgx.sub('',game['name']):<100s}\")\n else:\n print(f\"{rank:<5d}{rgx.sub('',game['name']):<100s}\")\n if count:\n if rank < int(count):\n rank += 1\n else:\n sys.exit()\n else:\n rank += 1",
"def best_ten_movies(movie_data, rating_data):\r\n # get array of movie ids and frequency of ratings\r\n movies, ret = np.unique(rating_data['Movie Id'], return_counts=True)\r\n # get top ten rated movies' ratings\r\n # get indices that sort by number of ratings, low to high\r\n sorting_inds = np.argsort(movie_data['Mean Rating'])\r\n # indices of top 10 are at the end of the sorting list\r\n inds_best_ten = sorting_inds[-10:][::-1]\r\n \r\n return inds_best_ten.values",
"def print_top_books(self, n):\n k_books = pd.DataFrame(list(zip(list(self.kmeans.labels_),\n list(self.reviews.index))),\n columns=['k_label', 'book_id'])\n self.k_counter = Counter(k_books['k_label'])\n df_books = load_data.get_books()\n self.df_k_books = pd.merge(df_books[['best_book_id', 'title']], k_books, how='inner',\n left_on='best_book_id', right_on='book_id')\n for i in range(self.cluster_num):\n print(i)\n print(list(self.df_k_books[self.df_k_books['k_label'] == i]['title'])[:n])\n print(\"==\"*20)",
"def top_controversial(self, n):\n\n def get_variance(ratings: list):\n mean = sum(ratings) / len(ratings)\n if len(ratings) > 1:\n return sum(map(lambda x: (x - mean) * (x - mean), ratings)) / (len(ratings) - 1)\n else:\n return ratings[0]\n\n dist_movies = {}\n for x in self.ratings.data:\n try:\n dist_movies[self.movies[x[1]]] = dist_movies.setdefault(self.movies[x[1]], []) + [float(x[2])]\n except KeyError:\n continue\n movie_variances = sorted(map(lambda x: (x[0], get_variance(x[1])), dist_movies.items()),\n key=lambda y: -y[1])[:n]\n return collections.OrderedDict(movie_variances)",
"def print_top_articles():\n print('1. What are the most popular three articles of all time?')\n query = \"\"\"Select * from Top_Viewed_Articles limit 3;\"\"\"\n results = execute_query(query)\n for article, count in results:\n print('\"{}\" article viewed count is {}.'.format(article, int(count)))\n print('=' * 10)",
"def top_actors():\n reader = initialize_reader()\n actor_list = [{\"actor\": row[10], \"scored\": (float(row[4]) + float(row[25])) / 2 } for row in reader if row[4] and row[25]]\n actors = []\n for actor in actor_list:\n if actor.get('actor') not in list(x.get('actor') for x in actors):\n actors.append({\"actor\": actor.get('actor'), \"scored\": actor.get('scored')})\n else:\n actor_list.remove(actor) \n new_list = sorted(actors, key=lambda i: i['scored'], reverse=True)\n top_five = new_list[:5]\n\n if actors:\n print(\" \\n Top 5 the best actors \\n\")\n top = 0\n for actor in top_five:\n top = top + 1\n print(f\"Top {top} is {actor.get('actor')} with {actor.get('scored')} scored\")",
"def get_top_five():\n\n # this is simply a placeholder until I create the logic to query top movies based on num reviews and star ratings...\n t1 = Movie.objects.get(name__icontains='out of the past')\n t2 = Movie.objects.get(name__icontains='double indem')\n t3 = Movie.objects.get(name__icontains='big sleep')\n t4 = Movie.objects.get(name__icontains='scarlet street')\n t5 = Movie.objects.get(name__icontains='maltese falcon')\n\n top_five = [t1, t2, t3, t4, t5]\n\n return top_five",
"def print_popular_articles():\n articles = 0\n views = 1\n articlesandviews = top3populararticles()\n print \"Top 3 Popular Articles:\"\n print \"Article Title ------ Views\"\n for result in articlesandviews:\n print result[articles] + \" ------ \" + str(result[views])",
"def get_top_links(movies, N=3):\n link_counts = Counter()\n for movie in movies:\n link_counts.update(movie[2])\n\n top_links = [link for link, c in link_counts.items() if c >= N]\n\n return top_links, link_counts",
"def longest_duration():\n reader = initialize_reader()\n movies_longest = []\n for row in reader:\n if(row[3]):\n movies_longest.append({\"name\": row[11], \"duration\": int(row[3])})\n new_list = sorted(movies_longest, key=lambda i: i['duration'], reverse=True)\n topTenList = new_list[:20]\n top = 0\n print(\"\\nTop 20 Movies longest-running duration \\n\")\n for movie in topTenList:\n top = top + 1\n print(f\"Top {top} is {movie.get('name')} with {movie.get('duration')}\")",
"def print_top_s(filename):\n word_count = words_count(filename)\n\n # Each item is a (word, count) tuple.\n # Sort them so the big counts are first using key=get_count() to extract count.\n items = sorted(word_count.items(), key= lambda w : w[1], reverse=True)\n\n # Print the first 20\n for item in items[:20]:\n print (item[0], item[1])",
"def pop_ten_movies(rating_data):\r\n # get array of movie ids and frequency of ratings\r\n movies, rating_freq = np.unique(rating_data['Movie Id'], return_counts=True)\r\n # get indices that sort by number of ratings, low to high\r\n sorting_inds = np.argsort(rating_freq)\r\n # indices of top 10 are at the end of the sorting list\r\n inds_pop_ten = sorting_inds[-10:][::-1]\r\n \r\n return inds_pop_ten",
"def list_movies():\n number = 1\n for movie in movies:\n print(f\"{number}- {movie['title']}\")\n number += 1 \n print(f\"{number}- Back to main menu\")",
"def most_genres(self, n):\n movies = [[x[1], len(x[2])] for x in self.data]\n movies = sorted(movies, key=lambda elem: -int(elem[1]))[:n]\n return collections.OrderedDict(movies)",
"def top_movies(self):\n top_movies = {}\n data = requests.get(self.url.format('Top250Movies',self.api_key)).json()\n # Loops through the dictionary\n for item in data['items']:\n top_movies.setdefault(item['rank'], item['title'])\n\n return top_movies",
"def top_controversial_valuers(self, n):\n\n def get_variance(ratings: list):\n mean = sum(ratings) / len(ratings)\n if len(ratings) > 1:\n return sum(map(lambda x: (x - mean) * (x - mean), ratings)) / (len(ratings) - 1)\n else:\n return ratings[0]\n\n dist_movies = {}\n for x in self.ratings.data:\n dist_movies[x[0]] = dist_movies.setdefault(x[0], []) + [float(x[2])]\n movie_variances = sorted(map(lambda x: (x[0], get_variance(x[1])), dist_movies.items()),\n key=lambda y: -y[1])[:n]\n return collections.OrderedDict(movie_variances)",
"def scrape_imdb_top250():\n # IMDB top 250 Movies\n source = requests.get('https://www.imdb.com/chart/top/?ref_=nv_mv_250').text\n\n soup = BeautifulSoup(source, 'lxml')\n\n table = soup.find('tbody', class_='lister-list')\n\n rank = 1\n movies = []\n for rowRaw in table.find_all('tr'):\n row = rowRaw.find('td', class_='titleColumn')\n title = row.a.text\n year = row.span.text.strip(\"()\")\n ratingRaw = rowRaw.find('td', class_='ratingColumn imdbRating')\n rating = float(ratingRaw.text)\n no_of_users = ratingRaw.strong['title'].split(' ')[3]\n no_of_users = int(no_of_users.replace(',', ''))\n review_url = row.a['href']\n movie = (\n rank,\n title,\n year,\n rating,\n no_of_users,\n review_url\n )\n rank += 1\n movies.append(movie)\n\n return movies",
"def top_by_ratings(self, n, metric=\"average\"):\n dist_movies = {}\n for x in self.ratings.data:\n try:\n dist_movies[self.movies[x[1]]] = dist_movies.setdefault(self.movies[x[1]], []) + [float(x[2])]\n except KeyError:\n continue\n\n if metric == \"average\":\n average_ratings = sorted(map(lambda x: (x[0], sum(x[1]) / len(x[1])), dist_movies.items()),\n key=lambda y: -y[1])[:n]\n return collections.OrderedDict(average_ratings)\n else:\n\n def get_median(x: list):\n if len(x) % 2:\n return x[len(x) // 2]\n else:\n return (x[len(x) // 2] + x[(len(x) + 1) // 2]) / 2\n\n median_ratings = sorted(list(map(lambda x: (x[0], get_median(x[1])), dict(\n map(lambda x: (x[0], list(sorted(x[1]))), dist_movies.items())).items())), key=lambda x: -x[1])[:n]\n return collections.OrderedDict(median_ratings)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the kernel id. | def get_kernel_id():
    import os
    import ipykernel
connection_file = os.path.basename(ipykernel.get_connection_file())
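    # The connection file is named like "kernel-<id>.json"; take the part between the first '-' and the '.'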
return connection_file.split('-', 1)[1].split('.')[0] | [
"def get_instance_kernel(self,instance_id):\n instkernel = conn.get_instance_attribute(instance_id, 'kernel')\n try:\n str_kernel = str(instkernel).split(':', 2) #convert object to string and split to parse the string elements\n a = str_kernel[1].split(\"'\", 2)\n kernel = a[1]\n logger.info(\"Kernel-ID [%s]\" % (kernel))\n except:\n kernel=None\n logger.warn(\"Kernel-ID could not be determined [%s]\" % (kernel))\n finally:\n return kernel",
"def get_device_id():\n\n import ctypes as ct\n from .util import safe_call as safe_call\n from .library import backend as backend\n\n if (backend.name() != \"opencl\"):\n raise RuntimeError(\"Invalid backend loaded\")\n\n idx = ct.c_int(0)\n safe_call(backend.get().afcl_get_device_id(ct.pointer(idx)))\n return idx.value",
"def get_id(self):\n return self.data[self.system_idx][\"id\"]",
"def device_id(self):\n return self.id",
"def node_id_device(self):\n ret = self._get_attr(\"nodeIdDevice\")\n return ret",
"def kernel_spec(self) -> str:\n return pulumi.get(self, \"kernel_spec\")",
"def kernel() -> str:\n x = platform.release()\n return x",
"def get_dev_gid() -> int:\n return int(subprocess.check_output([\"id\", \"-g\"])[:-1])",
"def find_kernel_base():\n return idaapi.get_fileregion_ea(0)",
"def node_id(self):\n ret = self._get_attr(\"nodeId\")\n return ret",
"def get_node_id():\n from smdebug.core.json_config import get_node_id_from_resource_config # prevent circular import\n\n rank = get_distributed_worker()\n\n node_id = get_node_id_from_resource_config()\n rank = rank if rank is not None else os.getpid()\n node_id = f\"{rank}-{node_id}\" if node_id else f\"{rank}_{socket.gethostname()}\"\n return node_id.replace(\"_\", \"-\")",
"def id(self):\n\n from mbed_cloud.foundation._custom_methods import pre_shared_key_id_getter\n\n return pre_shared_key_id_getter(self=self)",
"def read_device_id(self):\n self._is_tool_not_connected_raise()\n self._is_session_not_active_raise()\n\n return self.programmer.read_device_id()",
"def get_log_id(self):\n return # osid.id.Id",
"def get_kernel(self):\n return self.kernel",
"def ftduino_id_get(self):\n return self.comm('ftduino_id_get')",
"def get_dev_uid() -> int:\n return int(subprocess.check_output([\"id\", \"-u\"])[:-1])",
"def id(self):\n return self.joystick_obj.get_id()",
"def getSysObjectId( self ):\n\t\t# XXX find something useful...\n\t\treturn '1.3.6.1.1.2.3.4.1'"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reload report template data. | def reloadReportData(self, tmpl_filename=None):
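    # Fall back to the template filename remembered from the last load when none is given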
if tmpl_filename is None:
tmpl_filename = self._report_template_filename
report_gen_system.iqReportGeneratorSystem.reloadReportData(self, tmpl_filename) | [
"def reload_data(self):\r\n self.pre_requisite()",
"def reset(self):\n self.__template = None",
"def reloadData(self, event):\n print(\"Reload data from the data folder. \")\n self.dataMgr.loadCSVData('D')\n gv.iChartPanel1.updateDisplay()",
"def load_generated_report(self, gen_report: dict) -> None:\n self.dataframes_dict = object_conversions.json_dict_to_dataframes_dict(gen_report)\n self.json_dict = object_conversions.dataframes_dict_to_json_dict(self.dataframes_dict)\n self.generated_report = self.dataframes_dict",
"def template_loaded(self, template):\n self.template = template",
"def refreshEditorTemplates():\n pass",
"def reload(self):\n if isinstance(self.source, Config):\n self.source.reload()",
"def refreshView(self):\n HopperLowLevel.refreshView(self.__internal_document_addr__)",
"def refresh(self):\n if 'view' in self.objectIds():\n self.manage_delObjects(['view'])\n self._set_form()\n self._set_views()\n return 'refreshed form and pagetemplate'",
"def reload(self):\n s, self._data = self.client.get(self.endpoint)",
"def refresh(self):\n\n self.article_footer_html = Website.read_file(self.config.value('template_path'),\n self.ARTICLE_ITEM_FOOTER_TEMPLATE_FILENAME)\n self.static_footer_html = Website.read_file(self.config.value('template_path'),\n self.STATIC_ITEM_FOOTER_TEMPLATE_FILENAME)\n\n css_contents = Website.read_file(self.config.value('resources_path'),\n self.config.value('website_css_filename'))\n css_hash = hashlib.md5(bytes(css_contents, encoding='utf-8')).hexdigest()\n self.css_filename = self.config.value('website_css_filename') + '?' + css_hash",
"def gen_report_data(self):\n pass",
"def reload(self):\n if not self.filePath:\n return\n\n if os.path.exists(self.filePath):\n with open(self.filePath, \"r\") as f:\n file_contents = f.read()\n\n tabs_layout_children = pm.formLayout(self.tab, q=True, ca=True)\n tab = tabs_layout_children[0]\n\n pm.cmdScrollFieldExecuter(tab, e=True, text=file_contents)\n logging.info(\"Tab Reloaded: {}\".format(self.fileName))",
"def refresh_pdf(self, event):\n self.show_message('Refreshing pdf ...')\n self.pdf_file = self.report.generate_pdf(self.raweditor.GetValue(), FORCE=True)\n\n self.pdfviewer.load_file(self.pdf_file)\n self.pdfviewer.Refresh()\n self.show_message('Reloaded pdf')",
"def clear_report_results(self):",
"def template_data(self, template_data):\n\n self._template_data = template_data",
"def populatetemplate(new_template,output_type,output,RPI,yeartocalculate):\n #get the answerfile\n for f in glob(output + 'final answerset*.csv'):\n answerfile = pd.read_csv(f)\n \n #get the answerfile lookup\n answerslookup = pd.read_csv(output + 'answerfile_template_lookup.csv')\n \n #join the foreign key fields from the template\n answerfile_with_lookup = pd.merge(answerfile,answerslookup,how='left',left_on='parts_of_the_grouping',right_on='final_answers')\n \n #join the answerfile to the lookup file\n merged_template = pd.merge(new_template,answerfile_with_lookup[['Sector','Ticket category','average_price_change','superweights','percentage_share_of_superweights_in_grouping']]\n ,how='left',left_on=['Sector','Ticket category'], right_on=['Sector','Ticket category'],suffixes=('x','y'))\n \n #duplicate rows generated during lookup are deleted here\n merged_template = merged_template.drop_duplicates()\n merged_template.reset_index()\n \n #set the RPI value here\n merged_template.at[merged_template.index.max(),'value'] = RPI\n \n\n #prepare all tickets, all operator annual change here\n allticketsalloperators = getallticketsalloperators(merged_template, output_type , yeartocalculate)\n \n\n merged_template['alltickets'] = np.where(\n #sector merge\n ((merged_template['Sector']=='All operators') & (merged_template['Ticket category']=='All tickets') & (merged_template['Year & stats']=='Average change in price (%)')\n |\n #tt merge\n (merged_template['Sector']=='All tickets')&(merged_template['Ticket category']=='All tickets') & (merged_template['Year & stats']=='Average change in price (%)')),\n allticketsalloperators,\n merged_template['value']\n )\n \n merged_template['value'] = np.where((merged_template['Year & stats']=='Average change in price (%)') &((merged_template['Sector']!='All tickets')| (merged_template['Sector']!='All operators' ) ) \n ,merged_template['average_price_change']\n ,merged_template['value'])\n \n \n merged_template['value'] = np.where(merged_template['Year & stats']=='Expenditure weights (%) total',merged_template['percentage_share_of_superweights_in_grouping']*100,merged_template['value'])\n \n # 'all tickets' are fixed at 100 of percentage share \n merged_template['alltickets'] = np.where(\n ((merged_template['Year & stats']=='Expenditure weights (%) total') &(merged_template['Sector']=='All tickets') &(merged_template['Ticket category']=='All tickets') ) |\n ((merged_template['Year & stats']=='Expenditure weights (%) total') & (merged_template['Sector']=='All operators')&(merged_template['Ticket category']=='All tickets'))\n \n ,100.000,merged_template['alltickets'])\n\n #remove unecessary columns\n del merged_template['average_price_change']\n del merged_template['percentage_share_of_superweights_in_grouping']\n del merged_template['superweights']\n \n #calculated the latest year change; shift 1 = previous year, shift -1 = Average change in year\n merged_template = getlatestyearchange(merged_template,'value',yeartocalculate)\n merged_template = getlatestyearchange(merged_template,'alltickets',yeartocalculate)\n\n #get yoy change in realterms\n merged_template = getyoychange(merged_template,'value',yeartocalculate,RPI)\n merged_template = getyoychange(merged_template,'alltickets',yeartocalculate,RPI)\n\n #get allitems index\n merged_template['value']= np.where((merged_template['Sector']=='RPI') & (merged_template['Ticket category']=='All items index') & (merged_template['Year & stats']==yeartocalculate) |\n (merged_template['Sector']=='RPI (all items)') & 
(merged_template['Ticket category']=='RPI (all items)') & (merged_template['Year & stats']==yeartocalculate),\n ((merged_template['value'].shift(1) #previous year's value\n * RPI)/100)+merged_template['value'].shift(1)\n ,\n merged_template['value']\n )\n \n #define the RPI change since the beginning of the series here\n globalRPI = merged_template['value'].to_list()[-2]\n \n #get yonstart change in realterms\n merged_template = getyonstartchange(merged_template,'value',yeartocalculate,globalRPI)\n merged_template = getyonstartchange(merged_template,'alltickets',yeartocalculate,globalRPI)\n\n #where value is blank, fill with 'all ticket' values\n merged_template['value'].fillna(merged_template['alltickets'],inplace=True)\n\n #drop the redundant 'alltickets' column\n del merged_template['alltickets']\n\n return merged_template",
"def refresh( self ):\n dt = from_json(open(self.json_spec,\"r\"))\n if dt:\n rows,cols = dt.get_bounds()\n for idx in range(cols):\n self.replace_column(idx,dt.get_column(idx))\n\n if dt.get_name():\n self.name = dt.get_name()\n\n self.refresh_minutes = dt.refresh_minutes\n\n self.changed()\n DataTable.refresh(self)",
"def reset(self):\n self.data = {}\n self.updates = {}\n self._convert_raw(self.raw_filepath)",
"def persist_report():"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get report folder path. | def getReportDir(self):
return self._report_dir | [
"def get_reportDirectory(self):\n return self.get_basefdir()",
"def reports_dir():\n return _mkifnotexists(\"web/reports\")",
"def def_report_path():\n if os.name == 'nt':\n return(getwindoc())\n else:\n return(os.getenv(\"HOME\"))",
"def filename(self):\n # create the folder if it doesn't exist'\n if not os.path.exists(self.report_path):\n os.makedirs(self.report_path)\n time_now = datetime.now().strftime(\"%m_%d_%Y_%H_%M\")\n filename = f\"{self.report_path}/report_{time_now}.csv\"\n return os.path.join(self.report_path, filename)",
"def create_report_dir(self) -> str:\n return create_report_dir_with_rotation(self.dir)",
"def report_path(\n project: str,\n location: str,\n report_config: str,\n report: str,\n ) -> str:\n return \"projects/{project}/locations/{location}/reportConfigs/{report_config}/reports/{report}\".format(\n project=project,\n location=location,\n report_config=report_config,\n report=report,\n )",
"def __set_report_path__(self):\n self.report_path = os.path.join(self.get_report_path(), \"pattern_and_similarity_report\")\n Path(self.report_path).mkdir(parents=True, exist_ok=True)",
"def _GetCoverageHtmlReportPath(file_or_dir_path, output_dir):\n assert os.path.isabs(output_dir), 'output_dir must be an absolute path.'\n html_path = (os.path.join(os.path.abspath(output_dir), 'coverage') +\n os.path.abspath(file_or_dir_path))\n if os.path.isdir(file_or_dir_path):\n return os.path.join(html_path, DIRECTORY_COVERAGE_HTML_REPORT_NAME)\n else:\n return os.extsep.join([html_path, 'html'])",
"def log_folder(self):\n ret = self._get_attr(\"logFolder\")\n return ret",
"def getLogPath(self):\n # Define the file name for logging\n temp = datetime.now().strftime('%Y_%m_%d-%H-%M-%S')\n logFileName = \"gsp_inventory_xlsx2tsv_\" + temp + \".log\"\n\n # If the platform is windows, set the log file path to the current user's Downloads/log folder\n if sys.platform == 'win32' or sys.platform == 'win64': # Windows\n logFilePath = os.path.expandvars(r'%USERPROFILE%')\n logFilePath = os.path.join(logFilePath, 'Downloads')\n logFilePath = os.path.join(logFilePath, 'log')\n if os.path.exists(logFilePath):\n return os.path.join(logFilePath, logFileName)\n else: # Create the log directory\n os.mkdir(logFilePath)\n return os.path.join(logFilePath, logFileName)\n\n # If Linux, set the download path to the $HOME/downloads folder\n elif sys.platform == 'linux' or sys.platform == 'linux2': # Linux\n logFilePath = os.path.expanduser('~')\n logFilePath = os.path.join(logFilePath, 'log')\n if os.path.exists(logFilePath):\n return os.path.join(logFilePath, logFileName)\n else: # Create the log directory\n os.mkdir(logFilePath)\n return os.path.join(logFilePath, logFileName)",
"def subdir(self) -> str:\n return self._subdir",
"def get_folder(self):\n return os.path.join(\n settings.PRIVATE_STORAGE_ROOT, Syllabus.SYLLABUS_FILES_LOCATION,\n str(self.unique_id)[0:2])",
"def get_output_folder(self):\n return self.output_folder",
"def _job_log_directory(self, job_name: str) -> Path:\n parts = job_name.split(\"/\")\n if len(parts) > 1:\n path_components = [self.log_base_directory]\n # The last portion of the job name does not form part of the directory\n # because it is used to name the log file itself.\n path_components.extend([parts[:-1]])\n return Path(*path_components)\n return self.log_base_directory",
"def _get_resourceFolder(self) -> \"std::string\" :\n return _core.Workspace__get_resourceFolder(self)",
"def relative_folder_path(self):\n return copy.deepcopy(self.__PROJECT.RELATIVE_PATH_TO_OUTPUT_FOLDER)",
"def getMeasurementStatPath(self, meas: str) -> str:\n return os.path.join(self.statPath, meas)",
"def report_config_path(\n project: str,\n location: str,\n report_config: str,\n ) -> str:\n return \"projects/{project}/locations/{location}/reportConfigs/{report_config}\".format(\n project=project,\n location=location,\n report_config=report_config,\n )",
"def get_result_path():\n return os.getcwd() + '/' + _result_folder",
"def export_dir(self):\n\n return self._export_dir"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Generate report and save it in RTF file. | def _genRTFReport(self, report):
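    # Default to the already loaded report template when no report structure is passed in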
if report is None:
report = self._report_template
data_rep = self.generateReport(report)
if data_rep:
rep_file_name = os.path.join(self.getReportDir(), '%s_report_result.rtf' % data_rep['name'])
template_file_name = os.path.abspath(data_rep['generator'])
log_func.info(u'Save report <%s> to file <%s>' % (template_file_name, rep_file_name))
data = self._prevGenerateAllVariables(data_rep['__data__'])
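        # Fill the RTF template with the prepared variable data and write the result file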
rtf_report.genRTFReport(data, rep_file_name, template_file_name)
return rep_file_name
return None | [
"def generate_report():",
"def save_report():\n ct.save_report()",
"def generate_report(self):\n if self.report_format == \"csv\":\n print(\"[+] Building the report -- you selected a csv report.\")\n self.output_csv_report = self._build_output_csv_file_name()\n self.write_csv_report()\n elif self.report_format == \"word\":\n print(\"[+] Building the report -- you selected a Word/docx report.\")\n print(\"[+] Looking for the template.docx to be used for the Word report.\")\n if os.path.isfile(\"template.docx\"):\n print(\"[+] Template was found -- proceeding with report generation...\")\n print(\"L.. This may take a while if you provided a lot of \\\nIDs for a combined report or have a lot of targets.\")\n self.output_word_report = self._build_output_word_file_name()\n self.write_word_report()\n else:\n print(\"[!] Could not find the template document! Make sure \\\n'template.docx' is in the GoReport directory.\")\n sys.exit()\n elif self.report_format == \"quick\":\n print(\"[+] Quick report stats:\")\n self.get_quick_stats()",
"def oo_render(self, context=None):\n if context is None: context = {}\n self.log(\"Generating report: step 1...\")\n\n # Generate the stream from the template (Relatorio)\n data = self.generate(**context).render().getvalue()\n\n self.log(\"...step 1 done.\")\n\n #\n # Next steps need OpenOffice to perform some tasks\n # like file insertion or format conversions.\n # Using OpenOffice brings some overhead, so we will try to avoid\n # connecting to it unless it is necesary.\n #\n if len(self.oo_subreports)>0 or (self.output_format != self.source_format):\n self.log(\"Step 2....\")\n\n # Connect to OpenOffice\n oohelper = OOHelper(self.openoffice_port, self.autostart_openoffice, logger=self.log)\n\n #\n # Create a temporary (input for OpenOffice) file\n #\n dummy_fd, temp_file_name = tempfile.mkstemp(suffix=\".%s\" % self.source_format, prefix='openerp_oot_i_')\n temp_file = open(temp_file_name, 'wb')\n try:\n #\n # Write the data to the file\n #\n try:\n temp_file.write(data)\n finally:\n temp_file.close()\n\n # Reopen the file with OpenOffice\n document = oohelper.open_document(temp_file_name)\n\n #\n # Insert subreport files if needed\n #\n if len(self.oo_subreports)>0:\n self.log(\"Inserting subreport files\")\n for subreport in self.oo_subreports:\n placeholder_text = \"${insert_doc('%s')}\" % subreport\n oohelper.replace_text_with_file_contents(document, placeholder_text, subreport)\n # Remove the subreport temp file\n os.unlink(subreport)\n\n #\n # Save the file (does the format conversion) on a temp file\n #\n dummy_fd, output_file_name = tempfile.mkstemp(suffix=\".%s\" % self.output_format, prefix='openerp_oot_o_')\n try:\n # Save the document\n oohelper.save_document(document, output_file_name, close_document=True)\n\n #\n # Read back the data\n #\n output_file = open(output_file_name, 'rb')\n try:\n data = output_file.read()\n finally:\n output_file.close()\n finally:\n # Remove the temp file\n os.unlink(output_file_name)\n finally:\n # Remove the temp file\n os.unlink(temp_file_name)\n\n self.log(\"...step 2 done.\")\n\n\n # TODO: As a long term feature it would be nice to be able to tell OpenOffice to refresh/recalculate the Table Of Contents (if there is any)\n\n\n # Return the data (byte string)\n return data",
"def generate_report(request):\n report_merge_dict = report_content_util.get_report_merge_dict(request)\n # print(\"GLOBAL MERGE DICT:\", report_merge_dict)\n # for k, v in report_merge_dict.items():\n # print(\"{} : {}\".format(k, v))\n\n report_filepath = create_docx_document(report_merge_dict)\n\n return report_filepath",
"def generate(filename, title, body_text):\n styles = getSampleStyleSheet()\n report = SimpleDocTemplate(filename)\n report_title = Paragraph(title, styles[\"h1\"])\n report_info = Paragraph(body_text, styles[\"BodyText\"])\n empty_line = Spacer(1,20)\n report.build([report_title, empty_line, report_info, empty_line])",
"def _write_report(self, receipt_files):\n \n # Render the report\n \n template = Template(text=pkg_resources.resource_string(\n \"cass_check\", \"templates/report.mako\"))\n index_path = os.path.join(self.report_dir, \"index.html\")\n self.log.info(\"Writing report index to {index_path}\".format(\n index_path=index_path))\n with open(index_path, \"w\") as f:\n f.write(template.render(receipt_files=receipt_files))\n \n # copy assets\n for asset_name in pkg_resources.resource_listdir(\"cass_check\", \n \"assets/\"):\n \n res_name = \"assets/{asset_name}\".format(asset_name=asset_name)\n dest = os.path.join(self.report_dir, res_name)\n self.log.info(\"Copying report asset {asset_name} to \"\\\n \"{dest}\".format(asset_name=asset_name, dest=dest))\n \n with pkg_resources.resource_stream(\"cass_check\", res_name) as src:\n file_util.ensure_dir(os.path.dirname(dest))\n with open(dest, \"w\") as f:\n f.write(src.read())\n \n return index_path",
"def finalizeReport(self):\n self.writeReport(\"\"\"<hr /><div style=\"font-size:9pt;\">(C) This report has been generated by <a href=\"http://code.google.com/p/pytbull/\">pytbull</a>, Sebastien Damaye, aldeid.com</div>\"\"\")\n self.writeReport( \"</body></html>\" )\n self.finalReport.close()",
"def writeReport(self, content):\n self.finalReport.write( content )",
"def _dump_pdf(self) -> None:\n if shutil.which(\"latexmk\") is None and shutil.which(\"pdflatex\") is None:\n # No LaTeX Compiler is available\n self.doc.generate_tex(os.path.join(self.save_dir, self.report_name))\n suffix = '.tex'\n else:\n # Force a double-compile since some compilers will struggle with TOC generation\n self.doc.generate_pdf(os.path.join(self.save_dir, self.report_name), clean_tex=False, clean=False)\n self.doc.generate_pdf(os.path.join(self.save_dir, self.report_name), clean_tex=False)\n suffix = '.pdf'\n print(\"FastEstimator-TestReport: Report written to {}{}\".format(os.path.join(self.save_dir, self.report_name),\n suffix))",
"def generate_report_file(self):\n # Organize raw data into lines for HTML processing\n # All of self.users sorted\n sorteduserlist = sorted(self.users.itervalues(),\n key=lambda user: user.get_wasted_hours_percent(),\n reverse=True)\n\n # Generate report lines, with some cutoffs\n all_report_lines_gen = (\n (user.user,\n self.vo,\n NiceNum.niceNum(user.failure['CoreHours'], 1),\n NiceNum.niceNum(user.get_wasted_hours_percent(), 0.1),\n NiceNum.niceNum(user.total_CoreHours, 1),\n NiceNum.niceNum(user.failure['Njobs'], 1),\n NiceNum.niceNum(user.get_job_failure_percent(), 0.1),\n NiceNum.niceNum(user.total_Njobs, 1))\n for user in sorteduserlist\n # Cutoffs: Core hours and Wasted Hours Percent\n if user.total_CoreHours >= self.hours_cutoff\n and user.get_wasted_hours_percent() / 100. >= self.perc_cutoff\n )\n\n # Enforce cutoff for number of entries to include (self.numrank)\n top_lines_gen = ((count,) + line\n for count, line in enumerate(all_report_lines_gen, start=1)\n if count <= self.numrank\n )\n\n # Generate HTML for report\n\n # Column info in (column name, column alignment) form\n columns_setup = [('Rank', 'right'),\n ('User', 'left'),\n ('VO', 'left'),\n ('Hours Wasted', 'right'),\n ('% Hours Wasted of Total', 'right'),\n ('Total Used Wall Hours', 'right'),\n ('Total Jobs Failed', 'right'),\n ('% Jobs Failed', 'right'),\n ('Total Jobs Run', 'right')]\n table = ''\n\n # Generate table lines\n def tdalign(info, align):\n \"\"\"HTML generator to wrap a table cell with alignment\"\"\"\n return '<td align=\"{0}\">{1}</td>'.format(align, info)\n\n lineal = [elt[1] for elt in columns_setup]\n for line in top_lines_gen:\n if self.verbose:\n print line\n linemap = zip(line, lineal)\n table += '\\n<tr>' + ''.join((tdalign(info, al) for info, al in linemap)) + '</tr>'\n\n if len(table) == 0:\n self.logger.info('The report is empty. Will not send anything.')\n sys.exit(0)\n\n # Generate header HTML\n headernames = (elt[0] for elt in columns_setup)\n header = ''.join(('<th>{0}</th>'.format(elt) for elt in headernames))\n\n # Put it all into the template\n htmldict = dict(title=self.title, table=table, header=header)\n\n with open(self.template, 'r') as f:\n self.text = f.read()\n\n self.text = self.text.format(**htmldict)\n\n return",
"def generate_report(self) -> None:\n csv_data = self._run()\n self._write_csv(csv_data)",
"def make_book(self):\n\n self.doc.init()\n newpage = 0\n for rpt in self.rptlist:\n if newpage:\n self.doc.page_break()\n newpage = 1\n if rpt:\n rpt.begin_report()\n rpt.write_report()\n self.doc.close()\n \n if self.open_with_app.get_active():\n open_file_with_default_application(self.target_path)",
"def edit_report(self):\n\n\t\t#Instantiates the rules engine class as a checker object with a\n\t\t#LAR schema, a TS schema, and geographic geographic data.\n\t\tchecker = rules_engine(lar_schema=self.lar_schema_df,\n\t\t\tts_schema=self.ts_schema_df, geographic_data=self.geographic_data)\n\n\t\t#Seperates data from the filepath and filename into a TS dataframe\n\t\t#and a LAR dataframe.\n\t\tts_df, lar_df = utils.read_data_file(path=self.edit_report_config['data_filepath'],\n\t\t\tdata_file=self.edit_report_config['data_filename'])\n\n\t\t#Loads the TS and LAR dataframes into the checker object.\n\t\tchecker.load_data_frames(ts_df, lar_df)\n\n\t\t#Applies each function in the rules engine that checks for edits\n\t\t#and creates a results list of edits failed or passed.\n\t\tfor func in dir(checker):\n\t\t\tif func[:1] in (\"s\", \"v\", \"q\") and func[1:4].isdigit()==True:\n\t\t\t\tgetattr(checker, func)()\n\n\t\t#Creates a dataframe of results from the checker.\n\t\treport_df = pd.DataFrame(checker.results)\n\n\t\t#Writes the report to the filepath and name designated in\n\t\t#the test_fielpaths yaml\n\t\tedit_report_path = self.edit_report_config['edit_report_output_filepath']\n\n\t\tif not os.path.exists(edit_report_path):\n\t\t\tos.makedirs(edit_report_path)\n\n\t\treport_df.to_csv(edit_report_path +self.edit_report_config['edit_report_output_filename'])\n\n\t\t#Logs the result.\n\t\tlogging.info(\"Edit Report has been created in {filepath}\".format(\n\t\t\tfilepath=edit_report_path))",
"def generateReportAndLog(self, workspace_folder, output_file, log_file, report_file, report_title, name): \n rebot_payload = '%s/**/*.xml' % workspace_folder\n rebotCommand = \"rebot --outputdir %s --output %s --log %s --report %s --reporttitle '%s' --name '%s' %s\" % (workspace_folder, output_file, log_file, report_file, report_title, name, rebot_payload) \n print \"(PyroFactory) [generateReportAndLog]: Rebot Consolidation \" \n print \"(PyroFactory,generateReportAndLog) Starting the following rebot instance:-------> %s\" % rebotCommand \n rc = os.system(rebotCommand)\n return rc",
"def write_night_report_to_file(obsdate, txt, dirname='./logs/'):\n\n\n header = \"\"\"<html>\n<head><title>SALT Night Report for {0}</title></head>\n<body bgcolor=\"white\" text=\"black\" link=\"blue\" vlink=\"blue\">\\n\n \"\"\".format(obsdate)\n\n bottom=\"\"\"\\n<br><center> Updated: {0} </center>\n </body> \n </hmtl>\"\"\".format(datetime.now().strftime('%Y-%m-%d %H:%M:%S')) \n html_txt = header+txt + bottom\n filename = 'night_report_{0}.html'.format(obsdate)\n\n with open(dirname+filename, 'w') as f:\n f.write(html_txt)",
"def write_report(stats, to):\n rendered_template = _render(stats)\n\n with open(to, \"wb\") as f:\n f.write(rendered_template.encode(\"utf-8\"))",
"def generate(self, *args, **kwargs):\n def _subreport(field=None, filename=None, source=None, filepath=None, source_format=None, encoding=None, context=None):\n \"\"\"\n Method that can be referenced from the template to include subreports.\n When called it will process the file as a template,\n write the generated data to a temp file, \n and return a reference (filename) to this output file for later usage.\n The OOTemplate will will use this data, after the main template\n is generated, to do an insertion pass using UNO.\n \"\"\"\n # Field is a binary field with a base64 encoded file that we will\n # use as source if it is specified\n source = field and base64.decodestring(field) or source\n\n #\n # Get the current report context so the subreport can see\n # the variables defined on the report.\n #\n if not context:\n context = {}\n try:\n frame = inspect.stack()[1][0]\n locals_context = frame.f_locals.copy()\n data_context = locals_context.get('__data__') or context\n if data_context and isinstance(data_context, genshi.template.base.Context):\n for c in data_context.frames:\n context.update(c)\n else:\n context = data_context\n except:\n self.log(\"Warning: Failed to get the context for the subreport from the stack frame!\")\n\n\n # Get the source_format from the file name:\n if not source_format and (filepath or filename):\n source_format = splitext(filepath or filename)[1][1:]\n source_format = source_format or self.source_format\n assert source_format\n\n #\n # Process the subreport file like a normal template\n # (we are recursive!)\n #\n self.log(\"Generating subreport (%s)...\" % source_format)\n subreport_template = OOTemplate(source=source,\n filepath=filepath,\n filename=filename,\n encoding=encoding,\n source_format=source_format,\n output_format=self.source_format,\n openoffice_port=self.openoffice_port,\n autostart_openoffice=self.autostart_openoffice,\n logger=self.log)\n data = subreport_template.oo_render(context)\n\n #\n # Save the subreport data to a temp file\n #\n dummy_fd, temp_file_name = tempfile.mkstemp(suffix=\".%s\" % source_format, prefix='openerp_oot_s_')\n temp_file = open(temp_file_name, 'wb')\n try:\n temp_file.write(data)\n finally:\n temp_file.close()\n\n #\n # Save a reference to this file for later usage\n #\n self.oo_subreports.append(temp_file_name)\n self.log(\"...subreport generated as %s.\" % temp_file_name)\n\n # Return a placeholder that will be replaced later,\n # on the insertion step, with the file contents:\n return \"${insert_doc('%s')}\" % temp_file_name\n\n # Add the include function to the report context\n kwargs['subreport'] = _subreport\n\n # Generate the template\n res = super(OOTemplate, self).generate(*args, **kwargs)\n\n return res",
"def export_as_text_file(forecast_report, new_file_path):\r\n\r\n with open(new_file_path, 'w+') as txt_file:\r\n txt_file.write(forecast_report)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Open RTF report in word in preview mode. | def previewOffice(self, rtf_filename):
try:
# Connect with Word
word_app = win32com.client.Dispatch('Word.Application')
        # Hide the Word window while the document is being opened
        word_app.Visible = 0
# Open RTF
rep_tmpl_book = word_app.Documents.Open(rtf_filename)
# Show
word_app.Visible = 1
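        # Switch the opened document to Word's print preview mode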
rep_tmpl_book.PrintPreview()
return True
except pythoncom.com_error:
log_func.fatal(u'Error preview report <%s>' % rtf_filename)
return False | [
"def openOffice(self, rtf_filename):\n try:\n # Connection with Word\n word_app = win32com.client.Dispatch('Word.Application')\n # Hide\n word_app.Visible = 0\n # Open RTF\n rep_tmpl_book = word_app.Open(rtf_filename)\n # Show\n word_app.Visible = 1\n return True\n except pythoncom.com_error:\n log_func.fatal(u'Error open report <%s>' % rtf_filename)\n return False",
"def printOffice(self, rtf_filename):\n try:\n # Connect with Word\n word_app = win32com.client.Dispatch('Word.Application')\n # Hide\n word_app.Visible = 0\n # Open RTF\n rep_tmpl_book = word_app.Documents.Open(rtf_filename)\n # Show\n word_app.Visible = 1\n \n rep_tmpl_book.PrintOut()\n return True\n except pythoncom.com_error:\n log_func.fatal(u'Error print report <%s>' % rtf_filename)\n return False",
"def printPreview(self):\n\t\tpdf_file = NamedTemporaryFile(suffix='.pdf',delete=False)\n\t\ttry:\n\t\t\tself.exportToPDF(pdf_file)\n\n\t\t\t\"\"\"\n\t\t\t#this clever pure-python implementation is probably not as portable as letting Qt handle it\n\t\t\t#though I'm not sure\n\n\t\t\t#get appropriate \"document opener\" program for the platform\n\t\t\tif 'linux' in sys.platform:\n\t\t\t\tprog_name = 'xdg-open'\n\t\t\telif sys.platform == 'darwin': #Mac OS X\n\t\t\t\tprog_name = 'open'\n\t\t\telif sys.platform == 'win32': #64 bit windows is still \"win32\"\n\t\t\t\tprog_name = 'start'\n\t\t\telse:\n\t\t\t\traise NotImplemented('Your Platform (%s) does not support the print preview feature,'\\\n\t\t\t\t\t'Export to PDF instead, please report this error' % sys.platform)\n\t\t\tsubprocess.check_call([prog_name, pdf_file.name])\n\t\t\t\"\"\"\n\n\t\t\tQDesktopServices.openUrl(QUrl.fromLocalFile(pdf_file.name))\n\t\tfinally:\n\t\t\tpdf_file.close()\n\t\t\t#FIXME? since delete=False has been passed, this file will not be deleted when closed (deisreable, since we're\n\t\t\t#handing it off to the PDF viewer, but still leaks the temporary file, I think this is acceptable)",
"def preview_resource_page(self):\n\n self.resource_preview.click()",
"def preview(record):\n pass",
"def on_preview(self, operation, preview, context, parent):\n operation.run(Gtk.PrintOperationAction.PREVIEW, None)\n return False",
"def setup_document(document_name=\"fSCAD-Preview\"):\n preview_doc = None\n saved_camera = None\n for document in app().documents:\n if document.name == document_name:\n preview_doc = document\n break\n if preview_doc is not None:\n preview_doc.activate()\n saved_camera = app().activeViewport.camera\n preview_doc.close(False)\n\n preview_doc = app().documents.add(adsk.core.DocumentTypes.FusionDesignDocumentType)\n preview_doc.name = document_name\n preview_doc.activate()\n if saved_camera is not None:\n is_smooth_transition_bak = saved_camera.isSmoothTransition\n saved_camera.isSmoothTransition = False\n app().activeViewport.camera = saved_camera\n saved_camera.isSmoothTransition = is_smooth_transition_bak\n app().activeViewport.camera = saved_camera\n design().designType = adsk.fusion.DesignTypes.DirectDesignType",
"def begin_preview(self, output_dir: str):",
"def render(view=False, preview=False):",
"def start_preview_stream(self) -> GoProResp:",
"def __startTRPreviewer(self):\n self.__startProc(\"eric6_trpreviewer.py\")",
"def launch_word():\r\n global WORD\r\n global TXT_FORMAT\r\n WORD = win32.gencache.EnsureDispatch('Word.Application')\r\n WORD.Visible = True\r\n TXT_FORMAT = win32.constants.wdFormatText\r\n WORD.DisplayAlerts = win32.constants.wdAlertsNone",
"def get_preview_file(self):\n import warnings\n warnings.warn(\"Documents.get_preview_file is deprecated. \"\n \"Use GetPreviewFile instead.\",\n DeprecationWarning,\n stacklevel=2)\n return self.GetPreviewFile()",
"def preview_capture_example():",
"def can_preview(file):\n return True",
"def _genRTFReport(self, report):\n if report is None:\n report = self._report_template\n data_rep = self.generateReport(report)\n if data_rep:\n rep_file_name = os.path.join(self.getReportDir(), '%s_report_result.rtf' % data_rep['name'])\n template_file_name = os.path.abspath(data_rep['generator'])\n log_func.info(u'Save report <%s> to file <%s>' % (template_file_name, rep_file_name))\n \n data = self._prevGenerateAllVariables(data_rep['__data__'])\n rtf_report.genRTFReport(data, rep_file_name, template_file_name)\n return rep_file_name\n return None",
"def _open_interactive_document_page(self, cik_num, acc_num):\n url = self.url_base + \\\n \"/cgi-bin/viewer?action=view&cik={}&accession_number={}&xbrl_type=v\".\\\n format(str(int(cik_num)), acc_num)\n\n self._open_url_with_retry(url)",
"def build_text_preview(\n self,\n file_path: str,\n preview_name: str,\n cache_path: str,\n page_id: int = 0,\n extension: str = \".txt\",\n ) -> None:\n raise UnavailablePreviewType()",
"def open_document(self, file_name):\n import uno\n file_url = uno.systemPathToFileUrl(abspath(file_name))\n\n if os.environ.get('OSTYPE', False) == 'FreeBSD':\n # Workaround a problemas con OpenOffice 3.1 en FreeBSD\n file_url = file_url.encode('UTF-8')\n\n load_properties = { \"Hidden\": True }\n file_ext = splitext(file_name)[1]\n file_ext = file_ext and file_ext[1:].lower() or None\n if self.IMPORT_FILTER_MAP.has_key(file_ext):\n load_properties.update(self.IMPORT_FILTER_MAP[file_ext])\n\n try:\n document = self.desktop.loadComponentFromURL(file_url, \"_blank\", 0, self.make_properties(load_properties))\n except Exception, ex:\n raise OOHelperException(_(\"Error loading file %s with OpenOffice: %s\") % (file_name, ex))\n try:\n document.refresh()\n except AttributeError:\n #print \"Warning: Ignoring AttributeError on document refresh\"\n pass\n\n return document"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Print RTF report by word. | def printOffice(self, rtf_filename):
try:
# Connect with Word
word_app = win32com.client.Dispatch('Word.Application')
# Hide
word_app.Visible = 0
# Open RTF
rep_tmpl_book = word_app.Documents.Open(rtf_filename)
# Show
word_app.Visible = 1
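        # Send the opened document to the default printer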
rep_tmpl_book.PrintOut()
return True
except pythoncom.com_error:
log_func.fatal(u'Error print report <%s>' % rtf_filename)
return False | [
"def previewOffice(self, rtf_filename):\n try:\n # Connect with Word\n word_app = win32com.client.Dispatch('Word.Application')\n # Hide\n word_app.Visible = 0\n # Open RTF\n rep_tmpl_book = word_app.Documents.Open(rtf_filename)\n # Show\n word_app.Visible = 1\n \n rep_tmpl_book.PrintPreview()\n return True\n except pythoncom.com_error:\n log_func.fatal(u'Error preview report <%s>' % rtf_filename)\n return False",
"def report_print ( report , title = '' , prefix = '' , more_rows = [] ) :\n table = report_as_table ( report ) \n return report_print_table ( table , title, prefix , more_rows )",
"def _genRTFReport(self, report):\n if report is None:\n report = self._report_template\n data_rep = self.generateReport(report)\n if data_rep:\n rep_file_name = os.path.join(self.getReportDir(), '%s_report_result.rtf' % data_rep['name'])\n template_file_name = os.path.abspath(data_rep['generator'])\n log_func.info(u'Save report <%s> to file <%s>' % (template_file_name, rep_file_name))\n \n data = self._prevGenerateAllVariables(data_rep['__data__'])\n rtf_report.genRTFReport(data, rep_file_name, template_file_name)\n return rep_file_name\n return None",
"def printDoc(doc):\n\tprint \" \".join(doc)",
"def cmd_draw(mpt):\n\n #word = mpt\n #subtree = transformations.nested_list(word)\n _dfs_print(mpt.root)",
"def print_on_paper(self):\n \n printer = Qsci.QsciPrinter()\n print_dialog = QPrintDialog(printer)\n print_dialog.setWindowTitle(self.tr(\"Print Document\"))\n print_dialog.addEnabledOption(QPrintDialog.PrintSelection)\n if print_dialog.exec_() == QPrintDialog.Accepted:\n printer.printRange(self)",
"def do_print_report(self, arg):\n print(self.last_fit_report)",
"def print_page(self, page, raw_text=False):\r\n\r\n for line in page['lines']:\r\n print(f\"{line['identifier'].rjust(8)} {self.cleaner.auto_spacer(line)}\")\r\n \r\n if raw_text:\r\n print(f\"{''.rjust(8)} {line['raw_data']}\")",
"def show_report(report):\n print()\n for line in report:\n print(line)\n print()",
"def print_report():\n print_days_percent_errors()\n print \"\"\n print_popular_authors()\n print \"\"\n print_popular_articles()\n print \"\"",
"def launch_word():\r\n global WORD\r\n global TXT_FORMAT\r\n WORD = win32.gencache.EnsureDispatch('Word.Application')\r\n WORD.Visible = True\r\n TXT_FORMAT = win32.constants.wdFormatText\r\n WORD.DisplayAlerts = win32.constants.wdAlertsNone",
"def w(text=''):\n if printing:\n print(text)\n else:\n _handle.write(text + '\\n')",
"def invoice_print(self):\n self.filtered(lambda inv: not inv.sent).write({'sent': True})\n if self.shop_id.format_print == 'ticket':\n return self.env.ref('odoope_einvoice_ose.account_einvoice_report_ticket').report_action(self)\n else:\n return self.env.ref('odoope_einvoice_ose.account_einvoice_report').report_action(self)",
"def print_report():\n print()\n title = ['Donor Name', '| Total Given ', '| Num Gifts',\n ' | Average Gift']\n print('{:<20}{:>14}{:^14}{:>14}'.format(title[0], title[1],\n title[2], title[3]))\n print('-'*65)\n print()\n # Creating list to hold donors info for printing\n donor_list = list()\n for donor in donors_list_dictionary:\n donor_fullname = \"{firstname} {lastname}\".format(**donor)\n # donor object will hold fullname, donation total, donation times, average donation\n donor_info = [donor_fullname, 0, 0, 0]\n for donor_amount in donor[\"donations\"]:\n donor_info[1] += donor_amount\n donor_info[2] += 1\n donor_info[3] = donor_info[1] // donor_info[2]\n donor_list.append(donor_info)\n\n print('{:<22}{}{:>12.2f}{:>10}{:>8}{:>12.2f}'.format(donor_fullname, '$',\n donor_info[1], donor_info[2], '$', donor_info[3]))\n print()",
"def print(self):\n for lang, df in self.ngrams.items():\n print('\\nNgram for the {} language'.format(lang))\n print('{}\\n'.format(df))",
"def print_bibtexsearch(term: str, request: Request):\n refs = mybib.searchReferences(term)\n output = \"\"\n for bibid in refs:\n output = (\n output\n + mybib.getBibtexEntry(\n bibid, newlinestr=\"<br>\", exported_keys=exported_bibkeys\n )\n + \"<br>\"\n )\n return output",
"def printSticker(text, fileName):\n myFile = open(\"%s.md\" % (fileName), \"w\")\n myFile.write(text)\n myFile.close()\n callShell(\"pandoc -V geometry:paperwidth=8.8cm -V geometry:paperheight=5cm -s %s.md -o %s.pdf\" % (fileName, fileName))\n callShell('lp -d MeterLabel -o landscape ' + fileName + '.pdf')",
"def print_report(self):\n print self.__report_str()",
"def print(self):\n if os.environ.get('TERM_PROGRAM', None) == 'iTerm.app':\n _print_thumbs(self.thumb_summaries)\n\n for summary in self.err_summaries:\n print(f'ERRORS for {summary.virtpath}')\n for err in summary.result.errors:\n print(f'\\t{err}')\n print()\n\n stats = [('Group', 'Files', 'Size', '% Files', '% Size')]\n stats.extend((g.name,\n g.count,\n g.size,\n g.count_pct,\n g.size_pct) for g in self.groups)\n table = AsciiTable(stats)\n for i in range(1, 5):\n table.justify_columns[i] = 'right'\n print(table.table)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert generate report to Office. | def convert(self, report=None, to_xls_filename=None, *args, **kwargs):
pass | [
"def oo_render(self, context=None):\n if context is None: context = {}\n self.log(\"Generating report: step 1...\")\n\n # Generate the stream from the template (Relatorio)\n data = self.generate(**context).render().getvalue()\n\n self.log(\"...step 1 done.\")\n\n #\n # Next steps need OpenOffice to perform some tasks\n # like file insertion or format conversions.\n # Using OpenOffice brings some overhead, so we will try to avoid\n # connecting to it unless it is necesary.\n #\n if len(self.oo_subreports)>0 or (self.output_format != self.source_format):\n self.log(\"Step 2....\")\n\n # Connect to OpenOffice\n oohelper = OOHelper(self.openoffice_port, self.autostart_openoffice, logger=self.log)\n\n #\n # Create a temporary (input for OpenOffice) file\n #\n dummy_fd, temp_file_name = tempfile.mkstemp(suffix=\".%s\" % self.source_format, prefix='openerp_oot_i_')\n temp_file = open(temp_file_name, 'wb')\n try:\n #\n # Write the data to the file\n #\n try:\n temp_file.write(data)\n finally:\n temp_file.close()\n\n # Reopen the file with OpenOffice\n document = oohelper.open_document(temp_file_name)\n\n #\n # Insert subreport files if needed\n #\n if len(self.oo_subreports)>0:\n self.log(\"Inserting subreport files\")\n for subreport in self.oo_subreports:\n placeholder_text = \"${insert_doc('%s')}\" % subreport\n oohelper.replace_text_with_file_contents(document, placeholder_text, subreport)\n # Remove the subreport temp file\n os.unlink(subreport)\n\n #\n # Save the file (does the format conversion) on a temp file\n #\n dummy_fd, output_file_name = tempfile.mkstemp(suffix=\".%s\" % self.output_format, prefix='openerp_oot_o_')\n try:\n # Save the document\n oohelper.save_document(document, output_file_name, close_document=True)\n\n #\n # Read back the data\n #\n output_file = open(output_file_name, 'rb')\n try:\n data = output_file.read()\n finally:\n output_file.close()\n finally:\n # Remove the temp file\n os.unlink(output_file_name)\n finally:\n # Remove the temp file\n os.unlink(temp_file_name)\n\n self.log(\"...step 2 done.\")\n\n\n # TODO: As a long term feature it would be nice to be able to tell OpenOffice to refresh/recalculate the Table Of Contents (if there is any)\n\n\n # Return the data (byte string)\n return data",
"def _make_xlsx(self, report_type, report_data, report_day):\r\n filename = \"{}_{}.xlsx\".format(report_type, report_day)\r\n path = os.path.join(self.report_path, filename)\r\n logger.debug(\"Reporter: creating {}\".format(filename))\r\n\r\n workbook = xlsxwriter.Workbook(path)\r\n\r\n cell_format = {\r\n \"hat\": workbook.add_format({\r\n 'bold': True,\r\n 'text_wrap': True,\r\n 'font_size': 14,\r\n }),\r\n \"header\": workbook.add_format({\r\n 'bold': True,\r\n 'align': 'center',\r\n 'valign': 'vcenter',\r\n 'text_wrap': True,\r\n 'border': 1,\r\n }),\r\n \"text\": workbook.add_format({\r\n 'valign': 'vcenter',\r\n 'align': 'center',\r\n 'border': 1,\r\n }),\r\n \"black\": workbook.add_format({\r\n 'font_color': 'black',\r\n }),\r\n \"grey\": workbook.add_format({\r\n 'font_color': '#b2b2b2',\r\n }),\r\n \"date\": workbook.add_format({\r\n 'num_format': 'yyyy.mm.dd',\r\n 'align': 'center',\r\n 'valign': 'vcenter',\r\n 'border': 1,\r\n }),\r\n \"time\": workbook.add_format({\r\n 'num_format': 'hh:mm:ss',\r\n 'align': 'center',\r\n 'valign': 'vcenter',\r\n 'border': 1,\r\n }),\r\n }\r\n\r\n for day in sorted(report_data.keys()):\r\n data = report_data[day]\r\n\r\n worksheet = workbook.add_worksheet(day)\r\n worksheet.set_column(0, 0, 5)\r\n worksheet.set_column(1, 2, 12)\r\n worksheet.set_column(3, 3, 25)\r\n worksheet.set_column(4, 5, 15)\r\n\r\n if data:\r\n worksheet.merge_range(0, 0, 0, 5,\r\n u'Количество записей в отчете: {}'.format(len(data)),\r\n cell_format[\"hat\"]\r\n )\r\n\r\n worksheet.write(1, 0, u\"№\", cell_format[\"header\"])\r\n worksheet.write(1, 1, u\"Дата\", cell_format[\"header\"])\r\n worksheet.write(1, 2, u\"Время\", cell_format[\"header\"])\r\n worksheet.write(1, 3, u\"Название канала\", cell_format[\"header\"])\r\n worksheet.write(1, 4, u\"Название зоны\", cell_format[\"header\"])\r\n worksheet.write(1, 5, u\"Кол-во людей\", cell_format[\"header\"])\r\n\r\n else:\r\n worksheet.merge_range(0, 0, 0, 5,\r\n u'Днные отсутствуют',\r\n cell_format[\"hat\"])\r\n\r\n for idx, row in enumerate(data, 2):\r\n\r\n dt = self.ts_to_datetime(row[-1])\r\n\r\n worksheet.write(idx, 0, idx - 1, cell_format['text'])\r\n worksheet.write(idx, 1, dt, cell_format['date'])\r\n worksheet.write(idx, 2, dt, cell_format['time'])\r\n worksheet.write(idx, 3, row[1], cell_format['text'])\r\n worksheet.write(idx, 4, row[2], cell_format['text'])\r\n worksheet.write(idx, 5, row[3], cell_format['text'])\r\n\r\n workbook.close()\r\n\r\n return path",
"def print_wo_xlsx_report(self):\n for vehicle in self:\n wo_obj = self.env[\"fleet.vehicle.log.services\"]\n records = wo_obj.search([])\n if vehicle.vehicle_ids:\n records = wo_obj.search([(\"vehicle_id\", \"in\", vehicle.vehicle_ids.ids)])\n if vehicle.select_report == \"wo_month_sum_rep\":\n wo_obj = self.env[\n \"report.fleet_operations.workorder.monthly.summary.xls\"\n ]\n file = wo_obj.generate_xlsx_report(records)\n vehicle.write(\n {\"name\": \"WorkOrder Monthly Summary Report.xls\", \"file\": file}\n )\n return {\n \"view_type\": \"form\",\n \"view_mode\": \"form\",\n \"res_model\": \"work.order.reports\",\n \"type\": \"ir.actions.act_window\",\n \"target\": \"new\",\n \"res_id\": vehicle.id,\n }",
"def generate_report(request):\n report_merge_dict = report_content_util.get_report_merge_dict(request)\n # print(\"GLOBAL MERGE DICT:\", report_merge_dict)\n # for k, v in report_merge_dict.items():\n # print(\"{} : {}\".format(k, v))\n\n report_filepath = create_docx_document(report_merge_dict)\n\n return report_filepath",
"def generate(self):\n self.result = self.generate_pivot_table()\n self.result.to_excel(os.getcwd()+\"/\"+self.output_file_name+\".xlsx\",index_label=self.row)\n return None",
"def echo_excel_report_template(save_to_file):\n report = reports.discovery.ExcelReport(file_name=save_to_file)\n echo_report_tpl_command = commands.EchoReportTemplateCommand(report=report)\n echo_report_tpl_command.execute()",
"def _export_to_xls(self):\n # cellstyle = xlwt.easyxf(\n # 'align: wrap on, vert top, horiz left;', num_format_str='general'\n # )\n\n # response = HttpResponse(mimetype=\"application/csv\")\n response = self._get_initial_response(mimetype=\"application/csv\")\n response['Content-Disposition'] = \\\n 'attachment; filename=db_store_export_data.xls'\n wb = xlwt.Workbook(encoding=\"UTF-8\")\n ws = wb.add_sheet('Data')\n\n algn1 = xlwt.Alignment()\n algn1.wrap = 1\n style1 = xlwt.XFStyle()\n style1.alignment = algn1\n\n row = 0\n\n data_headers = self._get_data_headers()\n data_keys = data_headers.keys()\n data_values = data_headers.values()\n\n for cell, value in enumerate(data_values):\n ws.write(row, cell, text_type(value), xlwt.easyxf('font: bold on'))\n ws.col(cell).width = 256 * 20 # about 20 chars wide\n cell += 1\n row += 1\n\n for obj in self.queryset:\n data = json.loads(obj.saved_data)\n for cell, key in enumerate(data_keys):\n ws.write(row, cell, text_type(data.get(key, '')))\n cell += 1\n\n row += 1\n\n wb.save(response)\n return response",
"def docgen_export_to_xlsx(data, template_path, report_name):\n\n token = get_docgen_token()\n auth_header = f\"Bearer {token}\"\n\n template_data = open(\n template_path, \"rb\").read()\n base64_encoded = base64.b64encode(template_data).decode(\"UTF-8\")\n\n body = DocGenRequest(\n data=data,\n options=DocGenOptions(\n reportName=report_name,\n ).dict(),\n template=DocGenTemplateFile(\n encodingType=\"base64\",\n content=base64_encoded,\n fileType=\"xlsx\"\n ).dict()\n )\n\n logger.info('making POST request to common docgen: %s',\n config.COMMON_DOCGEN_ENDPOINT)\n\n try:\n res = requests.post(config.COMMON_DOCGEN_ENDPOINT, json=body.dict(), headers={\n \"Authorization\": auth_header, \"Content-Type\": \"application/json\"})\n res.raise_for_status()\n except requests.exceptions.HTTPError as e:\n logger.info(e)\n raise HTTPException(status_code=e.response.status_code, detail=str(e))\n\n return res.content",
"def make_book(self):\n\n self.doc.init()\n newpage = 0\n for rpt in self.rptlist:\n if newpage:\n self.doc.page_break()\n newpage = 1\n if rpt:\n rpt.begin_report()\n rpt.write_report()\n self.doc.close()\n \n if self.open_with_app.get_active():\n open_file_with_default_application(self.target_path)",
"def generate_report():",
"def testRunGenerateMethodConvertOdsToMsXslx(self):\n generate_result = self.proxy.run_generate('test.ods',\n encodestring(\n open(join('data', 'test.ods')).read()),\n None, 'ms.xlsx',\n \"application/vnd.oasis.opendocument.spreadsheet\")\n response_code, response_dict, response_message = generate_result\n self.assertEquals(response_code, 200)\n self.assertEquals(type(response_dict), DictType)\n self.assertNotEquals(response_dict['data'], '')\n self.assertEquals(response_dict['mime'], 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')",
"def generate_xlsx_report(self, workbook, data, parts):\n # add the worksheet\n worksheet = workbook.add_worksheet(\"product\")\n worksheet.set_column(0, 0, 10)\n worksheet.set_column(1, 1, 15)\n worksheet.set_column(2, 2, 10)\n worksheet.set_column(3, 3, 10)\n worksheet.set_column(4, 4, 9)\n worksheet.set_column(5, 5, 12)\n worksheet.set_column(6, 6, 10)\n worksheet.set_column(7, 7, 15)\n worksheet.set_column(8, 8, 10)\n worksheet.set_column(9, 9, 9)\n worksheet.set_column(10, 10, 9)\n worksheet.set_column(11, 11, 18)\n worksheet.set_column(12, 12, 15)\n worksheet.set_column(13, 13, 12)\n worksheet.set_column(14, 14, 12)\n worksheet.set_column(15, 15, 12)\n bold = workbook.add_format(\n {\"bold\": True, \"font_name\": \"Arial\", \"font_size\": \"10\"}\n )\n tot = workbook.add_format(\n {\"border\": 2, \"bold\": True, \"font_name\": \"Arial\", \"font_size\": \"10\"}\n )\n tot.set_bg_color(\"gray\")\n row = 0\n for pr in parts:\n row += 1\n row += 1\n worksheet.write(row, 3, \" General Parts Listing \", bold)\n row += 3\n worksheet.write(row, 0, \"No.\", tot)\n worksheet.write(row, 1, \"Part No:\", tot)\n worksheet.write(row, 2, \"Part Name\", tot)\n worksheet.write(row, 3, \"Vehicle Make\", tot)\n worksheet.write(row, 4, \"Location \", tot)\n worksheet.write(row, 5, \"Unit Type\", tot)\n worksheet.write(row, 6, \"Qty \", tot)\n worksheet.write(row, 7, \"Incomming \", tot)\n worksheet.write(row, 8, \"Outgoing\", tot)\n worksheet.write(row, 9, \"Ending Balance\", tot)\n worksheet.write(row, 10, \"Reorder point\", tot)\n worksheet.write(row, 11, \"Reorder Qty\", tot)\n row += 2\n counter = 1\n for line in pr:\n worksheet.write(row, 0, counter, bold)\n worksheet.write(row, 1, line.default_code or \"\")\n worksheet.write(row, 2, line.name or \"\")\n worksheet.write(\n row, 3, line.vehicle_make_id and line.vehicle_make_id.name or \"\"\n )\n worksheet.write(row, 4, \"Location\")\n worksheet.write(row, 5, line.uom_id and line.uom_id.name or \"\")\n worksheet.write(row, 6, line.qty_available or 0.0)\n worksheet.write(row, 7, line.incoming_qty or 0.0)\n worksheet.write(row, 8, line.outgoing_qty or 0.0)\n worksheet.write(row, 9, line.virtual_available or 0.0)\n worksheet.write(row, 10, line.re_order_point or 0.0)\n worksheet.write(row, 11, line.re_order_qty or 0.0)\n counter += 1\n row += 8",
"def testRunGenerateMethodConvertOdsToHTML(self):\n generate_result = self.proxy.run_generate('test.ods',\n encodestring(\n open(join('data', 'test.ods')).read()),\n None, 'html',\n \"application/vnd.oasis.opendocument.spreadsheet\")\n response_code, response_dict, response_message = generate_result\n self.assertEquals(response_code, 200)\n self.assertEquals(type(response_dict), DictType)\n self.assertNotEquals(response_dict['data'], '')\n self.assertEquals(response_dict['mime'], 'application/zip')\n output_url = join(self.tmp_url, \"zip.zip\")\n open(output_url, 'w').write(decodestring(response_dict['data']))\n self.assertTrue(is_zipfile(output_url))\n filename_list = [file.filename for file in ZipFile(output_url).filelist]\n for filename in filename_list:\n if filename.endswith(\"impr.html\"):\n break\n else:\n self.fail(\"Not exists one file with 'impr.html' format\")\n if exists(output_url):\n remove(output_url)",
"def write(self) -> None:\n loan_details = self.loan\n headers = self.header\n\n home_path = os.path.expanduser(\"~\")\n filename = \"loan.xlsx\"\n file_path = os.path.join(home_path, \"Downloads\", filename)\n\n with xlsxwriter.Workbook(file_path) as workbook:\n ws = workbook.add_worksheet()\n bold_font = workbook.add_format({\"bold\": True})\n date_format = workbook.add_format({\"num_format\": \"DD.MM.YYYY\"})\n money_format = workbook.add_format({\"num_format\": \"#,##0.00\"})\n\n # Iterate over the headers and write it out column by column\n for col, header in enumerate(headers):\n ws.write_string(0, col, header, bold_font)\n\n # Iterate over the data and write it out row by row\n for row, loan in enumerate(loan_details, start=1):\n ws.write(row, 0, loan.date, date_format)\n ws.write(row, 1, loan.day)\n ws.write(row, 2, loan.principal, money_format)\n ws.write(row, 3, loan.interest, money_format)\n ws.write(row, 4, loan.payment, money_format)\n ws.write(row, 5, loan.balance, money_format)",
"def create_xls_obj(self):\n\n response = urllib2.urlopen(self.file_url)\n zipfile_name = tempfile.mkstemp()[1]\n zipfile = open(zipfile_name, 'w')\n zipfile.write(response.read())\n zipfile.close()\n\n zipfile = ZipFile(zipfile_name)\n xlsfile_name = tempfile.mkstemp()[1]\n xlsfile = open(xlsfile_name, 'w')\n fromzip_file = zipfile.open(zipfile.infolist()[0])\n xlsfile.write(fromzip_file.read())\n xlsfile.close()\n zipfile.close()\n\n return open_workbook(xlsfile_name)",
"def create_docx_document(merge_dict):\n\n template_name = UNIVERSAL_TEMPLATE_NAME\n if not os.path.exists(OUTPUT_REPORTS_DIR):\n try:\n os.mkdir(OUTPUT_REPORTS_DIR)\n except OSError:\n print(\"Creation of the directory %s failed\" % OUTPUT_REPORTS_DIR)\n\n template_path = os.path.join(REPORT_TEMPLATES_DIR, template_name)\n document = MailMerge(template_path)\n # print(\"Merge fields at {} document: {}\".format(template_name, document.get_merge_fields()))\n document.merge(**merge_dict)\n now = datetime.now()\n merged_file_path = os.path.join(OUTPUT_REPORTS_DIR, now.strftime(\"%d %H_%M_%S\") + '_report.docx')\n document.write(merged_file_path)\n return merged_file_path",
"def generate_pending_repairs_xlsx_report(self, res, fleet_pending):\n workbook = xlwt.Workbook()\n worksheet = workbook.add_sheet(\"fleet_pending\")\n worksheet.col(0).width = 6000\n worksheet.col(1).width = 6000\n worksheet.col(2).width = 7500\n worksheet.col(3).width = 12500\n worksheet.col(4).width = 5500\n worksheet.col(5).width = 6000\n worksheet.col(6).width = 7500\n worksheet.col(7).width = 5000\n worksheet.col(8).width = 2500\n font = xlwt.Font()\n # borders = xlwt.Borders()\n font.bold = True\n font.name = \"Arial\"\n font.height = 200\n # pattern = xlwt.Pattern()\n tot = xlwt.easyxf(\"font: bold 1; font: name 1; font: height 200\")\n style1 = xlwt.easyxf(\n \"font: bold 1; font: name 1; font: height 200\", num_format_str=\"DD/MM/YYYY\"\n )\n # border = xlwt.easyxf('font: name 1; font: height 200')\n format1 = xlwt.easyxf(\n \"font: bold 1; font: name 1; font: height 200;\\\n pattern: pattern solid, fore_colour yellow;\"\n )\n\n row = 0\n row += 1\n worksheet.write(row, 2, \"Fleet With Pending Repairs\", format1)\n row += 2\n for obj in fleet_pending:\n if obj.pending_repair_type_ids:\n row += 3\n worksheet.write(row, 0, \"Vehicle Information :\", format1)\n row += 2\n worksheet.write(row, 2, \"Kilometer :\", format1)\n worksheet.write(row, 3, obj.odometer or \"\", tot)\n row += 1\n worksheet.write(row, 2, \"Vehicle ID :\", format1)\n worksheet.write(row, 3, obj.name or \"\", tot)\n row += 1\n worksheet.write(row, 2, \"Type :\", format1)\n worksheet.write(\n row,\n 3,\n obj.vechical_type_id and obj.vechical_type_id.name or \"\",\n tot,\n )\n row += 1\n worksheet.write(row, 2, \"VIN :\", format1)\n worksheet.write(row, 3, obj.vin_sn or \"\", tot)\n row += 1\n worksheet.write(row, 2, \"Color :\", format1)\n worksheet.write(\n row,\n 3,\n obj.vehical_color_id and obj.vehical_color_id.name or \"\",\n tot,\n )\n row += 1\n worksheet.write(row, 2, \"Driver :\", format1)\n worksheet.write(row, 3, obj.driver_id and obj.driver_id.name or \"\", tot)\n row += 1\n worksheet.write(row, 2, \"Driver Contact :\", format1)\n worksheet.write(row, 3, obj.driver_contact_no or \"\", tot)\n row += 4\n worksheet.write(row, 0, \"Repair Types :\", format1)\n row += 2\n worksheet.write(row, 1, \"No. :\", format1)\n worksheet.write(row, 2, \"Ref. 
WO# :\", format1)\n worksheet.write(row, 3, \"Repair Type :\", format1)\n worksheet.write(row, 4, \"Category :\", format1)\n worksheet.write(row, 5, \"Actual Date Issued :\", format1)\n row += 1\n counter = 1\n for line in obj.pending_repair_type_ids:\n worksheet.write(row, 1, counter, tot)\n worksheet.write(row, 2, line.name or \"\", tot)\n worksheet.write(\n row,\n 3,\n line.repair_type_id and line.repair_type_id.name or \"\",\n tot,\n )\n worksheet.write(\n row, 4, line.categ_id and line.categ_id.name or \"\", tot\n )\n\n date = \"\"\n if line.issue_date:\n date = format_date(\n self.env,\n line.issue_date,\n self._context.get(\"lang\"),\n date_format=False,\n )\n worksheet.write(row, 5, date or \"\", style1)\n row += 1\n counter += 1\n row += 3\n worksheet.write(row, 0, \"**************************\")\n worksheet.write(row, 1, \"**************************\")\n worksheet.write(row, 2, \"**************************\")\n worksheet.write(row, 3, \"**************************\")\n worksheet.write(row, 4, \"**************************\")\n worksheet.write(row, 5, \"**************************\")\n worksheet.write(row, 6, \"**************************\")\n row += 1\n worksheet.write(row, 0, \"**************************\")\n worksheet.write(row, 1, \"**************************\")\n worksheet.write(row, 2, \"**************************\")\n worksheet.write(row, 3, \"**************************\")\n worksheet.write(row, 4, \"**************************\")\n worksheet.write(row, 5, \"**************************\")\n worksheet.write(row, 6, \"**************************\")\n fp = io.BytesIO()\n workbook.save(fp)\n fp.seek(0)\n data = fp.read()\n fp.close()\n res = base64.encodebytes(data)\n return res",
"def create_example_xl():\n if XL_FILE.exists(): # Don't need to recreate it\n return\n\n df = pd.DataFrame(\n {\n \"tracking\": [\"F12\", \"U23\", \"F34\", \"U45\"],\n \"invoice\": [\"I120\", \"I230\", \"I340\", \"I450\"],\n }\n )\n df.to_excel(XL_FILE, index=False)",
"def generate_report(self):\n if self.report_format == \"csv\":\n print(\"[+] Building the report -- you selected a csv report.\")\n self.output_csv_report = self._build_output_csv_file_name()\n self.write_csv_report()\n elif self.report_format == \"word\":\n print(\"[+] Building the report -- you selected a Word/docx report.\")\n print(\"[+] Looking for the template.docx to be used for the Word report.\")\n if os.path.isfile(\"template.docx\"):\n print(\"[+] Template was found -- proceeding with report generation...\")\n print(\"L.. This may take a while if you provided a lot of \\\nIDs for a combined report or have a lot of targets.\")\n self.output_word_report = self._build_output_word_file_name()\n self.write_word_report()\n else:\n print(\"[!] Could not find the template document! Make sure \\\n'template.docx' is in the GoReport directory.\")\n sys.exit()\n elif self.report_format == \"quick\":\n print(\"[+] Quick report stats:\")\n self.get_quick_stats()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Open RTF file in Word. | def openOffice(self, rtf_filename):
try:
# Connection with Word
word_app = win32com.client.Dispatch('Word.Application')
# Hide
word_app.Visible = 0
# Open RTF
rep_tmpl_book = word_app.Open(rtf_filename)
# Show
word_app.Visible = 1
return True
except pythoncom.com_error:
log_func.fatal(u'Error open report <%s>' % rtf_filename)
return False | [
"def previewOffice(self, rtf_filename):\n try:\n # Connect with Word\n word_app = win32com.client.Dispatch('Word.Application')\n # Hide\n word_app.Visible = 0\n # Open RTF\n rep_tmpl_book = word_app.Documents.Open(rtf_filename)\n # Show\n word_app.Visible = 1\n \n rep_tmpl_book.PrintPreview()\n return True\n except pythoncom.com_error:\n log_func.fatal(u'Error preview report <%s>' % rtf_filename)\n return False",
"def printOffice(self, rtf_filename):\n try:\n # Connect with Word\n word_app = win32com.client.Dispatch('Word.Application')\n # Hide\n word_app.Visible = 0\n # Open RTF\n rep_tmpl_book = word_app.Documents.Open(rtf_filename)\n # Show\n word_app.Visible = 1\n \n rep_tmpl_book.PrintOut()\n return True\n except pythoncom.com_error:\n log_func.fatal(u'Error print report <%s>' % rtf_filename)\n return False",
"def open_document(self, file_name):\n import uno\n file_url = uno.systemPathToFileUrl(abspath(file_name))\n\n if os.environ.get('OSTYPE', False) == 'FreeBSD':\n # Workaround a problemas con OpenOffice 3.1 en FreeBSD\n file_url = file_url.encode('UTF-8')\n\n load_properties = { \"Hidden\": True }\n file_ext = splitext(file_name)[1]\n file_ext = file_ext and file_ext[1:].lower() or None\n if self.IMPORT_FILTER_MAP.has_key(file_ext):\n load_properties.update(self.IMPORT_FILTER_MAP[file_ext])\n\n try:\n document = self.desktop.loadComponentFromURL(file_url, \"_blank\", 0, self.make_properties(load_properties))\n except Exception, ex:\n raise OOHelperException(_(\"Error loading file %s with OpenOffice: %s\") % (file_name, ex))\n try:\n document.refresh()\n except AttributeError:\n #print \"Warning: Ignoring AttributeError on document refresh\"\n pass\n\n return document",
"def read_txt(filename):\n with open(filename, \"r\") as f:\n text_read = f.read()\n docx_tf = TextFrame(text_read)\n return docx_tf",
"def launch_word():\r\n global WORD\r\n global TXT_FORMAT\r\n WORD = win32.gencache.EnsureDispatch('Word.Application')\r\n WORD.Visible = True\r\n TXT_FORMAT = win32.constants.wdFormatText\r\n WORD.DisplayAlerts = win32.constants.wdAlertsNone",
"def open_file():\r\n filepath = askopenfilename(\r\n filetypes=[(\"Text Files\", \"*.txt\"), (\"All Files\", \"*.*\")]\r\n )\r\n if not filepath:\r\n return\r\n txt_edit.delete(1.0, tk.END)\r\n with open(filepath, \"r\") as input_file:\r\n text = input_file.read()\r\n txt_edit.insert(tk.END, text)\r\n window3.title(f\"Text Editor Application - {filepath}\")",
"def open_file(self, text_file_name):\r\n\t\tself.text_file_name = text_file_name\r\n\t\tfile_obj = open(self.text_file_name)\r\n\t\tcontent = file_obj.read()\r\n\t\tfile_obj.close()\r\n\t\treturn content",
"def open(self):\n if len(self.docs) < self.maxDocs:\n self.filename = tkFileDialog.askopenfilename(filetypes=[(\"Python files\",\".py\"),(\"All files\",\"*\")], initialdir=self.lastDir)\n if self.filename:\n self.lastDir = os.path.dirname(self.filename)\n else:\n return\n if OS == 'ce': # Just passing filename fails...\n self.filename = self.filename.replace('/','\\\\')\n try:\n file = open(self.filename)\n self.newDoc(os.path.basename(self.filename))\n text = file.readlines()\n file.close()\n self.editor.put(text)\n self.editor.changed(reset=True)\n except IOError, info:\n tkMessageBox.showerror('Exception!',info)\n else:\n tkMessageBox.showerror('Too many files!', 'You have reached the open file limit.')",
"def open_text(filename, **kwargs):\n kwargs.setdefault(\"mode\", \"rt\")\n kwargs.setdefault(\"encoding\", TEXT_ENCODING)\n return io.open(str(py_path(filename)), **kwargs)",
"def open_file_in_editor(file_name):\n global user_configurations\n if True:\n #alternative_editors = ['gedit', 'leafpad', 'notepad', 'nano', ]\n command = user_configurations['EDITOR'].replace('%f', '\"%s\"' % file_name)\n try:\n subprocess.call([command], shell=True)\n except OSError:\n pass\n else:\n fulltext = \"\"\n with open(file_name, 'r') as f:\n fulltext = f.read()",
"def open_document(self, fileURL):\n self.interface.factory.not_implemented(\"DocumentApp.open_document()\")",
"def openWordlist(self, wordlistFileName: str):\n try:\n self.__wordlistFile = open('../input/'+wordlistFileName, 'r')\n except FileNotFoundError:\n oh.errorBox(\"File '\"+wordlistFileName+\"' not found. Did you put it in the correct directory?\")",
"def open(self,mode, ctx=None):\n\t\tf = self.fs.FileType(self,mode)\n\t\tf.open()\n\t\treturn f",
"def _winoffice(self):\n oid = oletools.oleid.OleID(self.src_path) # First assume a valid file\n if not olefile.isOleFile(self.src_path):\n # Manual processing, may already count as suspicious\n try:\n ole = olefile.OleFileIO(self.src_path, raise_defects=olefile.DEFECT_INCORRECT)\n except Exception:\n self.make_dangerous('Unparsable WinOffice file')\n if ole.parsing_issues:\n self.make_dangerous('Parsing issues with WinOffice file')\n else:\n if ole.exists('macros/vba') or ole.exists('Macros') \\\n or ole.exists('_VBA_PROJECT_CUR') or ole.exists('VBA'):\n self.make_dangerous('WinOffice file containing a macro')\n else:\n indicators = oid.check()\n for i in indicators:\n if i.id == 'ObjectPool' and i.value:\n self.make_dangerous('WinOffice file containing an object pool')\n elif i.id == 'flash' and i.value:\n self.make_dangerous('WinOffice file with embedded flash')\n elif i.id == 'encrypted' and i.value:\n self.make_dangerous('Encrypted WinOffice file')\n elif i.id == 'vba_macros' and i.value:\n self.make_dangerous('WinOffice file containing a macro')\n\n self.add_description('WinOffice file')",
"def openDoc(self, filepath):\n with open(filepath, 'r') as f:\n htmlData = f.read()\n self.soup = BeautifulSoup(htmlData, 'html.parser')",
"def open_text_viewer(text_file):\n linux_editors = {'gnome':'gedit', 'kde':'kate', \n 'xfce':'mousepad', 'generic':'nano'}\n\n if os.name == 'nt':\n executable = 'notepad.exe'\n elif os.name == 'posix':\n # Try to detect current deskop environment, and\n # then select the most probable text editor\n # for the platform.\n de = detect_desktop_environment()\n executable = linux_editors[de]\n else:\n raise ValueError(\"A text editor not defined for %s\" % os.name)\n\n # Launch the viewer\n os.system(executable + ' ' + text_file)",
"def open(filename):\r\n sc=Shortcut()\r\n sc.load(filename)\r\n return sc",
"def _winoffice(self):\n oid = oletools.oleid.OleID(self.src_path) # First assume a valid file\n if not olefile.isOleFile(self.src_path):\n # Manual processing, may already count as suspicious\n try:\n ole = olefile.OleFileIO(self.src_path, raise_defects=olefile.DEFECT_INCORRECT)\n except Exception:\n self.make_dangerous('Unparsable WinOffice file')\n if ole.parsing_issues:\n self.make_dangerous('Parsing issues with WinOffice file')\n else:\n if ole.exists('macros/vba') or ole.exists('Macros') \\\n or ole.exists('_VBA_PROJECT_CUR') or ole.exists('VBA'):\n self.make_dangerous('WinOffice file containing a macro')\n else:\n indicators = oid.check()\n # Encrypted can be set by multiple checks on the script\n if oid.encrypted.value:\n self.make_dangerous('Encrypted WinOffice file')\n if oid.macros.value or oid.ole.exists('macros/vba') or oid.ole.exists('Macros') \\\n or oid.ole.exists('_VBA_PROJECT_CUR') or oid.ole.exists('VBA'):\n self.make_dangerous('WinOffice file containing a macro')\n for i in indicators:\n if i.id == 'ObjectPool' and i.value:\n self.make_dangerous('WinOffice file containing an object pool')\n elif i.id == 'flash' and i.value:\n self.make_dangerous('WinOffice file with embedded flash')\n self.add_description('WinOffice file')",
"def parse(self, rtf_text, filename, file=None):\n parsed_text = self._remove_tags(self._clean_url_field(self._create_newlines(rtf_text)))\n date = self._find_date(parsed_text)\n time = self._find_time(parsed_text)\n if date is None:\n # print('no date')\n return\n else:\n try:\n filename = filename + date + '_' + time\n except TypeError:\n print('halt')\n write_file(parsed_text, filename, self.output_directory)\n self.files_output[file] += 1"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A trace method to assist with the trace messages for the daemon in the production version. Simply check tracing boolean value, and print | def trace(message):
if tracing == True:
now = datetime.datetime.now()
date = now.strftime("%Y %m %d - %H:%M:%S")
trace_file.write('%r %s\n'%(date, message))
print date, 'sptlqry.py:', message | [
"async def request_enable_debug(self, ctx) -> str:\n self.cbf_ingest.enable_debug(True)\n return \"Debug logging enabled.\"",
"def extrae_tracing_is_enabled():\n global TRACING_ENABLED\n return TRACING_ENABLED",
"def trace(self, msg, *args, **kwargs):\n self.write(msg, level='TRACE', *args, **kwargs)",
"def trace(self, msg: str, *args: Any, **kwargs: Any) -> None:\n self.log(TRACE, msg, *args, **kwargs)",
"def trace_enabled(self):\n ret = self._get_attr(\"traceEnabled\")\n return ret",
"async def request_disable_debug(self, ctx) -> str:\n self.cbf_ingest.enable_debug(False)\n return \"Debug logging disabled.\"",
"def handle_trace(self, args=None):\n return self.session_handler.dispatch_event(\"trace\", args)",
"def dbtrace(off=bool, keyword=\"string\", verbose=bool, mark=bool, info=bool, title=\"string\", output=\"string\", timed=bool, filter=\"string\"):\n pass",
"def trace(self):\n assert self._trace, \"Must run sample() first!\"\n return self._trace",
"def trace(*args):\n print(*args)",
"def trace(self, message, farg=None):\n self.write(message, farg=farg, level=u\"TRACE\")",
"def test_tracing(self):\n self.cluster.populate(1)\n self.cluster.start()\n\n node1, = self.cluster.nodelist()\n\n stdout, stderr = self.run_cqlsh(node1, cmds=\"\"\"\n CREATE KEYSPACE tracing_checks WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1};\n USE tracing_checks;\n CREATE TABLE test (id int, val text, PRIMARY KEY (id));\n INSERT INTO test (id, val) VALUES (1, 'adfad');\n INSERT INTO test (id, val) VALUES (2, 'lkjlk');\n INSERT INTO test (id, val) VALUES (3, 'iuiou')\"\"\")\n\n assert 0 == len(stderr), \"Failed to execute cqlsh: {}\".format(stderr)\n\n self.verify_output(\"use tracing_checks; tracing on; select * from test\", node1, \"\"\"Now Tracing is enabled\n\n id | val\n----+-------\n 1 | adfad\n 2 | lkjlk\n 3 | iuiou\n\n(3 rows)\n\nTracing session:\"\"\")",
"def setTFETraceFlag(trace: bool = False) -> None:\n global __TFE_TRACE__\n if trace is True:\n logger.info(\"Writing trace files for every session.run() call with a tag\")\n\n __TFE_TRACE__ = trace",
"def _debug_print(self, s):\n if self.conf.debug:\n print('Main: %s' % s)",
"def _log_trace(self, message, *args, **kwargs):\n if self.isEnabledFor(logging.TRACE):\n # This is the pattern to hook a custom log level into\n # the logger without declaring a derived class.\n # pylint:disable=protected-access\n self._log(logging.TRACE, message, args, **kwargs)",
"def _is_network_tracing_enabled():\n global TRACE_ENABLED\n return TRACE_ENABLED",
"def tracing_enabled(self):\n ret = self._get_attr(\"tracingEnabled\")\n return ret",
"def _debug(self, msg):\n if self.verbose:\n print(str(msg))",
"def cmd_debug(self):\r\n self.log.setLevel(logging.DEBUG)\r\n self.log.debug('Switching to DEBUG threshold')"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Draw instance-level prediction results on an image. | def draw_instance_predictions(self, predictions, track_ids):
boxes = predictions.pred_boxes if predictions.has("pred_boxes") else None
scores = predictions.scores if predictions.has("scores") else None
classes = predictions.pred_classes if predictions.has("pred_classes") else None
labels = _create_text_labels(classes, scores, self.metadata.get("thing_classes", None))
keypoints = predictions.pred_keypoints if predictions.has("pred_keypoints") else None
if predictions.has("pred_masks"):
masks = np.asarray(predictions.pred_masks)
masks = [GenericMask(x, self.output.height, self.output.width) for x in masks]
else:
masks = None
# set the color according to the track ids
colors = [cm.tab20(id_) for id_ in track_ids]
alpha = 0.6
labels = [f'Track {id_} {label}' for label, id_ in zip(labels,track_ids)]
# increase font size
if self._default_font_size < 20: self._default_font_size *= 1.3
if self._instance_mode == ColorMode.IMAGE_BW:
assert predictions.has("pred_masks"), "ColorMode.IMAGE_BW requires segmentations"
self.output.img = self._create_grayscale_image(
(predictions.pred_masks.any(dim=0) > 0).numpy()
)
self.overlay_instances(
masks=masks,
boxes=boxes,
labels=labels,
keypoints=keypoints,
assigned_colors=colors,
alpha=alpha,
)
return self.output | [
"def display_prediction(image_path, model, topk):\n\n # Get predictions\n img, ps, classes, y_obs = predict(image_path, model, topk)\n # Convert results to dataframe for plotting\n result = pd.DataFrame({'p': ps}, index=classes)\n\n # Show the image\n plt.figure(figsize=(16, 5))\n ax = plt.subplot(1, 2, 1)\n ax, img = imshow_tensor(img, ax=ax)\n\n # Set title to be the actual class\n ax.set_title(y_obs, size=20)\n\n ax = plt.subplot(1, 2, 2)\n # Plot a bar plot of predictions\n result.sort_values('p')['p'].plot.barh(color='blue', edgecolor='k', ax=ax)\n plt.xlabel('Predicted Probability')\n plt.tight_layout()\n\n display_prediction(random_test_image(), model, topk=5)\n\n display_prediction(random_test_image(), model, topk=5)",
"def plot_prediction(self, *args):\n pass",
"def plot_prediction_overlay(tile: np.ndarray, prediction: np.ndarray):\n plt.figure()\n plt.imshow(tile)\n plt.show()",
"def visualize_patch_segmentation_predictions(self, X, y=None, threshold=0.5, num_predictions=3):\n\n # Choose random samples\n random_samples = np.random.randint(0, len(X), num_predictions)\n X_rand = X[random_samples]\n y_pred = self.model.predict(X_rand)\n\n # Number of rows and columns for the figure\n ncols = 2\n nrows = num_predictions\n if y is not None:\n ncols = 3\n y_rand = y[random_samples]\n fig, axes = plt.subplots(nrows, ncols)\n\n if num_predictions == 1:\n if X_rand.shape[3] == 1:\n axes[0].imshow(X_rand[0, :, :, 0], cmap='gray')\n else:\n axes[0].imshow(X_rand[0])\n axes[0].set_xticks([])\n axes[0].set_yticks([])\n\n axes[1].imshow(y_pred[0, :, :, 0] > threshold, cmap='gray')\n axes[1].set_xticks([])\n axes[1].set_yticks([])\n\n axes[0].set_title(\"Original Image\")\n axes[1].set_title(\"Predicted Mask\")\n\n if y is not None:\n axes[2].imshow(y_rand[0, :, :, 0], cmap='gray')\n axes[2].set_xticks([])\n axes[2].set_yticks([])\n axes[2].set_title(\"Ground Truth Mask\")\n else:\n for idx in range(num_predictions):\n if X_rand.shape[3] == 1:\n axes[idx, 0].imshow(X_rand[idx, :, :, 0], cmap='gray')\n else:\n axes[idx, 0].imshow(X_rand[idx])\n axes[idx, 0].set_xticks([])\n axes[idx, 0].set_yticks([])\n\n axes[idx, 1].imshow(y_pred[idx, :, :, 0] > threshold, cmap='gray')\n axes[idx, 1].set_xticks([])\n axes[idx, 1].set_yticks([])\n\n if idx == 0:\n axes[idx, 0].set_title(\"Original Image\")\n axes[idx, 1].set_title(\"Predicted Mask\")\n\n if y is not None:\n axes[idx, 2].imshow(y_rand[idx, :, :, 0], cmap='gray')\n axes[idx, 2].set_xticks([])\n axes[idx, 2].set_yticks([])\n if idx == 0:\n axes[idx, 2].set_title(\"Ground Truth Mask\")\n\n plt.show()",
"def plot_preds(image, preds):\n plt.imshow(image)\n plt.axis('off')\n\n plt.figure()\n labels = (\"jumping\", \"laying\", \"rolling\", \"sitting\", \"standing\")\n plt.barh([0, 1, 2, 3, 4], preds, alpha=0.5)\n plt.yticks([0, 1, 2, 3, 4], labels)\n plt.xlabel('Probability')\n plt.xlim(0,1.01)\n plt.tight_layout()\n plt.show()",
"def draw_target_prediction(image, box):\n image = cv2.rectangle(image, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])),\n (255, 255, 255), thickness=1)\n return image",
"def plot(image, classified_boxes, window_size):\n fig1 = plt.figure(dpi=400)\n ax1 = fig1.add_subplot(1,1,1) \n ax1.imshow(image, cmap=plt.cm.gray)\n ax1.axis('off')\n for box in classified_boxes:\n x_min, y_min, x_max, y_max = box[0]-.5, box[1]-.5, box[0]+window_size[0]-.5, box[1]+window_size[1]-.5\n prediction, predict_score = box[2], box[3]\n ax1.text(x_min, y_min-3, \"%s %d%%\" % (prediction, predict_score*100), color=\"red\", fontsize=3)\n x = [x_max, x_max, x_min, x_min, x_max]\n y = [y_max, y_min, y_min, y_max, y_max]\n line, = ax1.plot(x,y,color=\"red\")\n line.set_linewidth(.5)\n fig1.savefig(\"classification.png\")\n plt.show()\n return",
"def visualize_full_segmentation_predictions(self, img, mask=None, threshold=.5):\n\n # Split the image into smaller patches that match the input size of the model\n X = Dataset.deconstruct_image(img, self.model.input_shape[1:])\n y_pred = self.model.predict(X)\n mask_pred = Dataset.reconstruct_image(y_pred, img.shape)\n\n # Number of rows and columns for the figure\n ncols = 2\n nrows = 1\n if mask is not None:\n ncols = 3\n fig, axes = plt.subplots(nrows, ncols)\n\n if img.shape[2] == 1: # grayscale\n axes[0].imshow(img[..., 0], cmap='gray')\n else: # RGB\n axes[0].imshow(img)\n axes[0].set_xticks([])\n axes[0].set_yticks([])\n axes[0].set_title(\"Image\")\n\n if mask_pred.shape[2] == 1:\n axes[1].imshow(np.squeeze(mask_pred >= threshold), cmap='gray')\n else:\n axes[1].imshow(np.argmax(mask_pred, axis=2), cmap='jet')\n axes[1].set_xticks([])\n axes[1].set_yticks([])\n axes[1].set_title(\"Predicted Mask\")\n\n if mask is not None:\n if mask.shape[2] == 1:\n axes[2].imshow(mask[..., 0], cmap='gray')\n else:\n axes[2].imshow(np.argmax(mask, axis=2), cmap='jet')\n axes[2].set_xticks([])\n axes[2].set_yticks([])\n axes[2].set_title(\"Ground Truth\")\n plt.show()",
"def display_prediction(test_x,test_labels_a,predicted):\n fig = plt.figure(figsize=(10, 10))\n j = 1\n for i in range(0, 1000, 50):\n truth = test_labels_a[i]\n prediction = predicted[i]\n plt.subplot(5, 4, j)\n j = j + 1\n plt.axis('off')\n color = 'green' if truth == prediction else 'red'\n plt.text(40, 10, \"Truth: {0}\\nPrediction: {1}\".format(truth, prediction),\n fontsize=12, color=color)\n plt.imshow(test_x[i], cmap='gray')",
"def visualize_predictions(model, data_loader, vis_dir):\n if not os.path.isdir(vis_dir):\n os.mkdir(vis_dir)\n\n model.eval()\n rgb_mean_train = dtst.calc_rgb_mean_train(args_dict)\n rgb_mean_train = rgb_mean_train[:, np.newaxis, np.newaxis]\n\n for sample in data_loader:\n gt_count = sample['dmap'].numpy().sum()\n image_normd = sample['image'].float().to('cuda')\n cls0_logits, cls1_logits, cls2_logits, DIV2 = model(image_normd)\n pred_count_16x16_blocks = DIV2.cpu().detach().numpy()\n pred = pred_count_16x16_blocks.squeeze(0).squeeze(0)\n pred_count = pred.sum()\n if gt_count >= 1:\n rel_err_percent = (pred_count - gt_count) / gt_count * 100\n else:\n rel_err_percent = 0.0\n image = image_normd.cpu().numpy() * 255 + rgb_mean_train\n image = image.astype(int).squeeze(0)\n # ^ shape = (3, w, h)\n image = np.transpose(image, (1, 2, 0))\n bleach_coef = 0.4\n image_bleached = (255 - (255 - image) * bleach_coef).astype(int)\n bname = sample['image_bname'][0]\n # ^ bnames appear in numerically sorted order ('IMG_1', 'IMG_2', ...)\n\n #h, w = image.shape[:2]\n w, h = 1024, 768\n # search for \"Relationship between dpi and figure size\"\n # on stackoverflow\n dpi = 100\n # ^ dpi = 100 by default\n fs_h = int(h / dpi)\n fs_w = int(w / dpi)\n fs = (fs_w, fs_h)\n fig, axs = plt.subplots(figsize=fs, dpi=dpi, ncols=2)\n axs[0].imshow(image, vmin=0, vmax=255)\n axs[0].set_title('Ground truth total count = %.1f' % gt_count)\n extent = (0, w, 0, h)\n axs[1].imshow(image_bleached, vmin=0, vmax=255, extent=extent)\n pred_im = axs[1].imshow(\n pred,\n cmap='bwr', alpha=0.5,\n extent=extent, interpolation='nearest')\n axs[1].set_title(\n 'Predicted total count = %.1f\\n(error = %+.1f, '\n 'relative error = %+.1f%%)'\n % (pred_count, pred_count - gt_count, rel_err_percent))\n divider = make_axes_locatable(axs[1])\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n fig.colorbar(pred_im, cax=cax)\n fig.tight_layout()\n plt.savefig(pjn(vis_dir, bname + '.png'), bbox_inches='tight')\n plt.close(fig)",
"def postprocess(self, prediction_dict, true_image_shapes, **params):\n field = fields.DetectionResultFields\n with tf.variable_scope('PostprocessInference'):\n detections_dict = {}\n # 1. Semantic prediction\n semantic_prediction, semantic_prediction_probability = \\\n self._postprocess_logits(\n prediction_dict['semantic_predictions'], true_image_shapes)\n detections_dict[field.detection_semantic] = semantic_prediction\n detections_dict[field.detection_semantic_heatmap] \\\n = semantic_prediction_probability\n if self._instance_segmentation:\n # 2. Instance prediction\n # instance_prediction, instance_prediction_probability = \\\n # self._postprocess_logits(\n # prediction_dict['instance_predictions'], true_image_shapes)\n # detections_dict[field.detection_masks] = instance_prediction\n # detections_dict[field.detection_masks_heatmap] \\\n # = instance_prediction_probability\n instance_prediction = \\\n self._postprocess_cluster(\n prediction_dict['instance_predictions'],\n semantic_prediction, true_image_shapes)\n detections_dict[field.detection_masks] = instance_prediction\n # 3. Panoptic prediction\n with tf.variable_scope('Panoptic'):\n sem_image = tf.cast(semantic_prediction, dtype=tf.uint8)\n ins_image = tf.cast(instance_prediction, dtype=tf.uint8)\n sem_mask = tf.ones_like(sem_image, dtype=sem_image.dtype)\n ins_mask = tf.where(\n tf.greater(sem_image, sem_mask*self.num_classes),\n tf.zeros_like(ins_image), ins_image)\n zero_image = tf.zeros_like(ins_image, dtype=tf.uint8)\n panoptic_image = tf.concat(\n [sem_image, ins_mask, zero_image], axis=-1)\n tf.summary.image('panoptic', panoptic_image)\n detections_dict[field.detection_masks_image] = ins_image\n detections_dict[field.detection_panoptic_image] = panoptic_image\n return detections_dict",
"def show_eval_images(self, predictions, truth):\n if self.task == 'object_detection':\n self.show_dect_eval_images(predictions, truth)\n elif self.task == 'instance_segmentation':\n self.show_segm_eval_images(predictions, truth)",
"def view_image(row, train_test):\n\n image_name, l, t, r, b, class_idx = row\n class_name = car_dict[class_idx]\n drawn_img = Image.open(\n Path(\"stanford_car\")\n / \"car_data\"\n / train_test\n / class_name\n / image_name\n )\n bbox = ImageDraw.Draw(drawn_img)\n bbox.rectangle([l, t, r, b], outline=\"red\", fill=None)\n drawn_img.show()",
"def visualize_training(self,batched_inputs, results):#image,heatmap):#,\n from pointscollection.utils import exVisualizer as Visualizer\n from detectron2.data.detection_utils import convert_image_to_rgb\n\n\n assert len(batched_inputs) == len(\n results\n ), \"Cannot visualize inputs and results of different sizes\"\n # storage = get_event_storage()\n max_boxes = 20\n\n image_index = 0 # only visualize a single image\n img = batched_inputs[image_index][\"image\"]\n img = convert_image_to_rgb(img.permute(1, 2, 0), \"BGR\")\n print(batched_inputs[0]['file_name'],batched_inputs[0]['image_id'])\n\n\n # v_gt = Visualizer(img, None)\n # # v_gt = v_gt.overlay_instances(boxes=batched_inputs[image_index][\"instances\"].gt_boxes)\n # anno_img = v_gt.get_image()\n processed_results = _postprocess(results[image_index], img.shape[0], img.shape[1])\n predicted_boxes = processed_results.pred_boxes.tensor.detach().cpu().numpy()\n predicted_mask = processed_results.pred_masks.detach().cpu().numpy()\n predicted_points=processed_results.pred_points.detach().cpu().numpy()\n\n v_pred = Visualizer(img, None)\n v_pred = v_pred.overlay_instances(boxes=predicted_boxes[0:max_boxes],masks=predicted_mask[0:max_boxes],points=predicted_points[0:max_boxes])\n prop_img = v_pred.get_image()\n vis_img =prop_img# np.vstack((anno_img, prop_img))\n # vis_img = vis_img.transpose(2, 0, 1)\n # vis_name = f\"Top: GT bounding boxes; Bottom: {max_boxes} Highest Scoring Results\"\n # plt.imshow(vis_img)\n # plt.show()\n plt.imsave('output/result_show/{:0>12}.png'.format(batched_inputs[0]['image_id']),vis_img)\n \n\n # storage.put_image(vis_name, vis_img)\n # img = image[0]\n # img=img*self.pixel_std+self.pixel_mean\n # img = convert_image_to_rgb(img.permute(1, 2, 0), \"BGR\")\n # ht=heatmap[0]\n # ht=torch.sigmoid(ht)\n\n # ht=ht.cpu().numpy()\n # ht=np.max(ht,axis=0)\n # plt.imshow(np.uint8(img))\n # plt.show()\n # plt.imshow(ht)\n # plt.show()",
"def save_annotated_image(image, annotations,CLASS_NAMES,outfile):\n #test_metadata is required to get label names in the image\n if \"test\" not in DatasetCatalog.list():\n register_dataset_from_dicts([],\"test\",CLASS_NAMES)\n test_metadata = MetadataCatalog.get(\"test\")\n try:\n visualizer = Visualizer(image, metadata=test_metadata, scale=1.0)\n except TypeError as err:\n print(err)\n return 1\n else:\n vis = visualizer.draw_instance_predictions(annotations[\"instances\"])\n vis.save(outfile)\n return 0",
"def plot_classes_preds(model, images, labels, classes):\n preds, probs = images_to_probs(model, images)\n # plot the images in the batch, along with predicted and true labels\n fig = plt.figure(figsize=(4, 5))\n for idx in np.arange(4):\n ax = fig.add_subplot(2, 2, idx + 1, xticks=[], yticks=[])\n matplotlib_imshow(images[idx].cpu(), one_channel=True)\n ax.set_title(\n \"{0}, {1:.1f}%\\n(label: {2})\".format(\n classes[preds[idx]], probs[idx] * 100.0, classes[labels[idx]]\n ),\n color=(\"green\" if preds[idx] == labels[idx].item() else \"red\"),\n )\n return fig",
"def vizualize_predictions(image, pred_mask, gt_mask, out_path):\n img_1, pred_contours, hierarchy = cv2.findContours(\n pred_mask.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE\n )\n img_2, gt_contours, hierarchy = cv2.findContours(\n gt_mask.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE\n )\n image = cv2.drawContours(image, pred_contours, -1, (255, 0, 0), 3)\n image = cv2.drawContours(image, gt_contours, -1, (0, 255, 0), 3)\n cv2.imwrite(out_path, image)",
"def plot_examples_with_predictions_and_proba(*, \n model_predictions_dict, \n n=100, \n examples_to_plot=\"all\",\n module_names, \n dataset_name, \n subset_name,\n img_batch_subset_names, \n path_to_raw_img_batch,\n class_colors,\n make_plot_with_img_examples=True,\n plot_classyfication_summary=True, \n max_img_per_col=10,\n verbose=False \n \n): \n \n for module_nr, module_name in enumerate(module_names): \n\n # extract info required for plots, \n original_labels = model_predictions_dict[module_nr][subset_name[0]][\"original_labels\"]\n model_predictions = model_predictions_dict[module_nr][subset_name[0]][\"model_predictions\"]\n model_predictions_proba = model_predictions_dict[module_nr][subset_name[0]][\"model_predictions_proba\"]\n acc_restuls_and_params = model_predictions_dict[module_nr][subset_name[0]][\"acc_restuls_and_params\"]\n class_decoding = model_predictions_dict[module_nr][subset_name[0]]['class_decoding']\n\n\n # Load raw img, \n \"reloading each time to avoid having problems\"\n raw_img_batch = load_raw_img_batch(\n load_datasetnames=img_batch_subset_names, \n path=path_to_raw_img_batch, \n image_size=(500,500), verbose=False)\n\n\n # select images for plot, \n\n if examples_to_plot==\"incorrect\":\n searched_predictions = [x!=y for x, y in zip(original_labels.tolist(), model_predictions.tolist())]\n sel_img_idx = np.arange(0, raw_img_batch.shape[0])[searched_predictions]\n\n if examples_to_plot==\"correct\":\n searched_predictions = [x==y for x, y in zip(original_labels.tolist(), model_predictions.tolist())]\n sel_img_idx = np.arange(0, raw_img_batch.shape[0])[searched_predictions]\n\n if examples_to_plot==\"all\":\n searched_predictions = [True]*raw_img_batch.shape[0]\n sel_img_idx = np.arange(0, raw_img_batch.shape[0])[searched_predictions]\n\n # if there are no examples, to display, here is an option to stop\n if np.array(searched_predictions).sum()==0:\n if verbose==True:\n print(f\"No - {examples_to_plot} - image example found in that dataset\")\n else:\n pass\n\n if np.array(searched_predictions).sum()>0:\n\n # check whther selection is required at all, \n if isinstance(n, int):\n # create up to n examples, where possible, using sel_img_idx\n which_idx_to_use = np.unique(np.floor(np.linspace(0,sel_img_idx.shape[0], n, endpoint=False)).astype(int)).tolist()\n img_idx = sel_img_idx[which_idx_to_use] \n \n if isinstance(n, str):\n # use asll instances, and all images - \n #. t is especially designed to work with plot_classyfication_summary==True, and lot_img_examples==False\n img_idx = np.arange(0, raw_img_batch.shape[0]) \n \n \n \n \n # create img names, with class name and probability \n\n # .. 
helper funciton, \n def create_image_description(row, sorted_class_names):\n row= np.array(row).flatten()\n class_idx = np.where(row==row.max())[0][0]\n img_name = f\"{sorted_class_names[class_idx]}: {np.round(row[class_idx]*100,1)}%\"\n return img_name\n # ..\n img_names = pd.DataFrame(model_predictions_proba).apply(\n create_image_description, \n sorted_class_names=np.array(list(class_decoding.values())), \n axis=1\n )\n\n # disable some fucntiosn in the plot, when only small nr of images is displayed - to make it nice looking, \n if len(img_idx)>1:\n # fig with img examples, \n subplots_adjust_top=0.75\n title = f\"{module_name}, results: {examples_to_plot} ({len(img_idx)} available examples from {raw_img_batch.shape[0]} in total)\"\n class_colors_for_legend = class_colors\n pie_title = None\n \n else:\n title = None\n pie_title = None\n class_colors_for_legend = None\n\n \n # create img_names and img_groupnames\n if examples_to_plot==\"incorrect\":\n img_groupname = [\"Inorectly Classified Images\"]*len(img_idx)\n else:\n img_groupname = np.array([f\"Classified as:\\n {x}\" for x in model_predictions.tolist()])[img_idx].tolist()\n img_names = img_names.values[img_idx].tolist()\n \n \n # plot image examples\n if make_plot_with_img_examples==True:\n plot_img_examples(\n selected_img_batch = raw_img_batch[img_idx],\n img_groupname = img_groupname,\n img_name = img_names,\n img_color = pd.Series(original_labels).map(class_colors).values[img_idx].tolist(),\n class_colors_for_legend = class_colors_for_legend,\n title = title,\n legend_loc = \"center\",\n max_img_per_col = max_img_per_col,\n figsize_scaling = 3,\n space_between_clusters = 0.5,\n subplots_adjust_top = subplots_adjust_top, \n space_for_color_box_factor= 0.01,\n fontScale = 2,\n img_name_fontcolor = \"lime\"\n )\n else:\n pass\n\n # Plot Pie charts summarizing items classified into each class, \n if plot_classyfication_summary==True:\n annotated_pie_chart_with_class_and_group(\n title=pie_title,\n classnames=np.array(original_labels)[img_idx].tolist(), \n class_colors=class_colors,\n ###\n groupnames=np.array([f\"Classified as:\\n{x}\" for x in model_predictions.tolist()])[img_idx].tolist(), \n #groupname_colors=class_colors, \n ###\n n_subplots_in_row=6, \n legend_loc=\"upper right\"\n )\n else:\n pass",
"def get_inference_image(self):\n for detection in self.cvOut[0,0,:,:]:\n score = float(detection[2])\n if score > self.Threshold:\n left = int(detection[3] * self.cols)\n top = int(detection[4] * self.rows)\n right = int(detection[5] * self.cols)\n bottom = int(detection[6] * self.rows)\n\n # Draw the bounding-box on the image\n cv2.rectangle(self.result_image,(left, top),(right, bottom), (23, 230, 210), thickness=2)\n cv2.drawMarker(self.result_image,get_rect_centre(left, top,right, bottom),(255,0,0))\n cv2.putText(self.result_image, self.label_dict[int(detection[1])] + \" : \" + str(round(score,4)),\\\n (int(left-10),int(top-10)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,255), 2)\n\n print(\"[INFO] Result image generated successfully.\")\n return self.result_image"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
padding a list of indices with 0 until a maximum length (max_length_tweet) | def pad_sequence(self, t_index):
l = len(t_index)
if l < self.max_length_tweet:
req_d = self.max_length_tweet - l
t_index.extend([np.zeros_like(t_index[0])] * req_d)
elif l > self.max_length_tweet:
t_index = t_index[:self.max_length_tweet].copy()
return t_index | [
"def pad_indexes(indexes_batch, value):\n return torch.tensor(list(zip_longest(*indexes_batch, fillvalue=value)))",
"def pad(self, max_lengths: Dict[str, int]):\n self.word_indices = self.pad_word_sequence(self.word_indices, max_lengths)",
"def get_padded_tensor(tokens_list, batch_size):\n\ttoken_len = max(len(x) for x in tokens_list)\n\tpad_len = min(token_len, MAXLEN)\n\ttokens = torch.zeros(batch_size, pad_len, dtype=torch.long).fill_(PAD_ID)\n\tfor i, s in enumerate(tokens_list):\n\t\tcur_len = min(pad_len, len(s))\n\t\ttokens[i, :cur_len] = torch.tensor(s[:cur_len], dtype=torch.long)\n\treturn tokens",
"def target_pad_to_len(words, padded_word_len, word_padding=0):\n if len(words) < padded_word_len:\n words += [word_padding] * (padded_word_len - len(words))\n return words",
"def __pad(self, tensor_list, length):\n return torch.stack([torch.cat([tensor.data, tensor.new(length-tensor.size(0)).zero_()])\n for tensor in tensor_list]).to(self.device)",
"def _prepare_index_array(self, index_list):\n pad_len = self.max_token_sequence_len\n batch_size = len(index_list)\n padding_index = 0\n padded_sentences = np.full((batch_size, pad_len, 2), padding_index, dtype=np.int32)\n for i in range(batch_size):\n clipped_len = min(len(index_list[i]), pad_len)\n padded_sentences[i, :, 0] = i\n padded_sentences[i, pad_len - clipped_len:, 1] = index_list[i][:clipped_len]\n return padded_sentences",
"def padlist(list_to_pad, padlen, pad_token=0):\n padded_list = list_to_pad[:padlen]\n padded_list = padded_list + [pad_token] * (padlen - len(list_to_pad))\n return padded_list",
"def pad_trunc_seq(x, max_len):\n L = len(x)\n shape = x.shape\n if L < max_len:\n pad_shape = (max_len - L,) + shape[1:]\n pad = np.zeros(pad_shape)\n x_new = np.concatenate((x, pad), axis=0)\n else:\n x_new = x[0:max_len]\n return x_new",
"def add_padding(tweets_df):\n tweets_df['input_ids'] = pad_sequence(tweets_df.input_ids.tolist(), batch_first=True)\n tweets_df['attention_mask'] = pad_sequence(tweets_df.attention_mask.tolist(), batch_first=True)\n\n return tweets_df",
"def slices_padding(slice_list):\n max_size = torch.tensor([torch.tensor(slice.shape[1:]).max().item() for slice in slice_list]).max()\n return [slice_padding(slice, max_size) for slice in slice_list]",
"def pad_list(lst, padding):\n result = [None, padding] * len(lst)\n result[0::2] = lst\n return result",
"def pad_sequence(seq, size, padding=None):\n return (list(seq) + [padding for _ in range(size)])[:size]",
"def __padding_features(self, max_len=500, pad=0):\n padded_features = []\n for feature in self.__features:\n if len(feature) >= max_len:\n padded_feature = feature[:max_len]\n else:\n padded_feature = feature\n while len(padded_feature) < max_len:\n padded_feature.append(pad)\n padded_features.append(padded_feature)\n self.__features = padded_features",
"def pad_to_max_length(self, sdp_idx):\n pad_idx = vocab_dict[\"<PAD>\"]\n n_sdp_pad = (self.max_length_sdp - len(sdp_idx)) // 2\n sdp_idx = np.pad(sdp_idx, pad_width=(n_sdp_pad, self.max_length_sdp - n_sdp_pad - len(sdp_idx)),\n constant_values=(pad_idx, pad_idx))\n return sdp_idx",
"def addpaddings(tokens, toZero=False):\n max_length = len(max(tokens, key=len))\n for i in range(len(tokens)):\n if toZero:\n tokens[i] += [0 for i in range(max_length - len(tokens[i]))]\n else:\n tokens[i] += [PAD_TOKEN for i in range(max_length - len(tokens[i]))]\n return tokens",
"def _dynamic_padding(args, batch_data, pad_id):\n pad_p_len = args.max_p_len\n pad_q_len = args.max_q_len\n batch_data['passage_token_ids'] = [(ids + [pad_id] * (pad_p_len - len(ids)))[: pad_p_len]\n for ids in batch_data['passage_token_ids']]\n batch_data['question_token_ids'] = [(ids + [pad_id] * (pad_q_len - len(ids)))[: pad_q_len]\n for ids in batch_data['question_token_ids']]\n return batch_data, pad_p_len, pad_q_len",
"def input_pad_to_len(words, padded_word_len, padded_char_len, word_padding=0, char_padding=0):\n if len(words) < padded_word_len:\n words += [[word_padding]] * (padded_word_len - len(words))\n words = [word + [char_padding] * (padded_char_len - len(word)) if len(word) < padded_char_len else word for word in words]\n return words",
"def add_padding_tokens(\n self, token_ids: List[int], length: int, right: bool = True\n ) -> List[int]:\n padding = [self.pad_token_id] * (length - len(token_ids))\n if right:\n return token_ids + padding\n else:\n return padding + token_ids",
"def pad_to_len(arr, padded_len, padding=0):\n # TODO\n\n return arr[:padded_len] +[padding]*(padded_len-len(arr))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The id of the switch to which the Database connects. | def switch_id(self) -> pulumi.Input[str]:
return pulumi.get(self, "switch_id") | [
"def switch_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"switch_id\")",
"def get_db_id(self):\n return self.db_id",
"def get_id(self):\n return self._hostID",
"def GetID(self):\n return self.__msgs.connectionid",
"def id(self) -> int:\n return self._context.id",
"def device_id(self):\n return self.id",
"def master_db_instance_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"master_db_instance_id\")",
"def get_datapath_id(self):\n return self.db_get_val('Bridge', self.br_name, 'datapath_id')",
"def get_log_id(self):\n return # osid.id.Id",
"def id(self):\n return self.joystick_obj.get_id()",
"def master_db_instance_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"master_db_instance_id\")",
"def get_id(self):\n return self.data[self.system_idx][\"id\"]",
"def get_id(self):\n return self[\"ds_id\"]",
"def get_server_id(self):",
"def r_default_switch_fid(self):\r\n switch_obj = self.r_default_switch_obj()\r\n return None if switch_obj is None else \\\r\n switch_obj.r_get('brocade-fibrechannel-logical-switch/fibrechannel-logical-switch/fabric-id')",
"def win_id(self):\n ret = self._get_attr(\"winId\")\n return ret",
"def connector_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"connector_id\")",
"def id(cls) -> Global:\n return Global.current_application_id()",
"def get_id(self):\n return self.__player_id"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A list of IP addresses to assign to the LoadBalancer. | def ip_addresses(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
return pulumi.get(self, "ip_addresses") | [
"def test_ip_addresses_list(self):\n pass",
"def get_ip_addresses(self, task):\n return []",
"def _getAddresses(self, *ues):\n return [self._s1_util.get_ip(ue.ue_id) for ue in ues]",
"def get_ip_list(prefix):\n return list(map(lambda x: str(x),ipaddress.ip_network(prefix).hosts()))",
"def ex_static_ip_list(self):\r\n response = self.connection.request(action='/resources/ip/list',\r\n method='GET')\r\n\r\n if response.status != 200:\r\n raise CloudSigmaException('Could not retrieve IP list')\r\n\r\n ips = str2list(response.body)\r\n return ips",
"def get_list_ips(connection, listname, limit=None):\n cursor = connection.cursor()\n sql = '''\n SELECT address FROM ipv{0}_addresses JOIN {1}\n ON ipv{0}_addresses.id = {1}.v{0}_id_{1}'''\n if limit:\n sql = add_sql_limit(sql, limit)\n cursor.execute(sql.format(4, listname))\n result_v4 = cursor.fetchall()\n cursor.execute(sql.format(6, listname))\n result_v6 = cursor.fetchall()\n return [str(IPAddress(num[0])) for num in result_v4 + result_v6]",
"def get_asset_ip_list():\n conn = pg.connect(database=\"webhelpdesk\", user=\"psmith\", password=\"\")\n dbCur = conn.cursor(cursor_factory=DictCursor)\n dbCur.execute(\"\"\"select network_address from asset\"\"\")\n return [x[0] for x in dbCur.fetchall() if x is not None]",
"def get_haproxy_vip_addresses(lb_id):\n vips = []\n with open(config_path(lb_id), 'r', encoding='utf-8') as file:\n for line in file:\n current_line = line.strip()\n if current_line.startswith('bind'):\n for section in current_line.split(' '):\n # We will always have a port assigned per the template.\n if ':' in section:\n if ',' in section:\n addr_port = section.rstrip(',')\n vips.append(addr_port.rpartition(':')[0])\n else:\n vips.append(section.rpartition(':')[0])\n break\n return vips",
"def addresses(self) -> typing.List[str]:\n return typing.cast(\n typing.List[str],\n self._properties.get(\"addresses\"),\n )",
"def GetAddrList(self):\n return list(range(ADDR_START, ADDR_END + 1))",
"def get_indicators_ip(self) -> List[dict]:\n return [\n self._process_item(item, self._tags, self.tlp_color)\n for item in self._build_iterator_ip()\n ]",
"def get_instance_ips(self, vm):\n \n return [vm._instance.private_ip_address, vm._instance.ip_address]",
"def inet_visible_ip(self):\n def handle(results):\n ips = [result[1][0] for result in results if result[0]]\n logger.debug(\"other nodes think our ip is %s\", ips)\n return ips\n\n ds = []\n for neighbor in self.bootstrappable_neighbors():\n ds.append(self.protocol.stun(neighbor))\n future_list(ds, handle)",
"def get_all_ec2_public_ip_addresses(auto_scale_group_resource_id):\n\n group = get_auto_scaling_group(auto_scale_group_resource_id)\n public_ip_addresses = []\n\n if group:\n for asg_instance in group.get('Instances'):\n instance_id = asg_instance.get('InstanceId')\n ip_address = get_public_ip_address(instance_id)\n\n public_ip_addresses.append(ip_address)\n\n return public_ip_addresses",
"def service_addresses(self):\n return tuple(map(lambda p: Address(dict(ip=p['ip'], port=p['port'])), self.cfg))",
"def address_list(address):\n\n return [\n str(ip)\n for ip\n in ipaddress.IPv4Network(address)\n ][2:]",
"def __getIPpoolIPs(self,ippool_id):\n ip_list=[]\n ips_db=self.__getIPpoolIPsDB(ippool_id)\n for _dic in ips_db:\n ip_list.append(_dic[\"ip\"])\n return ip_list",
"def check_ip_addresses(list_of_ip):\n reach_iplist = []\n unreach_iplist = []\n for ip_address in list_of_ip:\n reply = subprocess.run(['ping', ip_address, '-n', '3'],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n encoding = 'ascii'\n )\n if reply.returncode == 0:\n ###pprint(reply.stdout)\n reach_iplist.append(ip_address)\n else:\n unreach_iplist.append(ip_address)\n\n return reach_iplist, unreach_iplist",
"def test_ip_addresses_read(self):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
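The `document` column in these records is consistently a property getter from a Pulumi args class, returning `pulumi.get(self, "<name>")`. For orientation, here is a minimal sketch of the surrounding pattern in the Pulumi Python SDK; the class and property names are hypothetical stand-ins, and only the `@pulumi.input_type`, `@pulumi.getter`, `pulumi.get`, and `pulumi.set` conventions are assumed from the SDK.

# Hypothetical args class illustrating where getters like the one above live.
# Only the decorators and pulumi.get/pulumi.set helpers are standard SDK usage;
# the class and property names are placeholders, not the provider's real API.
from typing import Sequence
import pulumi

@pulumi.input_type
class ExampleLoadBalancerArgs:
    def __init__(__self__, *, ip_addresses: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(__self__, "ip_addresses", ip_addresses)

    @property
    @pulumi.getter(name="ipAddresses")
    def ip_addresses(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        return pulumi.get(self, "ip_addresses")

    @ip_addresses.setter
    def ip_addresses(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "ip_addresses", value)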
The id of the switch to which the LoadBalancer connects. | def switch_id(self) -> pulumi.Input[str]:
return pulumi.get(self, "switch_id") | [
"def switch_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"switch_id\")",
"def load_balancer_id(self) -> Optional[str]:\n return pulumi.get(self, \"load_balancer_id\")",
"def get_switch_name(node):\n return node['ports'][0]['switch_name']",
"def r_default_switch_fid(self):\r\n switch_obj = self.r_default_switch_obj()\r\n return None if switch_obj is None else \\\r\n switch_obj.r_get('brocade-fibrechannel-logical-switch/fibrechannel-logical-switch/fabric-id')",
"def get_datapath_id(self):\n return self.db_get_val('Bridge', self.br_name, 'datapath_id')",
"def getLaneId(edgeId: str):\n return edgeId + \"_0\"",
"def getTypeId(self) -> \"SoType\":\n return _coin.SoVRMLSwitch_getTypeId(self)",
"def network_binding_host_id(self, context, instance):\n# LOG.error(\"network_binding_host_id( context=%s, instance=%s)\", context, instance)\n return instance.get('host')",
"def GetRouterID(self, protocol, instance):\n rid = \"\"\n instanceName = \"master\"\n if instance : instanceName = instance.Name\n if len(self.RouterID.get(instanceName, {})) == 0 : self.CalculateRouterIDAndASNumber(instance)\n instanceRIDs = self.RouterID.get(instanceName, None)\n if instanceRIDs != None:\n rid = instanceRIDs.get(str(protocol), \"\")\n return rid",
"def get_id(self):\n return self._hostID",
"def getSwitch(self) -> \"int32_t\":\n return _coin.SoCallbackAction_getSwitch(self)",
"def gateway_load_balancer_frontend_ip_configuration_id(self) -> Optional[str]:\n return pulumi.get(self, \"gateway_load_balancer_frontend_ip_configuration_id\")",
"def relay_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"relay_id\")",
"def getTypeId(self) -> \"SoType\":\n return _coin.SoSwitch_getTypeId(self)",
"def virtual_router_id(self) -> str:\n return self._virtual_router_id",
"def routing_id(self) -> RoutingID:\n return self._routing_id",
"def GetDefaultBoardID(self):\n #TODO\n return \"beaglebone\"",
"def wip_id(self) -> str:\n return self._wip_id",
"def getTypeId(self) -> \"SoType\":\n return _coin.SoPathSwitch_getTypeId(self)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The flag to enable Slack notification when traffic shaping is enabled. | def enable_slack(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "enable_slack") | [
"def shaping_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"shaping_enabled\")",
"def can_register_for_superseding_event_notifications(self):\n return # boolean",
"def supports_gradebook_notification(self):\n return # boolean",
"def sending_enabled(self) -> bool:\n return pulumi.get(self, \"sending_enabled\")",
"async def _set_speaking(self, is_speaking):\n data = {\n 'op' :self.SPEAKING,\n 'd' : {\n 'speaking' : is_speaking,\n 'delay' : 0,\n },\n }\n \n await self.send_as_json(data)",
"def enable_monitoring(self) -> bool:\n return pulumi.get(self, \"enable_monitoring\")",
"def supports_grade_system_notification(self):\n return # boolean",
"def _set_isTrackingToImproveCommunicationEnabled(self, *args) -> \"bool\" :\n return _core.ProductUsageData__set_isTrackingToImproveCommunicationEnabled(self, *args)",
"def is_sms_notification_enabled():\n enabled = False\n try:\n enabled = config.getboolean(consts.SMS_NOTIFICATION_SECTION, consts.ENABLED)\n except:\n pass\n return enabled",
"def on(self):\n return self._system_on",
"def _set_areAutodesk360NotificationsShown(self, *args) -> \"bool\" :\n return _core.GeneralPreferences__set_areAutodesk360NotificationsShown(self, *args)",
"def include_if(self) -> bool:\n # TODO: Warn when notify socket is set and the SystemD library is not installed\n return \"NOTIFY_SOCKET\" in os.environ and bool(notify)",
"def enableNotify(self, on: 'SbBool') -> \"SbBool\":\n return _coin.SoField_enableNotify(self, on)",
"def enableNotify(self, flag: 'SbBool const') -> \"SbBool\":\n return _coin.SoFieldContainer_enableNotify(self, flag)",
"def _get_areAutodesk360NotificationsShown(self) -> \"bool\" :\n return _core.GeneralPreferences__get_areAutodesk360NotificationsShown(self)",
"async def bunker(self, ctx):\n try:\n bunker = await self.config.guild(ctx.guild).bunker()\n bunker = not bunker\n await self.config.guild(ctx.guild).bunker.set(bunker)\n if bunker:\n await ctx.send(f\"The bunker warning is now on\")\n else:\n await ctx.send(f\"The bunker warning is now off\")\n\n except (ValueError, KeyError, AttributeError):\n await ctx.send(\"There was a problem toggling the bunker\")",
"def supports_sequence_rule_enabler_notification(self):\n return False",
"def HasWSL(self):\n return self.__has('WSL')",
"def _should_automatically_send(business_process):\n return BrokerNoteBulkGeneral.should_automatically_send()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A list of `additional_certificate` blocks as defined below. | def additional_certificates(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ProxyLBACMECertificateAdditionalCertificateArgs']]]]:
return pulumi.get(self, "additional_certificates") | [
"def additional_certificates(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ProxyLBCertificateAdditionalCertificateArgs']]]]:\n return pulumi.get(self, \"additional_certificates\")",
"def tbs_certlist_bytes(self):",
"def tbs_certificate_bytes(self):",
"def get_certificates_der_v3(self):\n\n if self._v3_siging_data is None:\n self.parse_v3_signing_block()\n\n certs = []\n for signed_data in [signer.signed_data for signer in self._v3_siging_data]:\n for cert in signed_data.certificates:\n certs.append(cert)\n\n return certs",
"def read_certificates():\n\n cert_input = get_stdin().read()\n\n return [crt.strip() + '\\n' + PEM_FOOTER + '\\n' for crt in cert_input.split(PEM_FOOTER) if len(crt.strip()) > 0]",
"def test_extraChainFilesAreAddedIfSupplied(self):\n opts = sslverify.OpenSSLCertificateOptions(\n privateKey=self.sKey,\n certificate=self.sCert,\n extraCertChain=self.extraCertChain,\n )\n opts._contextFactory = FakeContext\n ctx = opts.getContext()\n self.assertEqual(self.sKey, ctx._privateKey)\n self.assertEqual(self.sCert, ctx._certificate)\n self.assertEqual(self.extraCertChain, ctx._extraCertChain)",
"def additional_extensions(self) -> Optional[Sequence['outputs.AuthorityConfigX509ConfigAdditionalExtension']]:\n return pulumi.get(self, \"additional_extensions\")",
"def additional(self):\n return _ldns._ldns_pkt_additional(self)\n #parameters: const ldns_pkt *,\n #retvals: ldns_rr_list *",
"def additional_extensions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['CertificateConfigX509ConfigAdditionalExtensionArgs']]]]:\n return pulumi.get(self, \"additional_extensions\")",
"def test_list_certificate_signing_request(self):\n pass",
"def additional(self, **filters):\n\t\tret = [resource_record(rr) for rr in self._ldns_pkt.additional().rrs()]\n\t\treturn filter(self._construct_rr_filter(**filters), ret)",
"def build_cert_chain(self,certificate):\r\n\t\tchain = []\r\n\t\tlast = None\r\n\t\tlogger.info(\"Starting to build trusting chain..\")\r\n\t\t\r\n\t\twhile True:\r\n\t\t\tif last == certificate:\r\n\t\t\t\tself.trusting_chain = []\r\n\t\t\t\treturn\r\n\t\t\tlast = certificate\r\n\t\t\t\r\n\t\t\tchain.append(certificate)\r\n\t\t\tissuer = certificate.issuer.rfc4514_string()\r\n\t\t\tsubject = certificate.subject.rfc4514_string()\r\n\t\t\t\r\n\t\t\tif issuer == subject and issuer in self.issuers_certs:\r\n\t\t\t\tbreak\r\n\t\t\t\r\n\t\t\tif issuer in self.issuers_certs:\r\n\t\t\t\tcertificate = self.issuers_certs[issuer]\r\n\t\tlogger.info(\"Chain Built with success\")\r\n\t\tself.trusting_chain = chain",
"def gen_cert_block(data_dir, sign_bits) -> CertBlockV2:\n with open(os.path.join(data_dir, 'sb2_x', 'selfsign_' + str(sign_bits) + '_v3.der.crt'), 'rb') as f:\n cert_data = f.read()\n\n cert_obj = Certificate(cert_data)\n root_key = cert_obj.public_key_hash\n\n cb = CertBlockV2()\n cb.set_root_key_hash(0, root_key)\n cb.add_certificate(cert_data)\n return cb",
"def _add_encoded(console, certificates):\n for cert_dict in certificates:\n cert = console.certificates.list(\n filter_args={'name': cert_dict['name']})[0]\n cert_dict.update(cert.get_encoded())",
"def make_server_leaf_certificate_dict(self, leaf_certificate):\n ret = []\n for cert in leaf_certificate:\n ret.append({\n 'name': cert['aliasName'],\n 'type': 'CertificateInfoV2',\n 'certificateDetails':\n [\n {\n 'base64Data': cert['base64Data'],\n 'aliasName': cert['aliasName'],\n 'type': 'CertificateDetailV2'\n },\n ]\n })\n return ret",
"def fetch_certs(certificate_list, user_agent=None, timeout=10):\n\n output = []\n\n if user_agent is None:\n user_agent = 'certvalidator %s' % __version__\n elif not isinstance(user_agent, str_cls):\n raise TypeError('user_agent must be a unicode string, not %s' % type_name(user_agent))\n\n for url in certificate_list.issuer_cert_urls:\n request = Request(url)\n request.add_header('Accept', 'application/pkix-cert,application/pkcs7-mime')\n request.add_header('User-Agent', user_agent)\n response = urlopen(request, None, timeout)\n\n content_type = response.headers['Content-Type'].strip()\n response_data = response.read()\n\n if content_type == 'application/pkix-cert':\n output.append(x509.Certificate.load(response_data))\n\n elif content_type == 'application/pkcs7-mime':\n signed_data = cms.SignedData.load(response_data)\n if isinstance(signed_data['certificates'], cms.CertificateSet):\n for cert_choice in signed_data['certificates']:\n if cert_choice.name == 'certificate':\n output.append(cert_choice.chosen)\n else:\n raise ValueError('Unknown content type of %s when fetching issuer certificate for CRL' % repr(content_type))\n\n return output",
"def _parse_file_key_certs(certificate_file, validate = False):\n\n while True:\n keycert_content = _read_until_keywords('dir-key-certification', certificate_file)\n\n # we've reached the 'router-signature', now include the pgp style block\n block_end_prefix = PGP_BLOCK_END.split(' ', 1)[0]\n keycert_content += _read_until_keywords(block_end_prefix, certificate_file, True)\n\n if keycert_content:\n yield stem.descriptor.networkstatus.KeyCertificate(bytes.join(b'', keycert_content), validate = validate)\n else:\n break # done parsing file",
"def tbs_certrequest_bytes(self):",
"def test_extraChainDoesNotBreakPyOpenSSL(self):\n opts = sslverify.OpenSSLCertificateOptions(\n privateKey=self.sKey,\n certificate=self.sCert,\n extraCertChain=self.extraCertChain,\n )\n ctx = opts.getContext()\n self.assertIsInstance(ctx, SSL.Context)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The proxy mode. This must be one of [`http`/`https`/`tcp`]. | def proxy_mode(self) -> pulumi.Input[str]:
return pulumi.get(self, "proxy_mode") | [
"def get_useproxyport(self):\n return self.options['useproxyport']",
"def proxy(self):\n if self._proxy is not None:\n if self._proxy[:7] == \"http://\":\n self._proxy = {'http://': self._proxy}\n Color.pl(\"{+} Proxy: %s\" % self._proxy['http://'])\n elif self._proxy[:8] == \"https://\":\n self._proxy = {'https://': self._proxy}\n Color.pl(\"{+} Proxy: %s\" % self._proxy['https://'])\n elif self._proxy[:3] == \"ftp\":\n self._proxy = {'ftp': self._proxy}\n Color.pl(\"{+} Proxy: %s\" % self._proxy['ftp'])\n else:\n self._proxy = \"\"\n return self._proxy",
"def enable_proxy_protocol(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"enable_proxy_protocol\")",
"def enable_https_proxy(self):\n # type: () -> int\n return self._get_property('enable_https_proxy')",
"def use_system_proxy_setting(self):\n return \"true\" == self.get(\"network\", \"use_system_proxy_settings\", \"true\").lower()",
"def useproxyport(self) :\n try :\n return self._useproxyport\n except Exception as e:\n raise e",
"def proxy_service(self):\n return self.data.get(\"http_proxy\")",
"def supports_proxy(self):\n return # boolean",
"def proxy_ssl(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"proxy_ssl\")",
"def supports_proxy(self):\n # Implemented from template for\n # osid.resource.ResourceProfile.supports_resource_lookup\n return 'supports_proxy' in profile.SUPPORTS",
"def get_proxy(arguments):\n if (arguments.proxy_host is not None\n and arguments.proxy_port is not None\n and arguments.proxy_secret is not None):\n logging.debug(\"Using proxy: %s:%s\", arguments.proxy_host, arguments.proxy_port)\n return ((arguments.proxy_host, arguments.proxy_port, arguments.proxy_secret),\n ConnectionTcpMTProxyRandomizedIntermediate)\n return None, ConnectionTcpFull",
"def get_proxy_port(self):\n return self._proxy_port",
"def proxy_url(self) -> str:\n return pulumi.get(self, \"proxy_url\")",
"def get_protocol():\n protocol = 'http'\n if settings.LUTEFISK_USE_HTTPS:\n protocol = 'https'\n return protocol",
"def set_http_proxy(self, proxy_url):\r\n result = self._parse_proxy_url(proxy_url=proxy_url)\r\n scheme = result[0]\r\n host = result[1]\r\n port = result[2]\r\n username = result[3]\r\n password = result[4]\r\n\r\n self.proxy_scheme = scheme\r\n self.proxy_host = host\r\n self.proxy_port = port\r\n self.proxy_username = username\r\n self.proxy_password = password\r\n self.http_proxy_used = True\r\n\r\n self._setup_http_proxy()",
"def useProxy(self, config, logger=None):\n return self.use_proxy",
"def _set_networkProxySetting(self, *args) -> \"bool\" :\n return _core.NetworkPreferences__set_networkProxySetting(self, *args)",
"def __setHTTPProxy():\n\n global proxyHandler\n\n if not conf.proxy: \n if conf.hostname in ('localhost', '127.0.0.1') or conf.ignoreProxy:\n proxyHandler = urllib2.ProxyHandler({})\n return\n\n debugMsg = \"setting the HTTP proxy to pass by all HTTP requests\"\n logger.debug(debugMsg)\n\n __proxySplit = urlparse.urlsplit(conf.proxy)\n __hostnamePort = __proxySplit[1].split(\":\")\n\n __scheme = __proxySplit[0]\n __hostname = __hostnamePort[0]\n __port = None\n\n if len(__hostnamePort) == 2:\n try:\n __port = int(__hostnamePort[1])\n except:\n pass #drops into the next check block\n\n if not __scheme or not __hostname or not __port:\n errMsg = \"proxy value must be in format 'http://url:port'\"\n raise sqlmapSyntaxException, errMsg\n\n __proxyString = \"%s:%d\" % (__hostname, __port)\n\n # Workaround for http://bugs.python.org/issue1424152 (urllib/urllib2:\n # HTTPS over (Squid) Proxy fails) as long as HTTP over SSL requests\n # can't be tunneled over an HTTP proxy natively by Python (<= 2.5)\n # urllib2 standard library\n if conf.scheme == \"https\":\n proxyHandler = ProxyHTTPSHandler(__proxyString)\n else:\n proxyHandler = urllib2.ProxyHandler({\"http\": __proxyString})",
"def _get_networkProxySetting(self) -> \"adsk::core::NetworkProxySettings\" :\n return _core.NetworkPreferences__get_networkProxySetting(self)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
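Several queries in this section state an enumerated constraint, such as `proxy_mode` being limited to `http`/`https`/`tcp`. The snippet below is purely illustrative and is derived from the query text rather than the provider: it shows a simple client-side guard for such a value before it is handed to an args class.

# Illustrative only: the allowed values are the ones quoted in the query above;
# nothing here is taken from the provider's own validation logic.
ALLOWED_PROXY_MODES = {"http", "https", "tcp"}

def check_proxy_mode(value: str) -> str:
    if value not in ALLOWED_PROXY_MODES:
        raise ValueError(
            f"proxy_mode must be one of {sorted(ALLOWED_PROXY_MODES)}, got {value!r}"
        )
    return value

check_proxy_mode("https")  # passes; check_proxy_mode("udp") would raise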
The ssl policy. This must be one of [`TLS12201904`/`TLS12202106`/`TLS13202106`]. | def ssl_policy(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "ssl_policy") | [
"def get_ssl_protocol(self):\n return self._ssl_protocol",
"def get_force_https_protocol_value(self):\n return getattr(ssl, self.get_force_https_protocol_name())",
"def get_force_https_protocol_name(self):\n default = \"PROTOCOL_TLSv1_2\" if hasattr(ssl, \"PROTOCOL_TLSv1_2\") else \"PROTOCOL_TLSv1\"\n return self.get('security', 'force_https_protocol', default=default)",
"def test_tlsProtocolsAtLeastWillAcceptHigherDefault(self):\n opts = sslverify.OpenSSLCertificateOptions(\n privateKey=self.sKey,\n certificate=self.sCert,\n raiseMinimumTo=sslverify.TLSVersion.SSLv3\n )\n opts._contextFactory = FakeContext\n ctx = opts.getContext()\n # Future maintainer warning: this will break if we change our default\n # up, so you should change it to add the relevant OP_NO flags when we\n # do make that change and this test fails.\n options = (SSL.OP_NO_SSLv2 | SSL.OP_NO_COMPRESSION |\n SSL.OP_CIPHER_SERVER_PREFERENCE | SSL.OP_NO_SSLv3)\n self.assertEqual(options, ctx._options & options)\n self.assertEqual(opts._defaultMinimumTLSVersion,\n sslverify.TLSVersion.TLSv1_0)",
"def sslservice_sslpolicy_bindings(self) :\n\t\ttry :\n\t\t\treturn self._sslservice_sslpolicy_binding\n\t\texcept Exception as e:\n\t\t\traise e",
"def test_tlsProtocolsSSLv3Only(self):\n opts = sslverify.OpenSSLCertificateOptions(\n privateKey=self.sKey,\n certificate=self.sCert,\n insecurelyLowerMinimumTo=sslverify.TLSVersion.SSLv3,\n lowerMaximumSecurityTo=sslverify.TLSVersion.SSLv3,\n )\n opts._contextFactory = FakeContext\n ctx = opts.getContext()\n options = (SSL.OP_NO_SSLv2 | SSL.OP_NO_COMPRESSION |\n SSL.OP_CIPHER_SERVER_PREFERENCE | SSL.OP_NO_TLSv1 |\n SSL.OP_NO_TLSv1_1 | SSL.OP_NO_TLSv1_2 | opts._OP_NO_TLSv1_3)\n self.assertEqual(options, ctx._options & options)",
"def is_ssl(self):\n\t\treturn self.ssl",
"def sslsslv3handshakesrate(self) :\n try :\n return self._sslsslv3handshakesrate\n except Exception as e:\n raise e",
"def HasSSL(self):\n return self.__has('SSL')",
"def ssl(self):\n\t\tif 'with_openssl' in self.configure_options:\n\t\t\treturn True\n\t\t# Parameterized form in newer versions.\n\t\tfor x in self.configure_options:\n\t\t\tif 'with_ssl' in x:\n\t\t\t\treturn True\n\t\treturn False",
"def test_protocol_sslv3(self):\n if support.verbose:\n sys.stdout.write(\"\\n\")\n try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3')\n try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL)\n try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED)\n if hasattr(ssl, 'PROTOCOL_SSLv2'):\n try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)\n try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, False,\n client_options=ssl.OP_NO_SSLv3)\n try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)\n if no_sslv2_implies_sslv3_hello():\n # No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs\n try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23,\n False, client_options=ssl.OP_NO_SSLv2)",
"def sslbenullciphersrate(self) :\n try :\n return self._sslbenullciphersrate\n except Exception as e:\n raise e",
"def ssl_certificate_validation_disabled():\n return bool(\n os.environ.get('LP_DISABLE_SSL_CERTIFICATE_VALIDATION', False))",
"def sslsslv2handshakesrate(self) :\n try :\n return self._sslsslv2handshakesrate\n except Exception as e:\n raise e",
"def ssl_check():\n return \"All ok, mm'kay.\"",
"def disable_ssl(self) -> bool:\n return pulumi.get(self, \"disable_ssl\")",
"def set_ssl_protocol(self, ssl_protocol):\n CheckValue.check_int_ge_zero(ssl_protocol, 'ssl_protocol')\n self._ssl_protocol = ssl_protocol\n return self",
"def test_protocol_sslv2(self):\n if support.verbose:\n sys.stdout.write(\"\\n\")\n try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)\n try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)\n try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)\n try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False)\n if hasattr(ssl, 'PROTOCOL_SSLv3'):\n try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)\n try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)\n # SSLv23 client with specific SSL options\n if no_sslv2_implies_sslv3_hello():\n # No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs\n try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,\n client_options=ssl.OP_NO_SSLv2)\n try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,\n client_options=ssl.OP_NO_SSLv3)\n try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,\n client_options=ssl.OP_NO_TLSv1)",
"def enable_ssl(self):\n # type: () -> bool\n return self._get_property('enable_ssl')",
"def test_protocol_tlsv1(self):\n if support.verbose:\n sys.stdout.write(\"\\n\")\n try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1')\n try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)\n try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)\n if hasattr(ssl, 'PROTOCOL_SSLv2'):\n try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)\n if hasattr(ssl, 'PROTOCOL_SSLv3'):\n try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)\n try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv23, False,\n client_options=ssl.OP_NO_TLSv1)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The flag to enable HTTP/2. This flag is used only when `proxy_mode` is `https`. | def support_http2(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "support_http2") | [
"def AddHttp2Flag(parser):\n parser.add_argument(\n '--use-http2',\n action=arg_parsers.StoreTrueFalseAction,\n help='Whether to use HTTP/2 for connections to the service.',\n )",
"def enable_https_proxy(self):\n # type: () -> int\n return self._get_property('enable_https_proxy')",
"def enable_https_proxy(self, value):\n self._set_property('enable_https_proxy', value)",
"def enable_proxy_protocol(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"enable_proxy_protocol\")",
"def enable_https_traffic_only(self) -> Optional[bool]:\n return pulumi.get(self, \"enable_https_traffic_only\")",
"def use_system_proxy_setting(self):\n return \"true\" == self.get(\"network\", \"use_system_proxy_settings\", \"true\").lower()",
"def securie_proxy_ssl_header_is_set(audit_options):\n assert LOCAL_SETTINGS.get('SECURE_PROXY_SSL_HEADER') == \\\n ['HTTP_X_FORWARDED_PROTO', 'https'], \\\n \"SECURE_PROXY_SSL_HEADER should be set to \" \\\n \"('HTTP_X_FORWARDED_PROTO', 'https')\"",
"def use_https(self) -> bool:\n return self._caller.use_https",
"def enable_ssl(self):\n # type: () -> bool\n return self._get_property('enable_ssl')",
"def _advanced_networking(self, args: parser_extensions.Namespace):\n if flags.Get(args, 'enable_advanced_networking'):\n return True\n if flags.Get(args, 'disable_advanced_networking'):\n return False\n return None",
"def https_redirect_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"https_redirect_enabled\")",
"def get_http2_ssl_context():\n # Get the basic context from the standard library.\n ctx = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH)\n\n # RFC 7540 Section 9.2: Implementations of HTTP/2 MUST use TLS version 1.2\n # or higher. Disable TLS 1.1 and lower.\n ctx.options |= (\n ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1\n )\n\n # RFC 7540 Section 9.2.1: A deployment of HTTP/2 over TLS 1.2 MUST disable\n # compression.\n ctx.options |= ssl.OP_NO_COMPRESSION\n\n # RFC 7540 Section 9.2.2: \"deployments of HTTP/2 that use TLS 1.2 MUST\n # support TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\". In practice, the\n # blacklist defined in this section allows only the AES GCM and ChaCha20\n # cipher suites with ephemeral key negotiation.\n ctx.set_ciphers(\"ECDHE+AESGCM:ECDHE+CHACHA20:DHE+AESGCM:DHE+CHACHA20\")\n\n # We want to negotiate using NPN and ALPN. ALPN is mandatory, but NPN may\n # be absent, so allow that. This setup allows for negotiation of HTTP/1.1.\n ctx.set_alpn_protocols([\"h2\", \"http/1.1\"])\n\n try:\n ctx.set_npn_protocols([\"h2\", \"http/1.1\"])\n except NotImplementedError:\n print(\"BAD@get_http2_ssl_context\")\n\t\n return ctx",
"def proxy_ssl(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"proxy_ssl\")",
"def is_secure(self):\r\n return self.url.startswith(\"https\")",
"def test_get_proxy_information_with_proxy_over_https(self):\n self.configure_response(\n proxy_manager={'http://': 'http://local.proxy:3939'},\n )\n self.configure_request(\n url='https://example.com',\n method='GET',\n )\n\n assert dump._get_proxy_information(self.response) == {\n 'method': 'CONNECT',\n 'request_path': 'https://example.com'\n }",
"def is_ssl(self):\n\t\treturn self.ssl",
"def enable_ssl(self, value):\n self._set_property('enable_ssl', value)",
"def ssl(self):\n\t\tif 'with_openssl' in self.configure_options:\n\t\t\treturn True\n\t\t# Parameterized form in newer versions.\n\t\tfor x in self.configure_options:\n\t\t\tif 'with_ssl' in x:\n\t\t\t\treturn True\n\t\treturn False",
"def get_http_protocol(self, tls_parameter='tls_enabled'):\n protocol = 'http'\n if self.helpers.get_plugin_setting(self.settings.name, tls_parameter):\n protocol = 'https'\n return protocol"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
One or more `additional_certificate` blocks as defined below. | def additional_certificates(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ProxyLBCertificateAdditionalCertificateArgs']]]]:
return pulumi.get(self, "additional_certificates") | [
"def additional_certificates(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ProxyLBACMECertificateAdditionalCertificateArgs']]]]:\n return pulumi.get(self, \"additional_certificates\")",
"def test_extraChainFilesAreAddedIfSupplied(self):\n opts = sslverify.OpenSSLCertificateOptions(\n privateKey=self.sKey,\n certificate=self.sCert,\n extraCertChain=self.extraCertChain,\n )\n opts._contextFactory = FakeContext\n ctx = opts.getContext()\n self.assertEqual(self.sKey, ctx._privateKey)\n self.assertEqual(self.sCert, ctx._certificate)\n self.assertEqual(self.extraCertChain, ctx._extraCertChain)",
"def add_certificate(certificate):\n new_certificate = Certificates(\n title=certificate['title'],\n description=certificate['description'],\n url=certificate['url'],\n image=certificate['image']\n )\n session.add(new_certificate)\n session.commit()\n return new_certificate",
"def test_extraChainDoesNotBreakPyOpenSSL(self):\n opts = sslverify.OpenSSLCertificateOptions(\n privateKey=self.sKey,\n certificate=self.sCert,\n extraCertChain=self.extraCertChain,\n )\n ctx = opts.getContext()\n self.assertIsInstance(ctx, SSL.Context)",
"def load_certificate():\n db_uuid = read_file_first_line('odoo-db-uuid.conf')\n enterprise_code = read_file_first_line('odoo-enterprise-code.conf')\n if db_uuid and enterprise_code:\n url = 'https://www.odoo.com/odoo-enterprise/iot/x509'\n data = {\n 'params': {\n 'db_uuid': db_uuid,\n 'enterprise_code': enterprise_code\n }\n }\n urllib3.disable_warnings()\n http = urllib3.PoolManager(cert_reqs='CERT_NONE')\n response = http.request(\n 'POST',\n url,\n body = json.dumps(data).encode('utf8'),\n headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}\n )\n result = json.loads(response.data.decode('utf8'))['result']\n if result:\n write_file('odoo-subject.conf', result['subject_cn'])\n subprocess.call([\"sudo\", \"mount\", \"-o\", \"remount,rw\", \"/\"])\n subprocess.call([\"sudo\", \"mount\", \"-o\", \"remount,rw\", \"/root_bypass_ramdisks/\"])\n Path('/etc/ssl/certs/nginx-cert.crt').write_text(result['x509_pem'])\n Path('/root_bypass_ramdisks/etc/ssl/certs/nginx-cert.crt').write_text(result['x509_pem'])\n Path('/etc/ssl/private/nginx-cert.key').write_text(result['private_key_pem'])\n Path('/root_bypass_ramdisks/etc/ssl/private/nginx-cert.key').write_text(result['private_key_pem'])\n subprocess.call([\"sudo\", \"mount\", \"-o\", \"remount,ro\", \"/\"])\n subprocess.call([\"sudo\", \"mount\", \"-o\", \"remount,ro\", \"/root_bypass_ramdisks/\"])\n subprocess.call([\"sudo\", \"mount\", \"-o\", \"remount,rw\", \"/root_bypass_ramdisks/etc/cups\"])\n subprocess.check_call([\"sudo\", \"service\", \"nginx\", \"restart\"])",
"def _addCACertsToContext(context):",
"def _get_certificates_arguments(\n self, ssl_cert_key, ssl_cert_crt, ssl_cert_generate):\n section = self._config[self._config_section]\n\n # Private key\n ssl_cert_key = ssl_cert_key or section['ssl_cert_key']\n\n # Public certificate\n if ssl_cert_crt is not False:\n ssl_cert_crt = ssl_cert_crt or section.get_literal('ssl_cert_crt')\n\n # Generated certificate\n ssl_cert_generate = (\n ssl_cert_generate or section.get_literal('ssl_cert_generate')\n or False)\n\n return ssl_cert_key, ssl_cert_crt, ssl_cert_generate",
"def install_certificate():\n stream = open(\"/bootflash/poap_device_recipe.yaml\", 'r')\n dictionary = yaml.load(stream)\n config_file_second = open(os.path.join(\"/bootflash\", options[\"split_config_second\"]), \"a+\")\n \n if (\"Trustpoint\" in dictionary):\n for ca in dictionary[\"Trustpoint\"].keys():\n ca_apply = 0\n for tp_cert, crypto_pass in dictionary[\"Trustpoint\"][ca].items():\n tp_cert = tp_cert.strip()\n file = tp_cert.split('/')[-1]\n if (file.endswith(\".p12\") or file.endswith(\".pfx\")):\n poap_log(\"Installing certificate file. %s\" % file)\n if (ca_apply == 0):\n config_file_second.write(\"crypto ca trustpoint %s\\n\" % ca)\n ca_apply = 1\n config_file_second.write(\"crypto ca import %s pkcs12 bootflash:poap_files/%s/%s %s\\n\" % (ca, ca, file, crypto_pass))\n poap_log(\"Installed certificate %s succesfully\" % file)",
"def test_constructorSetsExtraChain(self):\n opts = sslverify.OpenSSLCertificateOptions(\n privateKey=self.sKey,\n certificate=self.sCert,\n extraCertChain=self.extraCertChain,\n )\n self.assertEqual(self.extraCertChain, opts.extraCertChain)",
"def build_cert_chain(self,certificate):\r\n\t\tchain = []\r\n\t\tlast = None\r\n\t\tlogger.info(\"Starting to build trusting chain..\")\r\n\t\t\r\n\t\twhile True:\r\n\t\t\tif last == certificate:\r\n\t\t\t\tself.trusting_chain = []\r\n\t\t\t\treturn\r\n\t\t\tlast = certificate\r\n\t\t\t\r\n\t\t\tchain.append(certificate)\r\n\t\t\tissuer = certificate.issuer.rfc4514_string()\r\n\t\t\tsubject = certificate.subject.rfc4514_string()\r\n\t\t\t\r\n\t\t\tif issuer == subject and issuer in self.issuers_certs:\r\n\t\t\t\tbreak\r\n\t\t\t\r\n\t\t\tif issuer in self.issuers_certs:\r\n\t\t\t\tcertificate = self.issuers_certs[issuer]\r\n\t\tlogger.info(\"Chain Built with success\")\r\n\t\tself.trusting_chain = chain",
"def additional_extensions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['CertificateConfigX509ConfigAdditionalExtensionArgs']]]]:\n return pulumi.get(self, \"additional_extensions\")",
"def _add_encoded(console, certificates):\n for cert_dict in certificates:\n cert = console.certificates.list(\n filter_args={'name': cert_dict['name']})[0]\n cert_dict.update(cert.get_encoded())",
"def raw_certificate(self, raw_certificate):\n\n self._raw_certificate = raw_certificate",
"def tbs_certificate_bytes(self):",
"def additional_extensions(self) -> Optional[Sequence['outputs.AuthorityConfigX509ConfigAdditionalExtension']]:\n return pulumi.get(self, \"additional_extensions\")",
"def test_signed_xip_multiple_certificates_invalid_input(data_dir):\n # indexed certificate is not specified\n der_file_names = ['selfsign_4096_v3.der.crt', 'selfsign_3072_v3.der.crt', 'selfsign_2048_v3.der.crt']\n with pytest.raises(IndexError):\n certificate_block(data_dir, der_file_names, 3)\n\n # indexed certificate is not specified\n der_file_names = ['selfsign_4096_v3.der.crt', None, 'selfsign_3072_v3.der.crt', 'selfsign_2048_v3.der.crt']\n with pytest.raises(ValueError):\n certificate_block(data_dir, der_file_names, 1)\n\n # public key in certificate and private key does not match\n der_file_names = ['selfsign_4096_v3.der.crt']\n cert_block = certificate_block(data_dir, der_file_names, 0)\n priv_key_pem_data = _load_private_key(data_dir, 'selfsign_privatekey_rsa2048.pem')\n with pytest.raises(ValueError):\n MasterBootImage(app=bytes(range(128)), load_addr=0, image_type=MasterBootImageType.SIGNED_XIP_IMAGE,\n trust_zone=TrustZone.disabled(),\n cert_block=cert_block, priv_key_pem_data=priv_key_pem_data).export()\n\n # chain of certificates does not match\n der_file_names = ['selfsign_4096_v3.der.crt']\n chain_certificates = ['ch3_crt2_v3.der.crt']\n with pytest.raises(ValueError):\n certificate_block(data_dir, der_file_names, 0, chain_certificates)",
"def test_list_certificate_signing_request(self):\n pass",
"def gen_cert_block(data_dir, sign_bits) -> CertBlockV2:\n with open(os.path.join(data_dir, 'sb2_x', 'selfsign_' + str(sign_bits) + '_v3.der.crt'), 'rb') as f:\n cert_data = f.read()\n\n cert_obj = Certificate(cert_data)\n root_key = cert_obj.public_key_hash\n\n cb = CertBlockV2()\n cb.set_root_key_hash(0, root_key)\n cb.add_certificate(cert_data)\n return cb",
"def make_server_leaf_certificate_dict(self, leaf_certificate):\n ret = []\n for cert in leaf_certificate:\n ret.append({\n 'name': cert['aliasName'],\n 'type': 'CertificateInfoV2',\n 'certificateDetails':\n [\n {\n 'base64Data': cert['base64Data'],\n 'aliasName': cert['aliasName'],\n 'type': 'CertificateDetailV2'\n },\n ]\n })\n return ret"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The intermediate certificate for a server. | def intermediate_cert(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "intermediate_cert") | [
"def get_ssl_server_certificate():\n return get_conf().get(_SSL_SERVER_CERTIFICATE)",
"def get_server_certificate(addr,ssl_version=PROTOCOL_SSLv3,ca_certs=None):\n\tpass",
"def cert(self):\n return self.just_get_me_a_certificate()",
"def get_cert(self):\n return self.cert",
"def vpn_get_server_cert_paths(self):\n vpn_base = os.path.join(self.get_ejbca_home(), 'vpn')\n ca = os.path.join(vpn_base, 'VPN_Server-CA.pem')\n crt = os.path.join(vpn_base, 'VPN_Server.pem')\n key = os.path.join(vpn_base, 'VPN_Server-key.pem')\n return ca, crt, key",
"def get_cert(self, conn_context: context.Context) -> certs.CertStoreEntry:\n altnames: list[str] = []\n organization: str | None = None\n\n # Use upstream certificate if available.\n if ctx.options.upstream_cert and conn_context.server.certificate_list:\n upstream_cert = conn_context.server.certificate_list[0]\n if upstream_cert.cn:\n altnames.append(upstream_cert.cn)\n altnames.extend(upstream_cert.altnames)\n if upstream_cert.organization:\n organization = upstream_cert.organization\n\n # Add SNI. If not available, try the server address as well.\n if conn_context.client.sni:\n altnames.append(conn_context.client.sni)\n elif conn_context.server.address:\n altnames.append(conn_context.server.address[0])\n\n # As a last resort, add our local IP address. This may be necessary for HTTPS Proxies which are addressed\n # via IP. Here we neither have an upstream cert, nor can an IP be included in the server name indication.\n if not altnames:\n altnames.append(conn_context.client.sockname[0])\n\n # only keep first occurrence of each hostname\n altnames = list(dict.fromkeys(altnames))\n\n # RFC 2818: If a subjectAltName extension of type dNSName is present, that MUST be used as the identity.\n # In other words, the Common Name is irrelevant then.\n return self.certstore.get_cert(altnames[0], altnames, organization)",
"def cert_url(self):\n return self._cert_url",
"def ssl_cert(self):\n return \"\"\"--ssl-cert=file_name\"\"\"",
"def _get_cert_path(self, replica_id, *, is_client):\n if self.config.use_unified_certs :\n return os.path.join(self.config.certs_path, str(replica_id), \"node.cert\")\n\n cert_type = \"client\" if is_client else \"server\"\n return os.path.join(self.config.certs_path, str(replica_id), cert_type, cert_type + \".cert\")",
"def update_intermediate_ca_certificate(self, context,\n root_ca_crt, sc_ca_cert, sc_ca_key):\n return self.call(context,\n self.make_msg('update_intermediate_ca_certificate',\n root_ca_crt=root_ca_crt,\n sc_ca_cert=sc_ca_cert,\n sc_ca_key=sc_ca_key))",
"def ssl_verify_server_cert(self):\n return \"\"\"--ssl-verify-server-cert\"\"\"",
"def requestCertificate(self):\n # Get Cert from the request's environment\n if \"CLIENT_RAW_CERT\" in request.environ:\n return request.environ[\"CLIENT_RAW_CERT\"]\n if \"SSL_CLIENT_CERT\" in request.environ:\n return request.environ[\"SSL_CLIENT_CERT\"]\n return None",
"def get_cluster_certificate_info(self, server_host, server_cert):\n cert_file_location = self.root_path + \"cert.pem\"\n if self.os_name == \"windows\":\n cert_file_location = Windows.TMP_PATH_RAW + \"cert.pem\"\n shell = RemoteMachineShellConnection(server_host)\n cmd = \"%s/couchbase-cli ssl-manage -c %s:8091 -u Administrator -p password \"\\\n \" --cluster-cert-info > %s\" % (self.cli_command_location,\n server_cert.ip,\n cert_file_location)\n output, _ = shell.execute_command(cmd)\n if output and \"Error\" in output[0]:\n self.fail(\"Failed to get CA certificate from cluster.\")\n shell.disconnect()\n return cert_file_location",
"def access_cert(self) -> str:\n return pulumi.get(self, \"access_cert\")",
"def get_issuer(self):\n issuer = self.cert.get_issuer()\n return issuer",
"def ssl_certificate(tmpdir):\n raise NotImplementedError",
"def certificate_path(self) -> Union[bytes, str, os.PathLike]:\n\n return self._certificate_path",
"def get_cert(clientprofile):\n global f5rest_url\n fullurl = f5rest_url + \"ltm/profile/client-ssl/\" + clientprofile.replace(\"/\", \"~\")\n return (get_f5json(fullurl))",
"def certs(path):\n import shutil\n from .frozen import resource_path\n\n cert_path = os.path.join(path, 'server.crt')\n shutil.copyfile(resource_path('ssl/server.crt'), cert_path)\n echo(cert_path)\n root_path = os.path.join(path, 'root.crt')\n shutil.copyfile(resource_path('ssl_root/root.crt'), root_path)\n echo(root_path)\n echo('Done!')"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The flag to change the partition UUID. | def change_partition_uuid(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "change_partition_uuid") | [
"def _ID_changed(self, new):\n if new[:7].lower() != \"cluster\":\n self.ID = \"cluster_%s\" % new",
"def __setUUID(self,uuid):\n uustr = str(uuid).lower()\n if map(len,uustr.split('-')) == [8, 4, 4, 4, 12]:\n for c in uustr:\n if c not in '0123456789-abcdef':\n raise ValueError(\"%r is not valid in UUID format\" % c)\n else:\n old = getattr(self,'uuid',None)\n if old is not None and old<>uustr:\n raise TypeError(\n \"Can't change UUID once set (was %s)\" % old\n )\n self._uuid = uustr\n return\n\n raise ValueError(\"%r is not a valid UUID\" % (uuid,))",
"def _SetUUID(self, sd, uuid_value):\n\n with open(sd, 'r+b') as f:\n f.seek(SD_CARD_UUID_OFFSET)\n f.write(struct.pack('i', uuid_value))",
"def _set_uuid(self, uuid):\n\n self.uuid = uuid\n\n file_path = config.get_config_value('server', 'outputpath')\n\n file_url = config.get_config_value('server', 'outputurl')\n\n self.status_location = os.path.join(file_path, str(self.uuid)) + '.xml'\n self.status_url = os.path.join(file_url, str(self.uuid)) + '.xml'",
"def set_uuid(self, uuid=None):\n self._uuid = uuid if uuid else generate_uuid()",
"def update_uuid(node_name):\n\n ids = list()\n for attr in maya.cmds.ls('*.uuid'):\n node_id = maya.cmds.getAttr(attr)\n ids.append(node_id)\n\n uuid_attr = node_name + '.uuid'\n if not maya.cmds.objExists(uuid_attr):\n maya.cmds.addAttr(node_name, longName='uuid', dataType='string')\n new_id = str(uuid.uuid4())\n ids.append(new_id)\n else:\n existing_id = maya.cmds.getAttr(uuid_attr)\n if existing_id not in ids:\n ids.append(existing_id)\n return\n new_id = str(uuid.uuid4())\n maya.cmds.setAttr(uuid_attr, new_id, type='string')",
"def partition_name(self):\n return OPENSTACK_PREFIX + \"-\" + CONF.host + \"-\" + self.instance.uuid",
"def _config_set_reboot_required(config_uuid):\n uuid_str = str(config_uuid)\n uuid_int = int(uuid.UUID(uuid_str)) | constants.CONFIG_REBOOT_REQUIRED\n return str(uuid.UUID(int=uuid_int))",
"def get_uuid(self): # real signature unknown; restored from __doc__\n return \"\"",
"def test_partuuid_update(test_microvm_with_api):\n test_microvm = test_microvm_with_api\n test_microvm.spawn()\n\n # Set up the microVM with 1 vCPUs, 256 MiB of RAM\n test_microvm.basic_config(vcpu_count=1, add_root_device=False)\n test_microvm.add_net_iface()\n\n # Add the root block device specified through PARTUUID.\n test_microvm.add_drive(\n \"rootfs\", test_microvm.rootfs_file, is_root_device=True, partuuid=\"0eaa91a0-01\"\n )\n\n # Update the root block device to boot from /dev/vda.\n test_microvm.add_drive(\n \"rootfs\",\n test_microvm.rootfs_file,\n is_root_device=True,\n )\n\n test_microvm.start()\n\n # Assert that the final booting method is from /dev/vda.\n assert_dict = {}\n keys_array = [\"1-0\", \"1-6\"]\n assert_dict[keys_array[0]] = \"rw\"\n assert_dict[keys_array[1]] = \"/dev/vda\"\n _check_drives(test_microvm, assert_dict, keys_array)",
"def _gen_uuid(self):\r\n return uuid.uuid4().hex",
"def mark_unique(event_name: str, uuid: int, system: str = \"default\") -> None:\n _mark_unique(event_name, uuid, system, value=1)",
"def svn_fs_set_uuid(*args) -> \"svn_error_t *\":\n return _fs.svn_fs_set_uuid(*args)",
"def __generateuuid(self):\n uuid = virtinst.util.uuidToString(virtinst.util.randomUUID())\n\n return uuid",
"def ftduino_id_set(self, identifier):\n self.comm('ftduino_id_set {0}'.format(identifier))",
"def set_generate_ID(self, value):\n self.__generate_new_IDS = value",
"def setPart(self, partname: 'SbName', arg3: 'SoNode') -> \"SbBool\":\n return _coin.SoBaseKit_setPart(self, partname, arg3)",
"def setPart(self, partname: 'SbName', arg3: 'SoNode') -> \"SbBool\":\n return _coin.SoInteractionKit_setPart(self, partname, arg3)",
"def task_uuid_not(self, task_uuid_not):\n\n self._task_uuid_not = task_uuid_not"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The flag to disable password authentication. | def disable_pw_auth(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "disable_pw_auth") | [
"def disable_password_authentication(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"disable_password_authentication\")",
"def password_auth_enabled(self) -> Optional[bool]:\n return pulumi.get(self, \"password_auth_enabled\")",
"def ssh_disable_passwd():\n with settings(hide('running', 'user'), warn_only=True):\n sudo('echo PasswordAuthentication no >> /etc/ssh/sshd_config')\n sudo('service ssh restart')",
"def no_password(self):\n return \"password\" not in self.password.lower()",
"def without_password(self):\n return self.with_password('')",
"def password_required(self):\n return self._password_required",
"def disable_password_reveal(audit_options):\n assert LOCAL_SETTINGS.get('DISABLE_PASSWORD_REVEAL'), \\\n \"DISABLE_PASSWORD_REVEAL should be set to True\"",
"def force_password_change(self):\n return self._force_password_change",
"def password(self):\n return \"\"\"--password[=password]\"\"\"",
"def password(self) -> Optional[str]:\n return pulumi.get(self, \"password\")",
"def force_password_change(self, value):\n self._force_password_change = bool(value)",
"def disable(self, user: User):\n user.user.set_unusable_password()\n user.save()",
"def password(self, password):\n self._configuration.password = password",
"def enable_dummy_auth(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_dummy_auth\")",
"def fault_tolerance_password(self):\n ret = self._get_attr(\"faultTolerancePassword\")\n return ret",
"def test_set_password_mode(self):\n self.server_widget.password_mode = 'silent'\n assert self.client_widget.password_mode == self.server_widget.password_mode",
"def shared_password_enabled(self) -> Optional[bool]:\n return pulumi.get(self, \"shared_password_enabled\")",
"def unset_auth(self, **kwargs):\n return",
"def disable_password_autocomplete(audit_options):\n assert not LOCAL_SETTINGS.get('PASSWORD_AUTOCOMPLETE'), \\\n \"PASSWORD_AUTOCOMPLETE should be set to False\""
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The flag to enable the DHCP client. | def enable_dhcp(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "enable_dhcp") | [
"def enable_dhcp(self, ip_host_num):\n return [\"ip-host %s dhcp-enable true ping-response true traceroute-response true\" % ip_host_num]",
"def dhcp_enabled(self):\n ret = self._get_attr(\"DHCPEnabled\")\n return ret",
"def enableDHCPClick():\n os.system(\"mount -o rw,remount /\")\n os.system(\"cp netctl/ethernet-dhcp /etc/netctl/eth0\")\n os.system(\"mount -o ro,remount /\")\n lcdPrint(\"Obtaining IP...\")\n lcd.setCursor(15,0)\n lcd.ToggleBlink()\n os.system(\"ip link set eth0 down\")\n os.system(\"netctl restart eth0\")\n ip = socket.gethostbyname(socket.getfqdn())\n lcd.ToggleBlink()\n lcdPrint(\"Enabled DHCP:\\n\"+ip, 2)",
"def need_dhcp_server(self):\n ret = self._get_attr(\"needDhcpServer\")\n return ret",
"def enable(self):\n interface_name = self.device_delegate.setup(self.network,\n reuse_existing=True)\n if self.active:\n self.restart()\n elif self._enable_dhcp():\n self.interface_name = interface_name\n self.spawn_process()",
"def _vmware_dhcp_ip_config(self, args: parser_extensions.Namespace):\n kwargs = {\n 'enabled': flags.Get(args, 'enable_dhcp'),\n }\n if flags.IsSet(kwargs):\n return messages.VmwareDhcpIpConfig(**kwargs)\n return None",
"def SetDHCPClient(self, client):\n print \"Setting dhcp client to %i\" % (int(client))\n self.dhcp_client = int(client)\n self.wifi.dhcp_client = int(client)\n self.wired.dhcp_client = int(client)\n self.config.set(\"Settings\", \"dhcp_client\", client, write=True)",
"def _set_dhcp_required(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"dhcp-required\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='boolean', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"dhcp_required must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"dhcp-required\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='boolean', is_config=False)\"\"\",\n })\n\n self.__dhcp_required = t\n if hasattr(self, '_set'):\n self._set()",
"def _set_dhcp_required(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"dhcp-required\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='boolean', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"dhcp_required must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"dhcp-required\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='boolean', is_config=True)\"\"\",\n })\n\n self.__dhcp_required = t\n if hasattr(self, '_set'):\n self._set()",
"def enableConnection(self, flag: 'SbBool') -> \"void\":\n return _coin.SoField_enableConnection(self, flag)",
"def set_dhcp_server(self, config):\n self.navigate_to(self.CONFIGURE, self.CONFIGURE_SYSTEM, 2)\n if not self.s.is_element_present(self.info['loc_cfg_system_dhcps_fieldset']):\n raise Exception('The DHCP Server configurate field is invisible')\n\n else:\n enable_checkbox = self.info['loc_cfg_system_dhcps_enable_checkbox']\n enable_server = False if not config.has_key('enable') else config['enable']\n\n if not enable_server:\n # Uncheck the enable DHCPs server checkbox\n if self.s.is_checked(enable_checkbox):\n self.s.click_and_wait(enable_checkbox)\n\n # Click apply button\n self.s.click_and_wait(self.info['loc_cfg_system_dhcps_apply_button'], 3)\n\n return\n\n # Check the enable DHCPs server checkbox\n if not self.s.is_checked(enable_checkbox):\n self.s.click_and_wait(enable_checkbox)\n\n # Set starting ip value\n if config.has_key('start_ip'):\n self.s.type_text(self.info['loc_cfg_system_dhcps_starting_ip_textbox'], config['start_ip'])\n\n # Set ip range value\n if config.has_key('number_ip'):\n self.s.type_text(self.info['loc_cfg_system_dhcps_number_ip_textbox'], str(config['number_ip']))\n\n # Set lease time value\n if config.has_key('leasetime'):\n self.s.select_option(self.info['loc_cfg_system_dhcps_leasetime_options'], config['leasetime'])\n\n # Click 'Cancel' on the confirmation dialog to ZD do nothing if not ZD will auto correct the setting value.\n self.s.choose_cancel_on_next_confirmation()\n # Click apply button\n self.s.click_and_wait(self.info['loc_cfg_system_dhcps_apply_button'], 3)\n msg = ''\n # The ZD will be genarate an alert or an confirm dialog if there are any invalid or wrong setting value is setted.\n # Get any exist alert message\n if self.s.is_alert_present(5):\n msg = self.s.get_alert()\n\n # Get any confirmation message\n elif self.s.is_confirmation_present(5):\n msg = self.s.get_confirmation()\n\n if msg:\n raise Exception(msg)",
"def enable_adapter(self):\n return self.do_cmd(\"enable_adapter\")",
"def associate_dhcp_options(DryRun=None, DhcpOptionsId=None, VpcId=None):\n pass",
"def enable_di(self, value):\n self._set_property('enable_di', value)",
"def enableNotify(self, flag: 'SbBool const') -> \"SbBool\":\n return _coin.SoFieldContainer_enableNotify(self, flag)",
"def enable_ultra_ssd(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"enable_ultra_ssd\")",
"def preserve_client_ip_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"preserve_client_ip_enabled\")",
"def preserve_client_ip_enabled(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"preserve_client_ip_enabled\")",
"def create_dhcp_options(DryRun=None, DhcpConfigurations=None):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A list of the Note IDs. | def note_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "note_ids") | [
"def get_ids():",
"def getToonIdsAsList(self):\n return self.toonIds",
"def track_id_list(self) -> List[int]:\n _track_id_list: List[int] = np.unique(self.seq_df[\"TRACK_ID\"].values).tolist()\n return _track_id_list",
"def ids(self):\n names_ = self.metadata()\n return [plasma.ObjectID(x[\"value_id\"]) for x in names_.values()]",
"def identifiers(self):\n return [seq.identifier for seq in self]",
"def get_id_list(self, instance_list):\n id_list = []\n for i in instance_list:\n id_list.append(i.id_number if i else None)\n return id_list",
"def id_list(self):\n return numpy.array(self.spiketrains.keys(), int)",
"def mbr_identifiers(self):\n return []",
"def get_answer_ids(self):\n return # osid.id.IdList",
"def ids(self) -> ConfigNodePropertyArray:\n return self._ids",
"def get_known_artist_ids(self) -> List[str]:\n\n q = {}\n cols = {\"_id\": 1}\n r = list(self._artists.find(q, cols))\n\n return [x[\"_id\"] for x in r]",
"def getIdList(self, attribute_list): \n counter = 0 \n IdList = []\n for item in attribute_list: \n if (counter % 2) == 0: \n IdList.append(item[1]) \n counter += 1 \n return IdList",
"def job_ids(self):\n return [elem[\"id\"] for elem in self.all()]",
"def _get_ids(self, query):\n return [getattr(elm, 'id') for elm in query]",
"def list_all_ids(self):\n values = []\n list = self.redis_server.hkeys(self.actinia_template_id_db)\n for entry in list:\n entry = entry.decode()\n values.append(entry)\n\n return values",
"def allergen_get_id_list(allergens):\n allergen_id_list = []\n for allergen in allergens:\n allergen_id_list.append(allergen[\"_id\"])\n return allergen_id_list",
"def get_ids(self):\n return self.multiengine.get_ids()",
"def listMatchids(self):\n idlist = list()\n for key, matches in self.matches.items():\n for match in matches:\n idlist.append(match.matchedword.dbid)\n self.idlist = tuple(idlist)\n return self.idlist",
"def http_get_patch_id_list(self):\n obj = self.http_get_json(\n '%s%s' % (self._url, self.PATCHES_ENDPOINT),\n verify=False,\n auth=(self._user, self._password),\n headers=self.HTTP_HEADER_ACCEPT_JSON)\n titles = obj['patch_reporting_software_titles']\n return [record['id'] for record in titles]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A list of the SSHKey ids. | def ssh_key_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "ssh_key_ids") | [
"def ex_list_ssh_keys(self):\r\n data = self.connection.request('/ssh_keys').object['ssh_keys']\r\n return list(map(self._to_ssh_key, data))",
"def ssh_keys(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"ssh_keys\")",
"def keys(self):\n c = self.get_cxn().cursor()\n c.execute('SELECT session_id FROM user_sessions')\n return [ id for (id,) in c.fetchall() ]",
"def getSSHKeys(self):\n\n file = open(self._ssh_keys_path, \"r\")\n key = file.read()\n return key",
"def id_list(self):\n return numpy.array(self.spiketrains.keys(), int)",
"def ssh_keys(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['VirtualMachineOsProfileLinuxConfigSshKeyArgs']]]]:\n return pulumi.get(self, \"ssh_keys\")",
"def topology_keys(self) -> typing.List[str]:\n return typing.cast(\n typing.List[str],\n self._properties.get(\"topologyKeys\"),\n )",
"def list_all_ids(self):\n values = []\n list = self.redis_server.hkeys(self.actinia_template_id_db)\n for entry in list:\n entry = entry.decode()\n values.append(entry)\n\n return values",
"def get_keys(self)->list:\n return list(self._config_contents.keys())",
"def getMessageKeyList(self):\n pass",
"def _list_stats_keys() -> list:\n key_list = []\n stmt = sqlalchemy.select([_STATS_TABLE.c.key.distinct()]).select_from(_STATS_TABLE)\n result = __query_execution(stmt)\n\n result = result.fetchall()\n for i in range(len(result)):\n key_list.append(str(result[i][0]).strip())\n\n return key_list",
"def get_keys(self):\n keys = self.key.split(',')\n keys = [k.strip() for k in keys]\n \n return keys",
"def _issuer_ids(self) -> List[str]:\n return list(self._issuers.keys())",
"def get_all_keys(self) -> List:\r\n key_list = []\r\n for i in self.hash_table:\r\n if i is not None:\r\n key_list.append(i[0])\r\n return key_list",
"def keys(self):\n\n return [c.id for c in self.comments]",
"def key(self):\n return (self.getManageIp(), self.name())",
"def get_all_sshkeys(self):\n self.mock_data = \"keys/all.json\"\n data = self.get_data(\"account/keys/\")\n ssh_keys = list()\n for jsoned in data['ssh_keys']:\n ssh_key = SSHKey(**jsoned)\n ssh_key.token = self.token\n ssh_key.mocked = self.mocked\n ssh_keys.append(ssh_key)\n return ssh_keys",
"def get_keys(self):\n key_list = []\n [key_list.append(x) for x in self.__settings]\n return key_list",
"def keys(self):\n return [item.key for item in self.storage.items()]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A list of the SSHKey text. | def ssh_keys(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "ssh_keys") | [
"def ex_list_ssh_keys(self):\r\n data = self.connection.request('/ssh_keys').object['ssh_keys']\r\n return list(map(self._to_ssh_key, data))",
"def getSSHKeys(self):\n\n file = open(self._ssh_keys_path, \"r\")\n key = file.read()\n return key",
"def ssh_key_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"ssh_key_ids\")",
"def ssh_keys(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['VirtualMachineOsProfileLinuxConfigSshKeyArgs']]]]:\n return pulumi.get(self, \"ssh_keys\")",
"def get_keys() -> List[Tuple[str, str]]:\n with authorized_keys() as ak:\n return [\n (hashlib.new(\"md5\", line.encode()).hexdigest(), line)\n for line in ak.read().split(\"\\n\")\n if line.strip()\n ]",
"def get_all_sshkeys(self):\n self.mock_data = \"keys/all.json\"\n data = self.get_data(\"account/keys/\")\n ssh_keys = list()\n for jsoned in data['ssh_keys']:\n ssh_key = SSHKey(**jsoned)\n ssh_key.token = self.token\n ssh_key.mocked = self.mocked\n ssh_keys.append(ssh_key)\n return ssh_keys",
"def getMessageKeyList(self):\n pass",
"def list_keys(ctx, private):\n keys = ctx.parent.gpg.list_keys(private)\n\n length = len(keys)\n logging.info(f\"{length} {'public' if private is False else 'private'} keys exist.\")\n if not length:\n ctx.exit(1)\n\n click.secho(\"Current key is:\")\n click.secho(keys.curkey.get(\"fingerprint\"))\n click.secho(\"All keys are:\")\n for key, value in keys.key_map.items():\n click.secho(value.get(\"fingerprint\"))\n\n logging.info(\"List keys finished.\")",
"def test_user_current_list_gpg_keys(self):\n pass",
"def test_user_list_gpg_keys(self):\n pass",
"def get_keys(self)->list:\n return list(self._config_contents.keys())",
"def user_keys(self, user):\n if user not in self.ssh_keys:\n return []\n return self.ssh_keys[user]",
"def get_keys(self):\n keys = self.key.split(',')\n keys = [k.strip() for k in keys]\n \n return keys",
"def __str__(self):\n s_list = list()\n StringFormat.line(s_list)\n s_list.append(\"HSS private key\")\n StringFormat.format_hex(s_list, \"levels\", u32str(self.levels))\n for prv in self.pvt_keys:\n s_list.append(str(prv))\n StringFormat.line(s_list)\n return \"\\n\".join(s_list)",
"def get_keys(self):\n conn = sqlite3.connect(WALLET_KEYS)\n c = conn.cursor()\n query = (self.email, )\n c.execute('SELECT wallet_key FROM wallet_keys WHERE email=?', query)\n results = c.fetchall()\n wallet_keys = []\n for result in results:\n if result:\n wallet_keys.append(result[0])\n conn.close()\n return wallet_keys\n # return ['xprv9s21ZrQH143K2cZXLUxwnVuc1Yt5uXEXGqP1xbei7...\n # rXEooe26rcf91gC7yMhFfGuBXHu5rwoXtf69fd2GCPHNY6cE5MFcbVAizwQ2vxoNDx']",
"def parse_ssh_keys(self):\n for key in self.keys:\n entries = key.split(' ')\n if len(entries) != 3:\n continue\n if entries[0] != 'ssh-rsa':\n continue\n\n user = entries[2].split('@')\n self.add_key(entries[1], user[0])",
"def _ipython_key_completions_(self) -> list[str]:\n items = {\n item\n for source in self._item_sources\n for item in source\n if isinstance(item, str)\n }\n return list(items)",
"def signing_keys(*args):\n output = GPG(\"--list-secret-keys\", \"--with-colons\", \"--fingerprint\", *args, output=str)\n return _parse_secret_keys_output(output)",
"def getLicensesKey():\n entries = license_description.objects.values(\"text_key\")\n return_list = []\n for entry in entries:\n return_list.append(entry[\"text_key\"])\n\n return return_list"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The id of the API key to be injected into the Note/StartupScript when editing the disk. | def api_key_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "api_key_id") | [
"def api_key_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"api_key_id\")",
"def get_api_key(self):\n key_data = os.path.join(os.path.dirname(os.path.dirname\n (os.path.abspath(__file__))), \"json\", \"key.json\")\n if not os.path.exists(key_data):\n print(\n '''Api Key file does not exist. Please refer to readme to add key and restart program''')\n sys.exit(\"Thank You for Using MapThat\")\n with open(key_data) as json_file:\n data = json.load(json_file)\n self.api_key_1 = data[\"key\"]\n #loading the api key from json file. Due to security reasons, we store the key locally\n #our private machines as a json file which is ignored by github which making changes",
"def alt_api_key(self):\n return self.get_raw('alt_api_key')",
"async def identifier(request: Request) -> str:\n api_key = request.headers.get(\"X-API-Key\") or request.query_params.get(\"api_key\")\n return f\"{api_key}\"",
"def get_api_key():\n # Api Key for access\n f = open('api_key.txt', 'r')\n api_key = f.readline()\n f.close()\n\n #print(\"Our API Key is: \" + str(api_key))\n return str(api_key)",
"def get_api_key():\n\twith open('key.txt', 'r') as file:\n\t\tapi_key = file.readline()\n\n\treturn api_key",
"def set_api_key(self, api_key):\n self.settings[\"api_key\"] = api_key\n self.filehandler.dict_to_file(self.settings, self.settings_path)\n print(\"NOAA API key saved.\")",
"def storeapi(ctx):\n logging.info('Setting API Key File')\n api_key_file = ctx.obj['api_key_file']\n\n api_key = click.prompt(\n \"Please enter your API key\",\n default=ctx.obj.get('api_key', '')\n )\n\n with open(api_key_file, 'w') as cfg:\n cfg.write(api_key)",
"def get_apikey(self):\n return self.apikeys[self.apikey_index]",
"def get_saved_secretkey(api_key):\n api_key = int(api_key)\n key_def = key_cache.get(api_key)\n if not key_def:\n key_def = read_cloudauth(api_key)\n return key_def['api_secretkey']",
"def primary_secret_key(self) -> str:\n return pulumi.get(self, \"primary_secret_key\")",
"def Get_Key(apig,key_id: str,include_value=False):\n\t\t\t\treturn apig.client.get_api_key(apiKey=key_id,includeValue=include_value)",
"def zap_api_key(self):\n apikey = None\n\n try:\n with open(self.setting_file, 'r+') as f:\n data = json.load(f)\n load_api_key = data['zap_api_key']\n apikey = signing.loads(load_api_key)\n except Exception as e:\n print e\n\n return apikey",
"def id(self):\n\n from mbed_cloud.foundation._custom_methods import pre_shared_key_id_getter\n\n return pre_shared_key_id_getter(self=self)",
"def load_api_key(api_source):\n # create directory if missing\n os.makedirs(outputpath + '/API_Keys', exist_ok=True)\n # key path\n keyfile = outputpath + '/API_Keys/' + api_source + '_API_KEY.txt'\n key = \"\"\n try:\n with open(keyfile, mode='r') as keyfilecontents:\n key = keyfilecontents.read()\n except IOError:\n log.error(\"Key file not found in 'API_Keys' directory. See github wiki for help\"\n \"https://github.com/USEPA/flowsa/wiki/GitHub-Contributors#api-keys\")\n return key",
"def set_api_key(self, key):\n self.api_key = key",
"def get_key(file='api_key.dict', key='ElsevierDeveloper'):\n return eval(open(file, 'r').read())[key]",
"def get_key():\n config = configparser.ConfigParser()\n config.read(\"key.ini\")\n api_key = config[\"KEY\"][\"api_key\"]\n return api_key",
"def get_identifier(self, request):\n client = self._get_client(request, request.GET.get('api_key'))\n if client:\n return client.api_key\n return \"%s_%s\" % (request.META.get('REMOTE_ADDR', 'noaddr'), request.META.get('REMOTE_HOST', 'nohost'))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The id of the packet filter to attach to the network interface. | def packet_filter_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "packet_filter_id") | [
"def UUID(self):\n ret = libvirtmod.virNWFilterGetUUID(self._o)\n if ret is None: raise libvirtError ('virNWFilterGetUUID() failed')\n return ret",
"def id(self):\n if not self._id:\n self._id = self._layer.GetLayerId()\n return self._id",
"def vlan_iface_id(self) -> int:\n return self._vlan_iface_id",
"def uniqueId( self ):\r\n\t\treturn mxs.blurUtil.uniqueId( self._nativePointer.layerAsRefTarg )",
"def source_id(self):\n return self._can_id & 0xf",
"def _pipe_identifier_for_packet(self, packet):\n\n # FIXME: this should have a bus-ID-alike\n return (packet.device_address,)",
"def aipid(self):\n return self.fields[0]",
"def _get_encap_id(self):\n return self.__encap_id",
"def wip_id(self) -> str:\n return self._wip_id",
"def unique_id(self):\n return _wavelet_swig.wavelet_ff_sptr_unique_id(self)",
"def unique_id(self):\n return _wmbus_swig.wmbus_packet_sink_sptr_unique_id(self)",
"def layer_uid(layer):\n return str(id(layer))",
"def pkt_to_id(pkt):\n return f\"{pkt.packettype:x}-{pkt.subtype:x}-{pkt.id_string}\"",
"def network_binding_host_id(self, context, instance):\n# LOG.error(\"network_binding_host_id( context=%s, instance=%s)\", context, instance)\n return instance.get('host')",
"def select_id_str(self):\n conn = self.csm.context.pywbem_server.conn\n filter_inst = conn.GetInstance(self.instance.path['Filter'])\n dest_inst = conn.GetInstance(self.instance.path['Handler'])\n\n # Get filter and destination select_id_str strings\n filterinst = IndicationFilter(self.csm, filter_inst)\n filter_str = filterinst.select_id_str()\n\n destinst = IndicationDestination(self.csm, dest_inst)\n dest_str = destinst.select_id_str()\n\n return '{0} {1} {2}'.format(self._owned_flag, dest_str, filter_str)",
"def fmi_id(self) -> int:\n return self._fmi_id",
"def unique_id(self):\n return \"{}.{}\".format(self.__class__, self.wink.deviceId())",
"def unique_id(self):\n return _OFDM_Cyclic_Prefix_swig.vamsi_OFDMCP_ff_sptr_unique_id(self)",
"def unique_id(self):\n return _raw_util.raw_pnc_frequency_modulator_fc_sptr_unique_id(self)",
"def device_id(self):\n return self.id"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The SNMP community string used when checking by SNMP. | def community(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "community") | [
"def get_snmp_information(self):\n snmp_information = {}\n\n snmp_config = junos_views.junos_snmp_config_table(self.device)\n snmp_config.get(options=self.junos_config_options)\n snmp_items = snmp_config.items()\n\n if not snmp_items:\n return snmp_information\n\n snmp_information = {\n str(ele[0]): ele[1] if ele[1] else \"\" for ele in snmp_items[0][1]\n }\n\n snmp_information[\"community\"] = {}\n communities_table = snmp_information.pop(\"communities_table\")\n if not communities_table:\n return snmp_information\n\n for community in communities_table.items():\n community_name = str(community[0])\n community_details = {\"acl\": \"\"}\n community_details.update(\n {\n str(ele[0]): str(\n ele[1]\n if ele[0] != \"mode\"\n else C.SNMP_AUTHORIZATION_MODE_MAP.get(ele[1])\n )\n for ele in community[1]\n }\n )\n snmp_information[\"community\"][community_name] = community_details\n\n return snmp_information",
"def communeName():",
"def get_snmp_information(self):\n\n raw_show_sys = self._send_command(\"show system\")\n raw_show_snmp = self._send_command(\"show snmp\")\n\n show_sys = textfsm_extractor(self, \"show_system-basic\", raw_show_sys)\n show_snmp_basic = textfsm_extractor(self, \"show_snmp-basic\", raw_show_snmp)\n show_snmp_communities = textfsm_extractor(\n self, \"show_snmp-communities\", raw_show_snmp\n )\n snmp_info = {\n # Dell OS6 doesn't support setting the chassis ID, it's derived from the hostname\n \"chassis_id\": show_sys[0][\"sys_name\"],\n \"community\": {},\n \"contact\": show_snmp_basic[0][\"contact\"],\n \"location\": show_snmp_basic[0][\"location\"],\n }\n\n for entry in show_snmp_communities:\n community = entry[\"community\"]\n if entry[\"acl\"] == \"All\":\n acl = u\"N/A\"\n else:\n # Dell OS6 only supports direct host entries, no ACLs\n acl = entry[\"acl\"] + \"/32\"\n if entry[\"mode\"] == \"Read Only\":\n mode = u\"ro\"\n if entry[\"mode\"] == \"Read/Write\":\n mode = u\"rw\"\n snmp_info[\"community\"][community] = {\"acl\": acl, \"mode\": mode}\n\n return snmp_info",
"def studyoid(self):\n return \"%s(%s)\" % (self.project_name, self.environment_name)",
"def __str__(self):\n \n return '{0}!{1}@{2}'.format(self.nickname, self.ident, self.host)",
"def inCommunity(self):\n if self.getParentNode().meta_type == 'BitakoraCommunity':\n return 1\n return 0",
"def security_credential_str(ver):\n if ver == OTA_UPG_HDR_SEC_CRED_VER_SE_1_0:\n ver_str = \"SE 1.0\"\n elif ver == OTA_UPG_HDR_SEC_CRED_VER_SE_1_1:\n ver_str = \"SE 1.1\"\n elif ver == OTA_UPG_HDR_SEC_CRED_VER_SE_2_0:\n ver_str = \"SE 2.0\"\n else:\n ver_str = \"Unknown\"\n return ver_str",
"def community_assignment(self):\n assignment_dict = {}\n for node in self.node_dict:\n assignment_dict[node] = self.node_dict[node].community\n return assignment_dict",
"def list_communities(self, width=120):\n stats = [ ]\n stats.append(\"List of communities:\")\n singleton_cmtys = [ ]\n cmtynodes = self.cmtynodes()\n cmtys_by_rev_size = [c for (c,ns) in sorted(cmtynodes.iteritems(),\n reverse=True,\n key=lambda (c,ns):len(ns))]\n for c in cmtys_by_rev_size:\n cmtysize = len(cmtynodes[c])\n if cmtysize == 1:\n singleton_cmtys.append(c)\n continue\n stats.append(textwrap.fill(\n ((\"%3s: \"%c)+' '.join(str(n) for n in sorted(cmtynodes[c]))),\n width=width,\n initial_indent=\"\", subsequent_indent=\" \",\n ))\n stats.append(textwrap.fill(\n \"Nodes in singleton communities: \"+' '.join(\n str(next(iter(cmtynodes[c]))) for c in singleton_cmtys),\n width=width,\n initial_indent=\"\", subsequent_indent=\" \",\n ))\n return stats",
"def AlwaysIncludeTunnelEncExtCommunity(self):\n return self._get_attribute('alwaysIncludeTunnelEncExtCommunity')",
"def community_html(self):\n if self._community_html:\n return self._community_html\n else:\n self._community_html = request.get(self.community_url)\n return self._community_html",
"def omniSnmpStatus(self):\n status = -1\n try:\n status = self.netcool.getSnmpStatus(system=self.getOrganizerName())\n status = self.convertStatus(status)\n except Exception: pass\n return status",
"def get_size_min_community(node_clustering_obj):\n print('Obtaining the size of the smalles community...')\n min_com = len(node_clustering_obj.communities[0])\n\n for com in node_clustering_obj.communities:\n if len(com) < min_com:\n min_com = len(com)\n print(f'Done!\\n The min size is {min_com}.\\n')\n\n return min_com",
"def community_detection(G, method='louvain'):\n if method == 'louvain':\n communities = community.best_partition(G)\n nx.set_node_attributes(G, 'modularity', communities)\n return communities",
"def GetClientString(self):\r\n return str(self.client_address)",
"def computer_name():\n return \"The name of this computer is \" + platform.node()",
"def AdvertiseTunnelEncapsulationExtendedCommunity(self):\n return self._get_attribute('advertiseTunnelEncapsulationExtendedCommunity')",
"def community_list():\n communities = Community.query.all()\n return render_template(\n \"communityList.html\",\n communities=communities,\n )",
"def sasl_nodom(self, mo):\n sasl_uname_hash = hashing_func(mo.group('sasl_nodom'), salt)\n\n trunc_sasl_hash = 'USN_' + sasl_uname_hash[:13]\n\n logging.debug('sasl user %s is %s' %\n (mo.group('sasl_nodom'),\n trunc_sasl_hash))\n\n return \"sasl_username=%s\" % (trunc_sasl_hash)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The SNMP version used when checking by SNMP. This must be one of `1`/`2c`. | def snmp_version(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "snmp_version") | [
"def protocolVersion():",
"def get_version():\n return 'PyS2OPC v' + VERSION + ' on ' + ffi.string(libsub.SOPC_LibSub_GetVersion()).decode()",
"def protocol_version(self):\n ret = self._get_attr(\"protocolVersion\")\n return ret",
"def mcuVersion(self):\r\n self.mcuserial.write('v' + chr(0) + chr(0))\r\n return self.mcuserial.read(30)",
"def version(self):\n\t\treturn self.query('SELECT VERSION()',1)[0]",
"def get_iphone_product_version(self):\n return self.parsed_info_file['Product Version']",
"def get_bios_version():\n cli_output = cli(\"show version\")\n if legacy:\n result = re.search(r'BIOS.*version\\s*(.*)\\n', cli_output[1])\n if result != None:\n return result.group(1)\n else:\n result = re.search(r'BIOS.*version\\s*(.*)\\n', cli_output)\n if result != None:\n return result.group(1)\n poap_log(\"Unable to get switch Bios version\")",
"def minor_version(self):\n d = uInt32 (0)\n CALL ('GetSysNIDAQMinorVersion', ctypes.byref (d))\n return d.value",
"def version(self):\n output = gdb.execute('show version', to_string=True)\n try:\n version = output.split('\\n')[0]\n except:\n version = None\n return version",
"def get_hypervisor_version():\n\n return platform.uname()[2]",
"def server_version(self):\n ret = getattr(self, \"_SERVER_VERSION\", \"\")\n return ret",
"def port_version(self):\n ret = self._get_attr(\"portVersion\")\n return ret",
"def version(self):\n return (self.hdr['type'] >> 13) & 0x7",
"def osgDBGetVersion():\r\n return _osgDB.osgDBGetVersion()",
"def version(self):\n if not hasattr(self, '_version'):\n found = re.search(rb'Linux version ([^ ]*)', self.get_data())\n if not found:\n raise Exception('could not recognize kernel version')\n version = found.group(1).decode()\n self.log.info('kernel version: %s', version)\n self._version = '.'.join(version.split('.')[:2])\n return self._version",
"def get_acm_version():\n return float(\".\".join(acm.ShortVersion().strip(string.ascii_letters) \\\n .split(\".\")[0:2]))",
"def version():\n return 'v%s' % ninecms.__version__",
"def pmonsd_version():\n return (1,0)#Must always be the same as the version in src/SemiDetHelper.cxx\n #Also remember to update the supported versions in __actual_parse(..)",
"def version():\n return 'CAN API V3 for generic CAN Interfaces (Python Wrapper {}.{}.{})'.format(\n CAN_API_V3_PYTHON['major'], CAN_API_V3_PYTHON['minor'], CAN_API_V3_PYTHON['patch'])"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The index of the network interface on which to enable the DHCP service. This must be in the range [`1`-`7`]. | def interface_index(self) -> pulumi.Input[int]:
return pulumi.get(self, "interface_index") | [
"def interface_index(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"interface_index\")",
"def port_index(self):\n return self.__port_index",
"def if_device(self, if_index):\n if \"%s\" % if_index in self._if_index:\n # found, return interface name\n return self._if_index[\"%s\" % if_index]\n else:\n # not found, return index\n return \"%s\" % if_index",
"def get_sw_if_index(node, interface_name):\n interface_data = InterfaceUtil.vpp_get_interface_data(\n node, interface=interface_name\n )\n return interface_data.get(u\"sw_if_index\")",
"def ethinterface():\n iflist = netifaces.interfaces()\n print('Interfaces found')\n\n for index in range(len(iflist)):\n print (index, ':', iflist[index])\n\n interface = input('enter interface # ')\n interface = int(interface)\n interface = iflist[interface]\n # interface = input('Enter an interface name if needed: ')\n print('interface selected is:', interface)\n print()\n return interface",
"def _get_lif_ifindex(self):\n return self.__lif_ifindex",
"def vpp_get_interface_sw_index(node, interface_name):\n if_data = InterfaceUtil.vpp_get_interface_data(node, interface_name)\n\n return if_data.get(u\"sw_if_index\")",
"def _hostmask_int(self):\n return (1 << (self._module.width - self._prefixlen)) - 1",
"def default_device_index():\n device_list = sd.query_devices()\n default_output_device = sd.default.device[\"output\"]\n # target device should be the name of the default_output device plus \" [Loopback]\"\n target_device = (\n f\"{device_list[default_output_device]['name']} [Loopback]\"\n )\n # We need to run over the device list looking for the target device\n for device_index, device in enumerate(device_list):\n if device[\"name\"] == target_device:\n # Return the loopback device index\n return device_index\n # No Loopback device matching output found - return the default input device index\n return sd.default.device[\"input\"]",
"def port_index(self):\n return self._port_index",
"def get_device_id(device_type, device_ip_addr, client_group_start_index: int, n_higher_nodes: int):\n host_name = str(dns.resolver.query(dns.reversename.from_address(device_ip_addr), \"PTR\")[0])\n container_name = host_name.split(\".\")[0]\n if device_type == \"cloud\":\n return 0, 0\n elif device_type == \"client_node\":\n node_index = n_higher_nodes + client_group_start_index + int(container_name.split(\"_\")[-1]) - 1\n device_index = client_group_start_index + int(container_name.split(\"_\")[-1]) - 1\n return node_index, device_index\n else:\n node_index = n_higher_nodes + int(container_name.split(\"_\")[-2])\n device_index = int(container_name.split(\"_\")[-2])\n return node_index, device_index",
"def get_wireless_interface(self):\n wireless_interface = None\n hardware_ports = subprocess.check_output(['/usr/sbin/networksetup',\n '-listallhardwareports'])\n match = re.search(\"(AirPort|Wi-Fi).*?(en\\\\d)\", hardware_ports, re.S)\n if match:\n wireless_interface = match.group(2)\n return wireless_interface",
"def reset_interface_nicid(self):\n return self.data.get('reset_interface_nicid')",
"def interface_ip(self):\n try:\n ip_address = socket.inet_ntoa(struct.pack('>i', self._attribute('interface_ip', 0)))\n except Exception:\n ip_address = self._attribute('interface_ip', 0)\n\n return ip_address",
"def _setup_interface(self):\n\n # Create and set the interface up.\n self._ip.link(\"add\", ifname=self.interface, kind=\"dummy\")\n dev = self._ip.link_lookup(ifname=self.interface)[0]\n self._ip.link(\"set\", index=dev, state=\"up\")\n\n # Set up default route for both IPv6 and IPv4\n self._ip.neigh(\"add\", dst='169.254.1.1', lladdr='21:21:21:21:21:21',\n state=ndmsg.states['permanent'], ifindex=dev)\n self._ip.neigh(\"add\", family=AF_INET6, dst='fe80::1', lladdr='21:21:21:21:21:21',\n state=ndmsg.states['permanent'], ifindex=dev)\n self._ip.addr(\"add\", index=dev, address=\"169.254.1.2\", mask=24)\n self._ip.route(\"add\", gateway=\"169.254.1.1\", oif=dev)\n self._ip.route(\"add\", family=AF_INET6, gateway='fe80::1', oif=dev)\n\n # Set the loopback up as well since some of the packets go through there.\n lo = self._ip.link_lookup(ifname=\"lo\")[0]\n self._ip.link(\"set\", index=lo, state=\"up\")\n\n # Return internal interface ID for later use\n return dev",
"def interface(self):\n return self.broker.interface(**{\"IfAddrID\": self.IfAddrID})",
"def cur_net_index() -> int:\n return _canvas.net_index",
"def nicid(self):\n return self.data.get('nicid')",
"def idr_address(self) -> int:\n return AP_IDR"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The index of the network interface on which to enable filtering. This must be in the range [`0`-`7`]. | def interface_index(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "interface_index") | [
"def interface_index(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"interface_index\")",
"def _get_lif_ifindex(self):\n return self.__lif_ifindex",
"def get_sw_if_index(node, interface_name):\n interface_data = InterfaceUtil.vpp_get_interface_data(\n node, interface=interface_name\n )\n return interface_data.get(u\"sw_if_index\")",
"def if_device(self, if_index):\n if \"%s\" % if_index in self._if_index:\n # found, return interface name\n return self._if_index[\"%s\" % if_index]\n else:\n # not found, return index\n return \"%s\" % if_index",
"def vpp_get_interface_sw_index(node, interface_name):\n if_data = InterfaceUtil.vpp_get_interface_data(node, interface_name)\n\n return if_data.get(u\"sw_if_index\")",
"def port_index(self):\n return self.__port_index",
"def cur_net_index() -> int:\n return _canvas.net_index",
"def ethinterface():\n iflist = netifaces.interfaces()\n print('Interfaces found')\n\n for index in range(len(iflist)):\n print (index, ':', iflist[index])\n\n interface = input('enter interface # ')\n interface = int(interface)\n interface = iflist[interface]\n # interface = input('Enter an interface name if needed: ')\n print('interface selected is:', interface)\n print()\n return interface",
"def default_device_index():\n device_list = sd.query_devices()\n default_output_device = sd.default.device[\"output\"]\n # target device should be the name of the default_output device plus \" [Loopback]\"\n target_device = (\n f\"{device_list[default_output_device]['name']} [Loopback]\"\n )\n # We need to run over the device list looking for the target device\n for device_index, device in enumerate(device_list):\n if device[\"name\"] == target_device:\n # Return the loopback device index\n return device_index\n # No Loopback device matching output found - return the default input device index\n return sd.default.device[\"input\"]",
"def port_index(self):\n return self._port_index",
"def _hostmask_int(self):\n return (1 << (self._module.width - self._prefixlen)) - 1",
"def ifmask(ifname):\n return Ip(_ifctl(ifname, 0x891b)[20:24]) # SIOCGIFNETMASK",
"def vlan_iface_id(self) -> int:\n return self._vlan_iface_id",
"def _get_filterIndex(self) -> \"int\" :\n return _core.FileDialog__get_filterIndex(self)",
"def device_index_validator(val):\n if val in AudioInputSource.valid_device_indexes():\n return val\n else:\n return AudioInputSource.default_device_index()",
"def get_wireless_interface(self):\n wireless_interface = None\n hardware_ports = subprocess.check_output(['/usr/sbin/networksetup',\n '-listallhardwareports'])\n match = re.search(\"(AirPort|Wi-Fi).*?(en\\\\d)\", hardware_ports, re.S)\n if match:\n wireless_interface = match.group(2)\n return wireless_interface",
"def findFilter(self,fltr):\n for i in range(self.count()):\n if self.widget(i).outputfilter == fltr:\n return i\n return None",
"def _netmask_int(self):\n return self._module.max_int ^ self._hostmask_int",
"def interface_ip(self):\n try:\n ip_address = socket.inet_ntoa(struct.pack('>i', self._attribute('interface_ip', 0)))\n except Exception:\n ip_address = self._attribute('interface_ip', 0)\n\n return ip_address"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The flag to allow the packet through the filter. | def allow(self) -> pulumi.Input[bool]:
return pulumi.get(self, "allow") | [
"def setFilterable(self, boolean: bool) -> None:\n ...",
"def can_attack(self):\n return False",
"def secured_packet_exemption_allowed(self):\n if \"securedPacketExemptionAllowed\" in self._prop_dict:\n return self._prop_dict[\"securedPacketExemptionAllowed\"]\n else:\n return None",
"def allow_promiscuous(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_promiscuous\")",
"def writable(self):\n return len(self.packet) > 0",
"def is_allowed(self, peer_ip, command=''):\n # TODO: more granularity here later\n # Always allow whitelisted ip to post as block\n if 'block' == command and self.is_whitelisted(peer_ip):\n return True\n # only allow local host for \"stop\" command\n if 'stop' == command:\n return peer_ip == '127.0.0.1'\n return peer_ip in self.config.allowed_conf or \"any\" in self.config.allowed_conf",
"def filter_packet(p):\n return p.haslayer(IP) and p.haslayer(TCP) and p[TCP].seq in sequence_numbers",
"def getAllow(self):\n return self.base.get(\"allow\", [])",
"def is_accepting_data(self):\n return self._is_accepting_data",
"def _get_filter_strict_security(self):\n return self.__filter_strict_security",
"def sendFlagDrop(self):\r\n\t\tpacket = struct.pack(\"<B\", 0x15)\r\n\t\tself.queuePacket(packet, reliable=True, priority=PRIORITY_HIGH)",
"def always_filter(self):\n return self._always_filter",
"def __neutFlagsCarriedByPlayerProccessor(self,event):\r\n\t\tp = event.player\r\n\t\tp.flag_count = 0",
"def verify_packet(self, packet, context):\n pass",
"def _update_use_filter_flag(\n self,\n awg_sequence,\n ):\n\n self._use_filter = any(\n [e is not None and\n e.get('metadata', {}).get('allow_filter', False)\n for e in awg_sequence.values()]\n )",
"def ingress_traffic_allowed(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ingress_traffic_allowed\")",
"def lFilter(ourFilter):\n def snarf(packet):\n if packet.haslayer(ourFilter):\n\n ## Ignore server hellos\n if packet[TLS].msg[0].name != 'TLS Handshake - Server Hello':\n return True\n return snarf",
"def set_ctrlbit(self):\n\n if sum(self.packet) % 2 == 0:\n return 1\n else:\n return 0",
"def authorize(self):\n return True"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The pre shared secret for the VPN. The length of this value must be in the range [`0`-`40`]. | def pre_shared_secret(self) -> pulumi.Input[str]:
return pulumi.get(self, "pre_shared_secret") | [
"def get_secret(self):\r\n return self.secret",
"def make_totp_secret():\n return pyotp.random_base32()",
"def primary_secret_key(self) -> str:\n return pulumi.get(self, \"primary_secret_key\")",
"def generate_secret(self):\n bits = self.args.get('length')\n # Bits should dividable by 8, because we will ask the os for random\n # bytes and because we can't encode partial bytes. Base32 will cause a\n # 160% inflation of the data and we can't have padding for TOTP secrets\n # so `bits * 1.6` can not be a fraction.\n if (bits % 8 > 0):\n self.msg('not_common_totp_val')\n exit(2)\n if bits not in [80, 160] and not self.args['expert']:\n self.msg('not_common_totp_val')\n exit(2)\n return base64.b32encode(os.urandom(bits // 8)).decode('utf-8')",
"def get_secret_key(self) -> str:\n if self.secret_key is None:\n self.secret_key = secrets.token_urlsafe(32)\n self.save_ini_file()\n return self.secret_key",
"def getSecretKey(self) -> bytes:\r\n return self.secretKey",
"def secret(self) -> Optional[pulumi.Input['ResourceReferenceArgs']]:\n return pulumi.get(self, \"secret\")",
"def config_secret(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"config_secret\")",
"def puffer():\n if not app.settings.getboolean('security','puffer_response'):\n return ''\n return base64.b64encode(hashlib.sha256(hashlib.sha256(os.urandom(32)).digest()).digest())[:random.randint(16,32)]",
"def generate_secret_hash_3072_sha1(info):\n return rsa.encrypt(info, settings.PUB_KEY)",
"def sourcesecret(self) :\n\t\ttry :\n\t\t\treturn self._sourcesecret\n\t\texcept Exception as e:\n\t\t\traise e",
"def compute_secret(self, private_key, other_public_key):\n secret = pow(other_public_key, private_key, self.prime)\n key = hashlib.sha256(str(secret)).digest()\n return key",
"def get_totp_secret(user):\n totp_client = TOTPClient.object.get(user=user)\n return totp_client.secret",
"def recover_secret(access_token):\n return secret_for_access_token(access_token)",
"def get_apnSecret(self):\n # res\n if self._cacheExpiration <= YAPI.GetTickCount():\n if self.load(YAPI._yapiContext.GetCacheValidity()) != YAPI.SUCCESS:\n return YCellular.APNSECRET_INVALID\n res = self._apnSecret\n return res",
"def salt(self):\n return None",
"def _encrypt_secret(self):\n if not re.match(VAULT_REGEX, self.secret):\n auth_request = requests.post(\n '/'.join([self.safe.vault.url, 'v1/auth/userpass/login', self.safe.vault.vault_user]),\n data=json.dumps({'password': self.safe.vault.password}),\n verify=settings.PLOS_CA_CERTIFICATE\n )\n auth_request.raise_for_status()\n token = auth_request.json()['auth']['client_token']\n auth = {'X-Vault-Token': token}\n endpoint = '/'.join([self.safe.vault.url, 'v1/transit/encrypt', self.safe.vault.transit_key_name])\n encoded = base64.b64encode(bytes(self.secret, 'utf-8'))\n request = requests.post(endpoint, headers=auth,\n data=json.dumps({'plaintext': encoded.decode('utf-8')}),\n verify=settings.PLOS_CA_CERTIFICATE)\n request.raise_for_status()\n ciphertext = request.json()['data']['ciphertext']\n self.secret = ciphertext\n return self.secret",
"def _secret_key():\n return current_app.config.get(\"SECRET_KEY\")",
"def shared_key(priv, pub):\n key = priv.get_ecdh_key(pub)\n key = key[:32] + SHA384.new(key[32:]).digest()\n return key"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
The pre shared secret for the VPN. The length of this value must be in the range [`0`-`40`]. | def pre_shared_secret(self) -> pulumi.Input[str]:
return pulumi.get(self, "pre_shared_secret") | [
"def get_secret(self):\r\n return self.secret",
"def make_totp_secret():\n return pyotp.random_base32()",
"def primary_secret_key(self) -> str:\n return pulumi.get(self, \"primary_secret_key\")",
"def generate_secret(self):\n bits = self.args.get('length')\n # Bits should dividable by 8, because we will ask the os for random\n # bytes and because we can't encode partial bytes. Base32 will cause a\n # 160% inflation of the data and we can't have padding for TOTP secrets\n # so `bits * 1.6` can not be a fraction.\n if (bits % 8 > 0):\n self.msg('not_common_totp_val')\n exit(2)\n if bits not in [80, 160] and not self.args['expert']:\n self.msg('not_common_totp_val')\n exit(2)\n return base64.b32encode(os.urandom(bits // 8)).decode('utf-8')",
"def get_secret_key(self) -> str:\n if self.secret_key is None:\n self.secret_key = secrets.token_urlsafe(32)\n self.save_ini_file()\n return self.secret_key",
"def getSecretKey(self) -> bytes:\r\n return self.secretKey",
"def secret(self) -> Optional[pulumi.Input['ResourceReferenceArgs']]:\n return pulumi.get(self, \"secret\")",
"def config_secret(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"config_secret\")",
"def puffer():\n if not app.settings.getboolean('security','puffer_response'):\n return ''\n return base64.b64encode(hashlib.sha256(hashlib.sha256(os.urandom(32)).digest()).digest())[:random.randint(16,32)]",
"def generate_secret_hash_3072_sha1(info):\n return rsa.encrypt(info, settings.PUB_KEY)",
"def sourcesecret(self) :\n\t\ttry :\n\t\t\treturn self._sourcesecret\n\t\texcept Exception as e:\n\t\t\traise e",
"def compute_secret(self, private_key, other_public_key):\n secret = pow(other_public_key, private_key, self.prime)\n key = hashlib.sha256(str(secret)).digest()\n return key",
"def get_totp_secret(user):\n totp_client = TOTPClient.object.get(user=user)\n return totp_client.secret",
"def recover_secret(access_token):\n return secret_for_access_token(access_token)",
"def get_apnSecret(self):\n # res\n if self._cacheExpiration <= YAPI.GetTickCount():\n if self.load(YAPI._yapiContext.GetCacheValidity()) != YAPI.SUCCESS:\n return YCellular.APNSECRET_INVALID\n res = self._apnSecret\n return res",
"def salt(self):\n return None",
"def _encrypt_secret(self):\n if not re.match(VAULT_REGEX, self.secret):\n auth_request = requests.post(\n '/'.join([self.safe.vault.url, 'v1/auth/userpass/login', self.safe.vault.vault_user]),\n data=json.dumps({'password': self.safe.vault.password}),\n verify=settings.PLOS_CA_CERTIFICATE\n )\n auth_request.raise_for_status()\n token = auth_request.json()['auth']['client_token']\n auth = {'X-Vault-Token': token}\n endpoint = '/'.join([self.safe.vault.url, 'v1/transit/encrypt', self.safe.vault.transit_key_name])\n encoded = base64.b64encode(bytes(self.secret, 'utf-8'))\n request = requests.post(endpoint, headers=auth,\n data=json.dumps({'plaintext': encoded.decode('utf-8')}),\n verify=settings.PLOS_CA_CERTIFICATE)\n request.raise_for_status()\n ciphertext = request.json()['data']['ciphertext']\n self.secret = ciphertext\n return self.secret",
"def _secret_key():\n return current_app.config.get(\"SECRET_KEY\")",
"def shared_key(priv, pub):\n key = priv.get_ecdh_key(pub)\n key = key[:32] + SHA384.new(key[32:]).digest()\n return key"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculate the delta_e_cie_2000 between two values in rgb format. | def color_diff(a, b):
arr_ = (c_double * len(self.a))(*self.a)
rgb2srgb(arr_)
srgb2linear(arr_)
linear2xyz(arr_)
xyz2Lab(arr_)
arr_2 = (c_double * len(self.b))(*self.b)
rgb2srgb(arr_2)
srgb2linear(arr_2)
linear2xyz(arr_2)
xyz2Lab(arr_2)
delta = delta_cie_2000_(arr_, arr_2)
return delta | [
"def _cie_rgb_EOCF(value):\n\n value = np.asarray(value)\n\n return value ** 2.2",
"def color_diff(rgb1, rgb2):\n diff = math.sqrt((rgb1[0]-rgb2[0])**2 + (rgb1[1]-rgb2[1])**2 + (rgb1[2]-rgb2[2])**2)\n return diff",
"def __calculate_pixel_difference(self, pixel1, pixel2):\n return sum( [ (math.log(color1 / 255.0 + 1.0 / 255) - \n math.log(color2 / 255.0 + 1.0 / 255)) ** 2 \n for color1, color2 in zip(pixel1, pixel2) ])\n # This algorithm is not working as properly.\n # return sum( [ abs(color1 - color2) for color1, color2 in zip(pixel1, pixel2) ] ) ",
"def compute_delta(self, capteur):\n self.delta_hsv = lib.mod.compute_modulation(capteur, self.coef_matrice)",
"def _cie_rgb_OECF(value):\n\n value = np.asarray(value)\n\n return value ** (1 / 2.2)",
"def color_dist( c1, c2):\n return sum( (a-b)**2 for a,b in zip(to_ycc(c1),to_ycc(c2)) )",
"def rgb_distance(rgb1: RGB, rgb2: RGB) -> int:\n return sum(map(lambda c: (c[0] - c[1]) ** 2,\n zip(rgb1, rgb2)))",
"def __gradient(self, val):\n\n r = 2 * (1 - val) if val > 0.5 else 1\n g = 2 * val if val < 0.5 else 1\n return r * 255, g * 255, 0",
"def ImageDelta (image1, image2, mask = False):\n img1_factor = np.mean(image1)\n img2_factor = np.mean(image2)\n\n img1 = np.clip(image1/(img1_factor/10000),0,64000)\n img2 = np.clip(image2/(img2_factor/10000),0,64000)\n\n contrast_image = np.absolute(img1 - img2)\n raw_contrast_image = np.absolute(image1 - image2)\n\n if np.any(mask) == False:\n RMS_norm = math.sqrt(np.square(contrast_image).mean())\n RMS_raw = math.sqrt(np.square(raw_contrast_image).mean())\n else:\n RMS_norm = math.sqrt(np.square(contrast_image[~mask]).mean())\n RMS_raw = math.sqrt(np.square(raw_contrast_image[~mask]).mean())\n\n return RMS_norm, RMS_raw, contrast_image",
"def energy_colors(energies, min_val=None, max_val=None, satellite=None):\n\n # Get some endpoints if a satellite was provided:\n if satellite == 'GLM':\n min_val = 1e-15\n max_val = 5e-14\n elif satellite == 'LIS':\n min_val = 3e3\n max_val = 1e5\n else:\n print('Invalid satellite')\n\n if min_val is None:\n min_val = np.min(energies)\n if max_val is None:\n max_val = np.max(energies)\n\n _min_val = np.log10(min_val)\n _max_val = np.log10(max_val)\n _values = np.log10(energies)\n\n # Linear scaling... Really, we should break this out into a function...\n m = (255-0)/(_max_val-_min_val)\n b = 255.-m * _max_val\n\n scl_colors = m*_values+b\n\n # First, clip to bounds:\n scl_colors = np.clip(scl_colors, 0, 255)\n\n # Make it a byte for indexing\n scl_colors = np.uint8(scl_colors)\n\n colors = np.zeros((len(_values), 3))\n\n nsteps = 256 # We'll get 256 colors\n\n # Yellow -> Red color map\n redV = np.repeat(np.uint8(255), nsteps)\n blueV = np.repeat(np.uint8(0), nsteps)\n scale = np.arange(nsteps)/(nsteps-1)\n n0 = 255\n n1 = 0\n greenV = np.uint8(n0 + (n1-n0) * scale)\n\n colors[:, 0] = redV[scl_colors]\n colors[:, 1] = greenV[scl_colors]\n colors[:, 2] = blueV[scl_colors]\n\n return colors",
"def de00(bgr1, bgr2,ret_bool:bool=False):\n bgr1 = np.array([[bgr1]], dtype=np.uint8)\n bgr2 = np.array([[bgr2]], dtype=np.uint8)\n \n lab1 = _cvt_bgr2lab(bgr1)[0,0].tolist()\n lab2 = _cvt_bgr2lab(bgr2)[0,0].tolist()\n \n L1, a1, b1 = lab1[0], lab1[1], lab1[2]\n L2, a2, b2 = lab2[0], lab2[1], lab2[2]\n \n ##### CALCULATE Ci_p , hi_p\n # (2) \n C1 = (a1**2 + b1**2) ** 0.5\n C2 = (a2**2 + b2**2) ** 0.5\n \n # (3)\n mean_C = (C1 + C2) / 2\n \n # (4)\n G = 0.5 * (1 - (mean_C**7 / (mean_C**7 + 25**7))**0.5)\n \n # (5)\n a1_p = (1+G)*a1\n a2_p = (1+G)*a2\n \n # (6)\n C1_p = (a1_p**2 + b1**2) ** 0.5\n C2_p = (a2_p**2 + b2**2) ** 0.5\n \n # (7)\n h1_p = deg(atan2(b1,a1_p)) % 360\n h2_p = deg(atan2(b2,a2_p)) % 360 \n \n ##### CALCULATE Delta(s) of L, C, H\n # (8)\n delta_L_p = L2 - L1\n \n # (9)\n delta_C_p = C2_p - C1_p\n \n # (10)\n raw_delta_h = h2_p - h1_p\n abs_delta_h = abs(raw_delta_h)\n \n if C1_p * C2_p == 0:\n delta_h_p = 0\n elif abs_delta_h <= 180:\n delta_h_p = raw_delta_h\n elif raw_delta_h > 180:\n delta_h_p = raw_delta_h - 360\n elif raw_delta_h < -180:\n delta_h_p = raw_delta_h + 360\n \n # (11)\n delta_H_p = (C1_p * C2_p) ** 0.5 * sin( rad(delta_h_p) /2 ) * 2\n \n ##### CALCULATE CIE E2000\n # (12)\n mean_L_p = (L1 + L2) / 2\n \n # (13)\n mean_C_p = (C1_p + C2_p) / 2\n \n # (14)\n sum_h_p = h1_p + h2_p\n \n if C1_p * C2_p == 0:\n mean_h_p = sum_h_p\n elif abs_delta_h <= 180:\n mean_h_p = sum_h_p / 2\n elif sum_h_p < 360:\n mean_h_p = (sum_h_p + 360 ) / 2\n elif sum_h_p >= 360:\n mean_h_p = (sum_h_p - 360 ) / 2\n \n # (15)\n T = 1 - 0.17*cos(rad(mean_h_p - 30)) + 0.24*cos(rad(2*mean_h_p))\n T += 0.32*cos(rad(3*mean_h_p+6)) - 0.2*cos(rad(4*mean_h_p-63))\n \n # (16)\n delta_theta = 30*exp(-((mean_h_p - 275) / 25 )**2)\n \n # (17)\n Rc = 2 * (mean_C_p**7 / (mean_C_p**7 + 25**7))**0.5\n \n # (18)\n Sl = 1 + (0.015 * (mean_L_p - 50)**2 ) / (20+ (mean_L_p - 50)**2) ** 0.5\n \n # (19)\n Sc = 1 + 0.045 * mean_C_p\n \n # (20)\n Sh = 1 + 0.015 * mean_C_p * T\n \n # (21)\n Rt = -sin( rad(2 * delta_theta) ) * Rc\n \n # (22)\n kl = kc = kh = 1 # Unity by default\n delta_E2000 = (delta_L_p / (kl * Sl)) ** 2 \n delta_E2000 += (delta_C_p / (kc * Sc)) ** 2 \n delta_E2000 += (delta_H_p / (kh * Sh)) ** 2 \n delta_E2000 += Rt * (delta_C_p / (kc * Sc)) * (delta_H_p / (kh * Sh))\n delta_E2000 **= 0.5\n \n if ret_bool:\n noticable_diff = delta_E2000 >= 2\n return delta_E2000, noticable_diff\n else:\n return delta_E2000",
"def rgb_distance(rgb1, rgb2):\n return math.sqrt(np.sum((np.array(rgb1, np.float32) - np.array(rgb2, np.float32))**2))",
"def color_deconvolution(img):\n\n\t#Note: I am simply copying the naming conventions used in the matlab script\n\t\n\timg = img.copy()\n\n\t#STAIN VECTORS FOR H&E DECONVOLUTION (can add support for more later)\n\tMODx = [0.644211, 0.092789, 0]\n\tMODy = [0.716556, 0.954111, 0]\n\tMODz = [0.266844, 0.283111, 0]\n\n\t#Normalize columns to length 1 in 3D space\n\tleng = [0, 0, 0]\n\tcosx = [0, 0, 0]\n\tcosy = [0, 0, 0]\n\tcosz = [0, 0, 0]\n\tfor i in range(3):\n\t\tleng[i] = sqrt(MODx[i]*MODx[i] + MODy[i]*MODy[i] + MODz[i]*MODz[i])\n\t\tif not (leng[i] == 0):\n\t\t\tcosx[i] = MODx[i]/leng[i]\n\t\t\tcosy[i] = MODy[i]/leng[i]\n\t\t\tcosz[i] = MODz[i]/leng[i]\n\n\t#translation matrix\n\tif cosx[1] == 0:\n\t\tif cosy[1] == 0:\n\t\t\tif cosz[1] == 0: #2nd color is unspecified\n\t\t\t\tcosx[1] = cosz[0]\n\t\t\t\tcosy[1] = cosx[0]\n\t\t\t\tcosz[1] = cosy[0]\n\n\tif cosx[2] == 0:\n\t\tif cosy[2] == 0:\n\t\t\tif cosz[2] == 0: #3rd color is unspecified\n\t\t\t\t#3rd column will be cross product of first 2\n\t\t\t\t#fiji implementation allows for computation of 3rd color via Ruifroks method\n\t\t\t\t# but this is unnecessary for extracting just H&E \n\t\t\t\tcosx[2] = cosy[0] * cosz[1] - cosz[0] * cosy[1];\n\t\t\t\tcosy[2] = cosz[0] * cosx[1] - cosx[0] * cosz[1];\n\t\t\t\tcosz[2] = cosx[0] * cosy[1] - cosy[0] * cosx[1];\n\n\t#renormalize 3rd column\n\tleng = sqrt(cosx[2]*cosx[2] + cosy[2]*cosy[2] + cosz[2]*cosz[2])\n\tif leng != 0 and leng != 1:\n\t\tcosx[2] = cosx[2]/leng\n\t\tcosy[2] = cosy[2]/leng\n\t\tcosz[2] = cosz[2]/leng\n\n\tCOS3x3Mat = np.matrix([\n\t\t\t\t[cosx[0], cosy[0], cosz[0]], \n\t\t\t\t[cosx[1], cosy[1], cosz[1]],\n\t\t\t\t[cosx[2], cosy[2], cosz[2]]\n\t\t\t\t])\n\n\t#Note: I am skipping lines 390-459 of the matlab code, since\n\t# the determinant of the COS3x3Mat matrix is > 0 (~0.5). 
I think that\n\t# bit of code is trying to make the matrix invertible, but it already is\n\t# for H&E stain matrix \n\t#print(np.linalg.det(COS3x3Mat))\n\n\t#Invert the matrix\n\t# Note that this is done manually in the matlab code.\n\tQ3x3Mat = np.linalg.inv(COS3x3Mat)\n\tQ3x3MatInverted = COS3x3Mat #Just following the matlab code...\n\n\t#Compute transmittance \n\trowR = img.shape[0]\n\tcolR = img.shape[1]\n\n\t#These are the 1 channel transmittances of each dye \n\tDye1_transmittance = np.zeros([rowR, colR])\n\tDye2_transmittance = np.zeros([rowR, colR])\n\tDye3_transmittance = np.zeros([rowR, colR])\n\n\tfor r in range(rowR):\n\t\tfor c in range(colR):\n\t\t\tRGB1 = img[r, c]\n\t\t\tRGB1[RGB1==0] = 1 #Avoid log0\n\t\t\tACC = -np.log(RGB1 / 255)\n\t\t\ttransmittances = 255 * np.exp(-ACC*Q3x3Mat)\n\t\t\ttransmittances = transmittances[0,:]\n\t\t\ttransmittances[transmittances>255] = 255\n\n\t\t\tDye1_transmittance[r,c] = transmittances[0,0]\n\t\t\tDye2_transmittance[r,c] = transmittances[0,1]\n\t\t\tDye3_transmittance[r,c] = transmittances[0,2]\n\n\t#Construct lookup tables to convert 1 channel dye images to \n\t# \t3 channel RGB representations \n\trLUT = np.zeros([256,3])\n\tgLUT = np.zeros([256,3])\n\tbLUT = np.zeros([256,3])\n\n\tfor i in range(3):\n\t\tfor j in range(256):\n\t\t\tif cosx[i] < 0:\n\t\t\t\trLUT[255-j, i] = 255 + (j * cosx[i])\n\t\t\telse:\n\t\t\t\trLUT[255-j, i] = 255 - (j * cosx[i])\n\n\t\t\tif cosy[i] < 0:\n\t\t\t\tgLUT[255-j, i] = 255 + (j * cosy[i])\n\t\t\telse:\n\t\t\t\tgLUT[255-j, i] = 255 - (j * cosy[i])\n\n\t\t\tif cosz[i] < 0:\n\t\t\t\tbLUT[255-j, i] = 255 + (j * cosz[i])\n\t\t\telse:\n\t\t\t\tbLUT[255-j, i] = 255 - (j * cosz[i])\n\n\t#Apply the lookup table to first dye (Hematoxilin)\n\tDye1_color_im = np.zeros(img.shape)\n\tfor r in range(rowR):\n\t\tfor c in range(colR):\n\t\t\t#print(floor(Dye1_transmittance[r,c]))\n\t\t\tDye1_color_im[r,c,0] = rLUT[floor(Dye1_transmittance[r,c]),0]\n\t\t\tDye1_color_im[r,c,1] = gLUT[floor(Dye1_transmittance[r,c]),0]\n\t\t\tDye1_color_im[r,c,2] = bLUT[floor(Dye1_transmittance[r,c]),0]\n\n\tDye1_color_im = Dye1_color_im.astype(np.uint8)\n\n\treturn Dye1_transmittance, Dye1_color_im",
"def _color_dist(c, rgb):\n d0 = colortable[c][0] - rgb[0]\n d1 = colortable[c][1] - rgb[1]\n d2 = colortable[c][2] - rgb[2]\n return d0*d0 + d1*d1 + d2*d2",
"def debye_to_einstein(debye_temperature):\n return (np.pi / 6.)**(1. / 3.) * debye_temperature",
"def color_pair_distance(color_pair_1, color_pair_2):\n lux1 = np.average(color_pair_1)\n lux2 = np.average(color_pair_2)\n dux1 = (np.array(color_pair_1) / max(8, lux1)).tolist()\n dux2 = (np.array(color_pair_2) / max(8, lux2)).tolist()\n ds = list([rgb_distance(c1, c2) for c1 in dux1 for c2 in dux2])\n return min(ds[0] + ds[3], ds[1] + ds[2])",
"def colorToValResiduals(params, cmap, target_rgb):\n\n # Get the predicted color\n rgb = cmap(params[0])\n\n # Compute the cost for every colour\n cost = (rgb[0] - target_rgb[0])**2 + (rgb[1] - target_rgb[1])**2 + (rgb[2] - target_rgb[2])**2\n\n return cost",
"def dual_gradient_energy(img):\n R = img[:, :, 0]\n G = img[:, :, 1]\n B = img[:, :, 2]\n\n horizontal_red = filters.sobel_h(R)\n horizontal_green = filters.sobel_h(G)\n horizontal_blue = filters.sobel_h(B)\n\n vertical_red = filters.sobel_v(R)\n vertical_green = filters.sobel_v(G)\n vertical_blue = filters.sobel_v(B)\n\n horizontal_square_sum = add_squares(horizontal_red,\n horizontal_green, horizontal_blue)\n\n vertical_square_sum = add_squares(vertical_red,\n vertical_green, vertical_blue)\n\n energy = numpy.add(horizontal_square_sum, vertical_square_sum)\n\n return energy",
"def delta(self,element1,element2):\n \n delta = (self.model[element1]/self.model[element2]*self.solar[element2].loc[0]/self.solar[element1].loc[0]-1)*1000\n return delta"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Marginalizes image and psf along given axis. Creates a tophat the same length as the data. Convolves psf with tophat. Fits result to data. | def fit(image, psf, axis, bg_sigma=0, psf_scale_factor=1):
psf_data = np.sum(psf, axis=axis)
image_data = np.sum(image, axis=axis)
# Since we're summing down an axis, we need to also sum the bg sigma
# This is sqrt(height * sigma**2) = sqrt(height)*sigma
perp_height = image.shape[axis]
background = np.sqrt(perp_height)*bg_sigma
# Our "initial guess" for the positions of the step are
# 10% of the way through the data, and 90% of the way through the data
# since it assumed the input has been cropped to just include the trail with
# little space around it.
step_up = int(len(image_data) * 0.1)
step_down = int(len(image_data) * 0.9)
tophat_data = make_tophat(len(image_data), step_up, step_down)
convolved_tophat = np.convolve(psf_data, tophat_data)
normalized_tophat = np.divide(convolved_tophat, convolved_tophat.max())
half_length = int(len(normalized_tophat) / 2)
halves = {
'left': {
'half_tophat_data': normalized_tophat[0:half_length],
'half_image_data': image_data[0:half_length],
'zero_point': step_up,
'fill_value': (0, 1)
},
'right': {
'half_tophat_data': normalized_tophat[half_length:],
'half_image_data': image_data[half_length:],
'zero_point': step_down - half_length,
'fill_value': (1, 0)
}
}
for half in halves:
half_tophat_data, half_image_data, zero_point, fill_value = pluck(
halves[half], 'half_tophat_data', 'half_image_data', 'zero_point', 'fill_value')
interpolation_x_data = np.linspace(
- zero_point / psf_scale_factor,
(len(half_tophat_data) - zero_point) / psf_scale_factor,
len(half_tophat_data)
)
interpolated_step = interp1d(
interpolation_x_data, half_tophat_data,
kind="cubic",
fill_value=fill_value,
bounds_error=False
)
def tophat_function(x, B, A, x0):
return B + A*interpolated_step(x - x0)
# [initial_B, initial_A, initial_x0]
guesses = [
np.min(half_image_data), np.mean(half_image_data), zero_point
]
image_x_data = range(len(half_image_data))
opt, cov = curve_fit(tophat_function, image_x_data,
half_image_data, p0=guesses)
pretty_output(opt, cov, f'Initial Fit - {half}')
plt.ion()
plot_result(
x_data=image_x_data,
image=half_image_data,
fit=tophat_function(image_x_data, *opt),
title=f'Initial Fit - {half}'
)
outliers = []
residuals = []
for i in range(0, len(image_x_data)):
residual = np.abs(
half_image_data[i] - tophat_function(image_x_data[i], *opt))
residuals.append(residual)
if residual > 2 * background:
outliers.append(i)
sigs = make_sigmas(len(image_x_data), background, np.inf, outliers)
# Refit with outlier areas masked
opt2, cov2 = curve_fit(tophat_function, image_x_data, half_image_data, p0=guesses,
sigma=sigs, absolute_sigma=True)
pretty_output(opt2, cov2, f'After masking - {half}')
plot_result(
x_data=image_x_data,
image=half_image_data,
fit=tophat_function(image_x_data, *opt2),
title=f'After masking - {half}',
masked=outliers
)
plt.show()
done = input('Press return to close.')
return opt, cov, opt2, cov2 | [
"def marginalize_gaussian_process(gp, variable, center=True):\n kernel_types = [RBF, Matern]\n kernel = extract_covariance_kernel(gp.kernel_, kernel_types)\n\n constant_kernel = extract_covariance_kernel(gp.kernel_, [ConstantKernel])\n if constant_kernel is not None:\n kernel_var = constant_kernel.constant_value\n else:\n kernel_var = 1\n\n # Warning extract_gaussian_process scales kernel_var by gp.y_train_std**2\n x_train, y_train, K_inv, kernel_length_scale, kernel_var, \\\n transform_quad_rules = \\\n extract_gaussian_process_attributes_for_integration(gp)\n\n # x_train = gp.X_train_.T\n # kernel_length_scale = kernel.length_scale\n # transform_quad_rules = (not hasattr(gp, 'var_trans'))\n L_factor = gp.L_.copy()\n\n tau_list, P_list, u_list, lamda_list, Pi_list, nu_list, __ = \\\n get_gaussian_process_squared_exponential_kernel_1d_integrals(\n x_train, kernel_length_scale, variable, transform_quad_rules,\n skip_xi_1=True)\n\n if center is True:\n A_inv = K_inv*kernel_var\n tau = np.prod(np.array(tau_list), axis=0)\n A_inv_y = A_inv.dot(y_train)\n shift = tau.dot(A_inv_y)\n shift += gp._y_train_mean\n else:\n shift = 0\n\n kernel_var /= float(gp._y_train_std**2)\n\n length_scale = np.atleast_1d(kernel_length_scale)\n nvars = variable.num_vars()\n marginalized_gps = []\n for ii in range(nvars):\n tau = np.prod(np.array(tau_list)[:ii], axis=0)*np.prod(\n np.array(tau_list)[ii+1:], axis=0)\n u = np.prod(u_list[:ii])*np.prod(u_list[ii+1:])\n assert np.isscalar(kernel_var)\n kernel = kernel_var*UnivariateMarginalizedSquaredExponentialKernel(\n tau, u, length_scale[ii], gp.X_train_[:, ii:ii+1])\n # undo kernel_var *= gp._y_train_std**2 in extact_gaussian_process_attr\n gp_ii = UnivariateMarginalizedGaussianProcess(\n kernel, gp.X_train_[:, ii:ii+1].T, L_factor, gp.y_train_,\n gp._y_train_mean, gp._y_train_std, mean=shift)\n if hasattr(gp, 'var_trans'):\n variable_ii = IndependentMarginalsVariable(\n [gp.var_trans.variable.marginals()[ii]])\n var_trans_ii = AffineTransform(variable_ii)\n gp_ii.set_variable_transformation(var_trans_ii)\n marginalized_gps.append(gp_ii)\n return marginalized_gps",
"def psf_convolve(data_path, subset='', psf_type='ZTE_new', device=torch.device('cpu'), pad_size=200):\n \n print('Device:', device)\n \n ## DIRECTORIES\n psf_path = os.path.join(data_path, 'PSF/{}'.format(psf_type))\n in_path = os.path.join(data_path, 'synthetic_data/GT/{}/'.format(subset))\n\n for pos in range(1, 10):\n out_path = os.path.join(data_path, 'synthetic_data/input/{}_{}/{}/'.format(psf_type, pos, subset))\n os.makedirs(out_path, exist_ok=True)\n print('IN_PATH:', in_path)\n print('OUT_PATH:', out_path)\n\n # Load PSF\n psf = np.load(os.path.join(psf_path,'{}_psf_{}.npy'.format(psf_type, pos))).astype('float32')\n print(os.path.join(psf_path,'{}_psf_{}.npy'.format(psf_type, pos)))\n assert psf is not None, ('No PSF file found.')\n\n \n filenames = [f for f in sorted(os.listdir(in_path)) if f.endswith('.npy')]\n if len(filenames) == 0:\n raise Exception('No .npy files found in \"{}\" A subset argument may be required.'.format(in_path))\n \n # Go through files in folder\n for file in tqdm(filenames):\n # Load ground truth images\n img = np.load(os.path.join(in_path, file))\n \n # Pad or crop PSF if shape not the same as input image\n h, w, _ = img.shape\n pad_img = img_utils.pad_edges(img, (h + pad_size*2, w + pad_size*2))\n\n psf_matched = psf\n if psf_matched.shape[0] != pad_img.shape[0] or psf_matched.shape[1] != pad_img.shape[1]:\n psf_matched = img_utils.match_dim(psf_matched, pad_img.shape[:2])\n \n # FFT Convolution of image and PSF\n img_sim = np.zeros_like(img)\n for c in range(3):\n img_sim[..., c] = img_utils.center_crop(torch_utils.TorchFFTConv2d(torch.tensor(pad_img[..., c]).to(device),\n torch.tensor(psf_matched[..., c]).to(device)).numpy(), (h, w))\n img_sim = np.clip(img_sim, a_min=0, a_max=500)\n # Save output numpy file\n np.save(os.path.join(out_path, file), img_sim)",
"def makeConvolutionKernel(xobs, yobs, detector, psf):\n\n half=detector.nPix/2\n xx,yy=np.meshgrid((np.arange(detector.nPix)-half)*detector.pixScale,(np.arange(detector.nPix)-half)*detector.pixScale)\n if(psf.atmosFWHM > 0):\n atmos_sigma=psf.atmosFWHM/(2.*np.sqrt(2.*np.log(2.)))\n if(detector.vSampConvolve): # PSF and Fiber convolution\n psfArr=np.exp(-(xx**2 + yy**2)/(2.*atmos_sigma**2))\n fibArrs=np.zeros((detector.nVSamp,detector.nPix,detector.nPix))\n if(detector.vSampShape==\"circle\"):\n sel=np.array([((xx-pos[0])**2 + (yy-pos[1])**2 < detector.vSampSize**2) for pos in zip(xobs,yobs)])\n elif(detector.vSampShape==\"square\"):\n PArad=np.deg2rad(detector.vSampPA)\n sel=np.array([((np.abs((xx-pos[0])*np.cos(PArad) - (yy-pos[1])*np.sin(PArad)) < 0.5*detector.vSampSize) & (np.abs((xx-pos[0])*np.sin(PArad) + (yy-pos[1])*np.cos(PArad)) < 0.5*detector.vSampSize)) for pos in zip(xobs,yobs)])\n fibArrs[sel]=1.\n kernel=np.array([scipy.signal.fftconvolve(psfArr,fibArrs[ii],mode=\"same\") for ii in range(detector.nVSamp)])\n else:\n # this is basically the psf convolved with a delta function at the center of each fiber\n kernel=np.array([np.exp(-((xx-pos[0])**2 + (yy-pos[1])**2)/(2.*atmos_sigma**2)) for pos in zip(xobs,yobs)])\n else:\n # Fiber only\n kernel=np.zeros((detector.nVSamp,detector.nPix,detector.nPix))\n if(detector.vSampShape==\"circle\"):\n sel=np.array([((xx-pos[0])**2 + (yy-pos[1])**2 < detector.vSampSize**2) for pos in zip(xobs,yobs)])\n elif(detector.vSampShape==\"square\"):\n PArad=np.deg2rad(detector.vSampPA)\n sel=np.array([((np.abs((xx-pos[0])*np.cos(PArad) - (yy-pos[1])*np.sin(PArad)) < 0.5*detector.vSampSize) & (np.abs((xx-pos[0])*np.sin(PArad) + (yy-pos[1])*np.cos(PArad)) < 0.5*detector.vSampSize)) for pos in zip(xobs,yobs)])\n kernel[sel]=1.\n \n return kernel",
"def psf_convolve(data, psf, psf_rot=False, psf_type='fixed', method='scipy'):\n\n if psf_type not in ('fixed', 'obj_var'):\n raise ValueError('Invalid PSF type. Options are \"fixed\" or \"obj_var\"')\n\n if psf_rot and psf_type == 'fixed':\n psf = rotate(psf)\n\n elif psf_rot:\n psf = rotate_stack(psf)\n\n if psf_type == 'fixed':\n return np.array([convolve(data_i, psf, method=method) for data_i in\n data])\n\n elif psf_type == 'obj_var':\n\n return convolve_stack(data, psf, method=method)",
"def convert_to_dm(image, psf, psf_size):\n\n image_sizex, image_sizey = image.array.shape\n\n masked_image = afwImage.MaskedImageF(image_sizex, image_sizey)\n masked_image.image.array[:] = image.array\n\n var = mad_std(image.array)**2\n masked_image.variance.array[:] = var\n masked_image.mask.array[:] = 0\n exp = afwImage.ExposureF(masked_image)\n\n psf_image = galsim.ImageF(psf_size, psf_size, wcs=image.wcs)\n psf.drawImage(psf_image)\n exp_psf = KernelPsf(FixedKernel(afwImage.ImageD(psf_image.array.astype(float))))\n exp.setPsf(exp_psf)\n\n calib = afwImage.makePhotoCalibFromCalibZeroPoint(27)\n exp.setPhotoCalib(calib)\n\n # set WCS\n cd_matrix = image.wcs.cd\n crpix = geom.Point2D(image.wcs.crpix[0], image.wcs.crpix[1])\n crval = geom.SpherePoint(image.wcs.center.ra.deg, image.wcs.center.dec.deg, geom.degrees)\n wcs = makeSkyWcs(crpix=crpix, crval=crval, cdMatrix=cd_matrix)\n exp.setWcs(wcs)\n return exp",
"def convolve(image,psf,doPSF=True,edgeCheck=False):\n datadim1 = image.shape[0]\n datadim2 = image.shape[1]\n if datadim1!=datadim2:\n ddim = max(datadim1,datadim2)\n s = numpy.binary_repr(ddim-1)\n s = s[:-1]+'0' # Guarantee that padding is used\n else:\n ddim = datadim1\n s = numpy.binary_repr(ddim-1)\n if s.find('0')>0:\n size = 2**len(s)\n if edgeCheck==True and size-ddim<8:\n size*=2\n boxd = numpy.zeros((size,size))\n r = size-datadim1\n r1 = r2 = r/2\n if r%2==1:\n r1 = r/2+1\n c = size-datadim2\n c1 = c2 = c/2\n if c%2==1:\n c1 = c/2+1\n boxdslice = (slice(r1,datadim1+r1),slice(c1,datadim2+c1))\n boxd[boxdslice] = image\n else:\n boxd = image\n\n if doPSF:\n # Pad the PSF to the image size\n boxp = boxd*0.\n if boxd.shape[0]==psf.shape[0]:\n boxp = psf.copy()\n else:\n r = boxp.shape[0]-psf.shape[0]\n r1 = r/2+1\n c = boxp.shape[1]-psf.shape[1]\n c1 = c/2+1\n boxpslice = (slice(r1,psf.shape[0]+r1),slice(c1,psf.shape[1]+c1))\n boxp[boxpslice] = psf.copy()\n # Store the transform of the image after the first iteration\n a = (numpy.fft.rfft2(boxp))\n else:\n a = psf\n # PSF transform and multiplication\n b = a*numpy.fft.rfft2(boxd)\n # Inverse transform, including phase-shift to put image back in center;\n # this removes the requirement to do 2x zero-padding so makes things\n # go a bit quicker.\n b = numpy.fft.fftshift(numpy.fft.irfft2(b)).real\n # If the image was padded, remove the padding\n if s.find('0')>0:\n b = b[boxdslice]\n\n return b,a",
"def convolve_and_draw(self,obj):\n obj = galsim.Convolve([obj,self.localpsf])\n stamp = obj.drawImage(offset=self.offset,wcs=self.localwcs,method='no_pixel')\n stamp.setCenter(self.xpos,self.ypos)\n return stamp",
"def subtractnei(image,allcat,psfcat,psf):\n\n indnei = findpsfnei(allcat,psfcat,psf.npix)\n nnei = len(indnei)\n\n flux = image.data-image.sky\n resid = image.copy()\n fitradius = psf.fwhm()*0.5\n \n # Loop over neighboring stars and fit just the core\n for i in range(nnei):\n x1 = allcat['x'][indnei[i]]\n xp1 = int(np.minimum(np.maximum(np.round(x1),0),image.shape[1]-1))\n y1 = allcat['y'][indnei[i]]\n yp1 = int(np.minimum(np.maximum(np.round(y1),0),image.shape[0]-1))\n if 'amp' in allcat.columns:\n h1 = allcat['amp'][indnei[i]]\n elif 'peak' in allcat.columns:\n h1 = allcat['peak'][indnei[i]]\n else:\n h1 = flux[yp1,xp1]\n initpars = [h1,x1,y1] #image.sky[yp1,xp1]]\n bbox = psf.starbbox((initpars[1],initpars[2]),image.shape,psf.radius)\n # Fit amp empirically with central pixels\n flux1 = flux[bbox.slices]\n err1 = image[bbox.slices].error\n model1 = psf(pars=initpars,bbox=bbox)\n good = ((flux1/err1>2) & (flux1>0) & (model1/np.max(model1)>0.25))\n amp = np.median(flux1[good]/model1[good]) * initpars[0]\n pars = [amp, x1, y1]\n #starcat,perror = psf.fit(flux,pars=initpars,radius=fitradius,recenter=False,niter=2)\n #pars = [starcat['amp'][0],starcat['x'][0],starcat['y'][0]]\n im1 = psf(pars=pars,bbox=bbox)\n resid[bbox.slices].data -= im1\n return resid",
"def __zero_point(image_file, psf_sources, sep_max=2.0,\n \n plot_corr=False, corr_plotname=None,\n plot_source_offs=False, source_offs_plotname=None,\n plot_field_offs=False, field_offs_plotname=None, \n gaussian_blur_sigma=30.0, \n cat_num=None, \n write=False, output=None): \n\n # load in data \n image_data = fits.getdata(image_file)\n image_header = fits.getheader(image_file) \n \n # don't necessarily need:\n try:\n instrument = image_header[\"INSTRUME\"]\n except KeyError:\n instrument = \"Unknown\" \n # mandatory:\n pixscale = image_header[\"PIXSCAL1\"] \n try: filt = image_header[\"FILTER\"][0] \n except KeyError: filt = image_header[\"HIERARCH FPA.FILTER\"][0] # for PS1\n try: t_MJD = image_header[\"MJDATE\"] \n except KeyError: t_MJD = image_header[\"MJD-OBS\"]\n \n # determine the catalog to compare to for photometry\n if cat_num: # if a Vizier catalog number is given \n ref_cat = cat_num\n ref_cat_name = cat_num\n else: \n if filt in ['g','r','i','z','Y']:\n zp_filter = (filt).lower() # lowercase needed for PS1\n ref_cat = \"II/349/ps1\" # PanStarrs 1\n ref_cat_name = \"PS1\" \n elif filt == 'u':\n zp_filter = 'u' # closest option right now \n ref_cat = \"V/147\" \n ref_cat_name = \"SDSS DR12\"\n else: \n zp_filter = filt[0] # Ks must be K for 2MASS \n ref_cat = \"II/246/out\" # 2MASS\n ref_cat_name = \"2MASS\"\n \n w = wcs.WCS(image_header) # WCS object and coords of centre \n xsize = image_data.shape[1]\n ysize = image_data.shape[0] \n wcs_centre = np.array(w.all_pix2world(xsize/2.0, ysize/2.0, 1)) \n\n ra_centre = wcs_centre[0]\n dec_centre = wcs_centre[1]\n radius = pixscale*np.max([xsize,ysize])/60.0 #arcmins\n minmag = 13.0 # magnitude minimum\n maxmag = 20.0 # magnitude maximum\n max_emag = 0.4 # maximum allowed error \n nd = 5 # minimum no. 
of detections for a source (across all filters)\n \n # actual querying (internet connection needed)\n print(f\"\\nQuerying Vizier {ref_cat} ({ref_cat_name}) \"+\n f\"around RA {ra_centre:.4f}, Dec {dec_centre:.4f} \"+\n f\"with a radius of {radius:.4f} arcmin\", flush=True)\n \n v = Vizier(columns=[\"*\"], column_filters={\n zp_filter+\"mag\":str(minmag)+\"..\"+str(maxmag),\n \"e_\"+zp_filter+\"mag\":\"<\"+str(max_emag),\n \"Nd\":\">\"+str(nd)}, row_limit=-1) # no row limit \n Q = v.query_region(SkyCoord(ra=ra_centre, dec=dec_centre, \n unit=(u.deg, u.deg)), radius=f'{radius}m', \n catalog=ref_cat, cache=False)\n\n if len(Q) == 0: # if no matches\n raise ValueError(f\"\\nFound no matches in {ref_cat_name}; requested \"+\n \"region may be outside footprint of catalogue or \"+\n \"inputs are too strict\")\n \n \n # pixel coords of found sources\n cat_coords = w.all_world2pix(Q[0]['RAJ2000'], Q[0]['DEJ2000'], 1)\n \n # mask out edge sources\n # a bounding circle for WIRCam, rectangle for MegaPrime/other instruments\n if \"WIRCam\" in instrument:\n rad_limit = xsize/2.0\n dist_to_center = np.sqrt((cat_coords[0]-xsize/2.0)**2 + \n (cat_coords[1]-ysize/2.0)**2)\n mask = dist_to_center <= rad_limit\n good_cat_sources = Q[0][mask]\n else:\n x_lims = [int(0.05*xsize), int(0.95*xsize)] \n y_lims = [int(0.05*ysize), int(0.95*ysize)]\n mask = (cat_coords[0] > x_lims[0]) & (\n cat_coords[0] < x_lims[1]) & (\n cat_coords[1] > y_lims[0]) & (\n cat_coords[1] < y_lims[1])\n good_cat_sources = Q[0][mask] \n \n # cross-matching coords of sources found by astrometry\n source_coords = SkyCoord(ra=psf_sources['ra'], \n dec=psf_sources['dec'], \n frame='icrs', unit='degree')\n # and coords of valid sources in the queried catalog \n cat_source_coords = SkyCoord(ra=good_cat_sources['RAJ2000'], \n dec=good_cat_sources['DEJ2000'], \n frame='icrs', unit='degree')\n \n # indices of matching sources (within <sep_max> pixels of each other) \n idx_image, idx_cat, d2d, d3d = cat_source_coords.search_around_sky(\n source_coords, sep_max*pixscale*u.arcsec)\n\n if len(idx_image) <= 3:\n raise ValueError(f\"Found {len(idx_image)} matches between image but \"+\n f\"{ref_cat_name} and >3 matches are required\")\n return\n \n nmatches = len(idx_image) # store number of matches \n sep_mean = np.mean(d2d.value*3600.0) # store mean separation in \"\n print(f'\\nFound {nmatches:d} sources in {ref_cat_name} within '+\n f'{sep_max} pix of sources detected by astrometry, with average '+\n f'separation {sep_mean:.3f}\" ', flush=True)\n \n # get coords for sources which were matched\n source_matches = source_coords[idx_image]\n cat_matches = cat_source_coords[idx_cat]\n source_matches_ra = [i.ra.value for i in source_matches]\n cat_matches_ra = [i.ra.value for i in cat_matches]\n source_matches_dec = [i.dec.value for i in source_matches]\n cat_matches_dec = [i.dec.value for i in cat_matches]\n # compute offsets \n ra_offsets = np.subtract(source_matches_ra, cat_matches_ra)*3600.0 # arcsec\n dec_offsets = np.subtract(source_matches_dec, cat_matches_dec)*3600.0\n ra_offsets_mean = np.mean(ra_offsets)\n dec_offsets_mean = np.mean(dec_offsets)\n\n # plot the correlation\n if plot_corr:\n # fit a straight line to the correlation\n from scipy.optimize import curve_fit\n def f(x, m, b):\n return b + m*x\n \n xdata = good_cat_sources[zp_filter+'mag'][idx_cat] # catalog\n xdata = [float(x) for x in xdata]\n ydata = psf_sources['mag_fit'][idx_image] # instrumental \n ydata = [float(y) for y in ydata]\n popt, pcov = curve_fit(f, xdata, ydata) # 
obtain fit\n m, b = popt # fit parameters\n perr = np.sqrt(np.diag(pcov))\n m_err, b_err = perr # errors on parameters \n fitdata = [m*x + b for x in xdata] # plug fit into data \n \n # plot correlation\n fig, ax = plt.subplots(figsize=(10,10))\n ax.errorbar(good_cat_sources[zp_filter+'mag'][idx_cat], \n psf_sources['mag_fit'][idx_image], \n psf_sources['mag_unc'][idx_image],\n marker='.', mec=\"#fc5a50\", mfc=\"#fc5a50\", ls=\"\", color='k', \n markersize=12, label=f\"Data [{filt}]\", zorder=1) \n ax.plot(xdata, fitdata, color=\"blue\", \n label=r\"$y = mx + b $\"+\"\\n\"+r\"$ m=$%.3f$\\pm$%.3f, $b=$%.3f$\\pm$%.3f\"%(\n m, m_err, b, b_err), zorder=2) # the linear fit \n ax.set_xlabel(f\"Catalog magnitude [{ref_cat_name}]\", fontsize=15)\n ax.set_ylabel(\"Instrumental PSF-fit magnitude\", fontsize=15)\n ax.set_title(\"PSF Photometry\", fontsize=15)\n ax.legend(loc=\"upper left\", fontsize=15, framealpha=0.5)\n \n if not(corr_plotname):\n corr_plotname=image_file.replace(\".fits\", \"_PSF_photometry.png\")\n plt.savefig(corr_plotname, bbox_inches=\"tight\")\n plt.close() \n \n # plot the RA, Dec offset for each matched source \n if plot_source_offs: \n # plot\n plt.figure(figsize=(10,10))\n plt.plot(ra_offsets, dec_offsets, marker=\".\", linestyle=\"\", \n color=\"#ffa62b\", mec=\"black\", markersize=5)\n plt.xlabel('RA (J2000) offset [\"]', fontsize=15)\n plt.ylabel('Dec (J2000) offset [\"]', fontsize=15)\n plt.title(f\"Source offsets from {ref_cat_name} catalog\", fontsize=15)\n plt.axhline(0, color=\"k\", linestyle=\"--\", alpha=0.3) # (0,0)\n plt.axvline(0, color=\"k\", linestyle=\"--\", alpha=0.3)\n plt.plot(ra_offsets_mean, dec_offsets_mean, marker=\"X\", \n color=\"blue\", label = \"Mean\", linestyle=\"\") # mean\n plt.legend(fontsize=15)\n plt.rc(\"xtick\",labelsize=14)\n plt.rc(\"ytick\",labelsize=14)\n \n if not(source_offs_plotname):\n source_offs_plotname = image_file.replace(\".fits\", \n \"_source_offsets_astrometry.png\")\n plt.savefig(source_offs_plotname, bbox_inches=\"tight\") \n plt.close()\n \n # plot the overall offset across the field \n if plot_field_offs:\n from scipy.ndimage import gaussian_filter\n # add offsets to a 2d array\n offsets_image = np.zeros(image_data.shape)\n for i in range(len(d2d)): \n x = psf_sources[idx_image][i][\"x_0\"]\n y = psf_sources[idx_image][i][\"y_0\"]\n intx, inty = int(x), int(y)\n offsets_image[inty, intx] = d2d[i].value*3600.0 \n # apply a gaussian blur to visualize large-scale structure\n blur_sigma = gaussian_blur_sigma\n offsets_image_gaussian = gaussian_filter(offsets_image, blur_sigma)\n offsets_image_gaussian *= np.max(offsets_image)\n offsets_image_gaussian *= np.max(offsets_image_gaussian)\n \n # plot\n if \"WIRCam\" in instrument:\n plt.figure(figsize=(10,9))\n else:\n plt.figure(figsize=(9,13)) \n ax = plt.subplot(projection=w)\n plt.imshow(offsets_image_gaussian, cmap=\"magma\", \n interpolation=\"nearest\", origin=\"lower\")\n # textbox indicating the gaussian blur and mean separation\n textstr = r\"Gaussian blur: $\\sigma = %.1f$\"%blur_sigma+\"\\n\"\n textstr += r'$\\overline{offset} = %.3f$\"'%sep_mean\n box = dict(boxstyle=\"square\", facecolor=\"white\", alpha=0.8)\n if \"WIRCam\" in instrument:\n plt.text(0.6, 0.91, transform=ax.transAxes, s=textstr, \n bbox=box, fontsize=15)\n else:\n plt.text(0.44, 0.935, transform=ax.transAxes, s=textstr, \n bbox=box, fontsize=15) \n plt.xlabel(\"RA (J2000)\", fontsize=16)\n plt.ylabel(\"Dec (J2000)\", fontsize=16)\n plt.title(f\"Field offsets from {ref_cat_name} catalog\", 
fontsize=15)\n ax.coords[\"ra\"].set_ticklabel(size=15)\n ax.coords[\"dec\"].set_ticklabel(size=15)\n \n if not(field_offs_plotname):\n field_offs_plotname = image_file.replace(\".fits\", \n \"_field_offsets_astrometry.png\")\n \n plt.savefig(field_offs_plotname, bbox_inches=\"tight\") \n plt.close()\n \n # compute magnitude differences and zero point mean, median and error\n mag_offsets = ma.array(good_cat_sources[zp_filter+'mag'][idx_cat] - \n psf_sources['mag_fit'][idx_image])\n\n zp_mean, zp_med, zp_std = sigma_clipped_stats(mag_offsets)\n \n # add these to the header of the image file \n f = fits.open(image_file, mode=\"update\")\n f[0].header[\"ZP_MEAN\"] = zp_mean\n f[0].header[\"ZP_MED\"] = zp_med\n f[0].header[\"ZP_STD\"] = zp_std\n f.close()\n \n # add a mag_calib and mag_calib_unc column to psf_sources\n mag_calib = psf_sources['mag_fit'] + zp_mean\n mag_calib.name = 'mag_calib'\n # propagate errors \n mag_calib_unc = np.sqrt(psf_sources['mag_unc']**2 + zp_std**2)\n mag_calib_unc.name = 'mag_calib_unc'\n psf_sources['mag_calib'] = mag_calib\n psf_sources['mag_calib_unc'] = mag_calib_unc\n \n # add flag indicating if source is in a catalog and which catalog \n in_cat = []\n for i in range(len(psf_sources)):\n if i in idx_image:\n in_cat.append(True)\n else:\n in_cat.append(False)\n in_cat_col = Column(data=in_cat, name=\"in_catalog\")\n psf_sources[f\"in {ref_cat_name}\"] = in_cat_col\n \n # add new columns \n nstars = len(psf_sources)\n col_filt = Column([filt for i in range(nstars)], \"filter\",\n dtype = np.dtype(\"U2\"))\n col_mjd = Column([t_MJD for i in range(nstars)], \"MJD\")\n psf_sources[\"filter\"] = col_filt\n psf_sources[\"MJD\"] = col_mjd\n \n # compute magnitude differences between catalog and calibration \n # diagnostic for quality of zero point determination \n sources_mags = psf_sources[idx_image][\"mag_calib\"]\n cat_mags = good_cat_sources[idx_cat][zp_filter+\"mag\"]\n mag_diff_mean = np.mean(sources_mags - cat_mags)\n print(\"\\nMean difference between calibrated magnitudes and \"+\n f\"{ref_cat_name} magnitudes = {mag_diff_mean}\", flush=True)\n \n if write: # write the table of sources w calibrated mags, if desired\n if not(output):\n output = image_file.replace(\".fits\", \"_PSF_photometry.fits\")\n psf_sources.write(output, overwrite=True, format=\"ascii\") \n \n return psf_sources",
"def cube_recenter_moffat2d_fit(array, pos_y, pos_x, fwhm=4, subi_size=5, \n nproc=None, full_output=False, verbose=True, \n save_shifts=False, debug=False, \n unmoving_star=True): \n if not array.ndim == 3:\n raise TypeError('Input array is not a cube or 3d array')\n # if not pos_x or not pos_y:\n # raise ValueError('Missing parameters POS_Y and/or POS_X')\n \n # If frame size is even we drop a row and a column\n if array.shape[1]%2==0:\n array = array[:,1:,:].copy()\n if array.shape[2]%2==0:\n array = array[:,:,1:].copy()\n \n if verbose: start_time = timeInit()\n \n n_frames = array.shape[0]\n cy, cx = frame_center(array[0])\n array_recentered = np.zeros_like(array) \n\n if isinstance(fwhm,float) or isinstance(fwhm,int):\n fwhm_scal = fwhm\n fwhm = np.zeros((n_frames))\n fwhm[:] = fwhm_scal\n size = np.zeros(n_frames) \n for kk in range(n_frames):\n size[kk] = max(2,int(fwhm[kk]*subi_size))\n \n if isinstance(pos_x,int) or isinstance(pos_y,int):\n if isinstance(pos_x,int) and not isinstance(pos_y,int):\n raise ValueError('pos_x and pos_y should have the same shape')\n elif not isinstance(pos_x,int) and isinstance(pos_y,int):\n raise ValueError('pos_x and pos_y should have the same shape')\n pos_x_scal, pos_y_scal = pos_x, pos_y\n pos_x, pos_y = np.zeros((n_frames)),np.zeros((n_frames))\n pos_x[:], pos_y[:] = pos_x_scal, pos_y_scal\n\n ### Precaution: some frames are dominated by noise and hence cannot be used\n ### to find the star with a Moffat or Gaussian fit.\n ### In that case, just replace the coordinates by the approximate ones\n if unmoving_star:\n star_approx_coords, star_not_present = approx_stellar_position(array,\n fwhm,\n True)\n star_approx_coords.tolist()\n star_not_present.tolist()\n else:\n star_approx_coords, star_not_present = [None]*n_frames, [None]*n_frames\n\n if not nproc: # Hyper-threading \"duplicates\" the cores -> cpu_count/2\n nproc = (cpu_count()/2) \n if nproc==1:\n res = []\n bar = pyprind.ProgBar(n_frames, stream=1, \n title='Looping through frames')\n for i in range(n_frames):\n res.append(_centroid_2dm_frame(array, i, size[i], pos_y[i], \n pos_x[i], star_approx_coords[i], \n star_not_present[i]))\n bar.update()\n res = np.array(res)\n elif nproc>1:\n pool = Pool(processes=int(nproc)) \n res = pool.map(eval_func_tuple,itt.izip(itt.repeat(_centroid_2dm_frame),\n itt.repeat(array),\n range(n_frames),\n size.tolist(),\n pos_y.tolist(), \n pos_x.tolist(),\n star_approx_coords,\n star_not_present)) \n res = np.array(res)\n pool.close()\n y = cy - res[:,0]\n x = cx - res[:,1]\n \n for i in xrange(n_frames):\n if debug: print y[i], x[i]\n array_recentered[i] = frame_shift(array[i], y[i], x[i])\n\n if verbose: timing(start_time)\n\n if save_shifts: \n np.savetxt('recent_moffat_shifts.txt', np.transpose([y, x]), fmt='%f')\n if full_output:\n return array_recentered, y, x\n else:\n return array_recentered",
"def convolve(self, img):",
"def psf2otf(psf, s):\n \n # computes padding values\n sh = np.array(psf.shape)\n s = np.array(s)\n pad = s - sh\n \n # centers psf\n h_centered = np.pad(psf, ((pad[0]//2+1, pad[0]-pad[0]//2-1), (pad[1]//2+1, pad[1]-pad[1]//2-1)), mode='constant')\n\n plt.imshow(h_centered)\n plt.show()\n # Fourier transform (aka OTF) of the psf\n h_centered = np.fft.fftshift(h_centered)\n H = np.fft.fft2(h_centered, s)\n \n # Keep only real values (simple approximation)\n H = np.real(H)\n return H",
"def marginalize(self,iaxis,bin_range=None):\n\n h = Histogram(self._axes[(iaxis+1)%2],style=self._style) \n\n if iaxis == 1:\n\n if bin_range is None: \n h._counts = np.apply_over_axes(np.sum,self._counts,[1]).reshape(h._counts.shape)\n h._var = np.apply_over_axes(np.sum,self._var,[1]).reshape(h._counts.shape)\n else:\n c = self._counts[:,bin_range[0]:bin_range[1]]\n v = self._var[:,bin_range[0]:bin_range[1]]\n\n h._counts = np.apply_over_axes(np.sum,c,[1]).reshape(h._counts.shape)\n h._var = np.apply_over_axes(np.sum,v,[1]).reshape(h._counts.shape)\n else:\n\n if bin_range is None: \n h._counts = np.apply_over_axes(np.sum,self._counts,[0]).reshape(h._counts.shape)\n h._var = np.apply_over_axes(np.sum,self._var,[0]).reshape(h._counts.shape)\n else:\n c = self._counts[bin_range[0]:bin_range[1],:]\n v = self._var[bin_range[0]:bin_range[1],:]\n\n h._counts = np.apply_over_axes(np.sum,c,[0]).reshape(h._counts.shape)\n h._var = np.apply_over_axes(np.sum,v,[0]).reshape(h._counts.shape)\n\n return h",
"def cube_recenter_gauss2d_fit(array, pos_y, pos_x, fwhm=4, subi_size=1, \n nproc=None, full_output=False, verbose=True, \n save_shifts=False, debug=False): \n if not array.ndim == 3:\n raise TypeError('Input array is not a cube or 3d array')\n if not pos_x or not pos_y:\n raise ValueError('Missing parameters POS_Y and/or POS_X')\n \n # If frame size is even we drop a row and a column\n if array.shape[1]%2==0:\n array = array[:,1:,:].copy()\n if array.shape[2]%2==0:\n array = array[:,:,1:].copy()\n \n if verbose: start_time = timeInit()\n \n n_frames = array.shape[0]\n cy, cx = frame_center(array[0])\n array_recentered = np.zeros_like(array) \n\n if isinstance(fwhm,float) or isinstance(fwhm,int):\n fwhm_scal = fwhm\n fwhm = np.zeros((n_frames))\n fwhm[:] = fwhm_scal\n size = np.zeros(n_frames) \n for kk in range(n_frames):\n size[kk] = max(2,int(fwhm[kk]*subi_size))\n \n if not nproc: # Hyper-threading \"duplicates\" the cores -> cpu_count/2\n nproc = (cpu_count()/2) \n elif nproc==1:\n res = []\n bar = pyprind.ProgBar(n_frames, stream=1, \n title='Looping through frames')\n for i in range(n_frames):\n res.append(_centroid_2dg_frame(array, i, size[i], pos_y, pos_x))\n bar.update()\n res = np.array(res)\n elif nproc>1:\n pool = Pool(processes=int(nproc)) \n res = pool.map(eval_func_tuple,itt.izip(itt.repeat(_centroid_2dg_frame),\n itt.repeat(array),\n range(n_frames),\n itt.repeat(size),\n itt.repeat(pos_y), \n itt.repeat(pos_x))) \n res = np.array(res)\n pool.close()\n y = cy - res[:,0]\n x = cx - res[:,1]\n \n for i in xrange(n_frames):\n if debug: print y[i], x[i]\n array_recentered[i] = frame_shift(array[i], y[i], x[i])\n\n if verbose: timing(start_time)\n\n if save_shifts: \n np.savetxt('recent_gauss_shifts.txt', np.transpose([y, x]), fmt='%f')\n if full_output:\n return array_recentered, y, x\n else:\n return array_recentered",
"def destagger(self,data,ax):\n # Check for dimensions of 1.\n # If it exists, don't destagger it.\n\n shp = data.shape\n for n,size in enumerate(shp):\n if (size==1) and (n==ax):\n ax = None\n break\n\n #pdb.set_trace()\n\n if ax==None:\n return data\n else:\n nd = data.ndim\n sl0 = [] # Slices to take place on staggered axis\n sl1 = []\n\n for n in range(nd):\n if n is not ax:\n sl0.append(slice(None))\n sl1.append(slice(None))\n else:\n sl0.append(slice(None,-1))\n sl1.append(slice(1,None))\n\n data_unstag = 0.5*(data[sl0] + data[sl1])\n return data_unstag",
"def get_fwhm(img, pos_center):\n x = np.linspace(0, img.shape[1], img.shape[1])\n y = np.linspace(0, img.shape[0], img.shape[0])\n x, y = np.meshgrid(x, y)\n #Parameters: xpos, ypos, sigmaX, sigmaY, amp, baseline\n initial_guess = (pos_center[1],pos_center[0],10,1,0)\n # subtract background and rescale image into [0,1], with floor clipping\n bg = np.percentile(img,5)\n img_scaled = np.clip((img - bg) / (img.max() - bg),0,1)\n popt, pcov = opt.curve_fit(\n _twoD_GaussianScaledAmp, (x,y),\n img_scaled.ravel(),\n p0=initial_guess,\n bounds=((pos_center[1]-img.shape[1]*0.1,\n pos_center[0]-img.shape[0]*0.1,\n 1, 0.5, -0.1),\n (pos_center[1]+img.shape[1]*0.1,\n pos_center[0]+img.shape[0]*0.1,\n (img.shape[1]+img.shape[0])/4, 1.5, 0.5)\n )\n )\n # xcenter, ycenter, sigma, amp, offset = popt[0], popt[1], popt[2], popt[3], popt[4]\n sigma = popt[2]\n FWHM = np.abs(4*sigma*np.sqrt(-0.5*np.log(0.5)))\n \n return FWHM",
"def pca(tensor, target_dim, device):\n\n n, f = tensor.size()\n if n < 25: # no point in PCA, just clip\n res = tensor[:, :target_dim]\n else:\n if device == \"cuda\":\n tensor = tensor.to('cpu')\n model = PCA(n_components=target_dim, whiten=True)\n\n res = model.fit_transform(tensor)\n res = torch.from_numpy(res)\n res = res.to(device)\n\n return res",
"def Spatial_Bootstrap(x,y,var,variog,seed,nsim,outfl):\r\n\r\n random.seed(seed)\r\n # load the variogram fo data\r\n nst = variog['nst']\r\n cc = np.zeros(nst); aa = np.zeros(nst); it = np.zeros(nst)\r\n ang = np.zeros(nst); anis = np.zeros(nst)\r\n \r\n c0 = variog['nug'];\r\n cc[0] = variog['cc1']; it[0] = variog['it1']; ang[0] = variog['azi1'];\r\n aa[0] = variog['hmaj1']; anis[0] = variog['hmin1']/variog['hmaj1'];\r\n if nst == 2:\r\n cc[1] = variog['cc2']; it[1] = variog['it2']; ang[1] = variog['azi2'];\r\n aa[1] = variog['hmaj2']; anis[1] = variog['hmin2']/variog['hmaj2'];\r\n \r\n # Initialize the covariance subroutine and cbb at the same time:\r\n rotmat, maxcov = setup_rotmat2(c0,nst,it,cc,ang)\r\n PMX = 9999.0\r\n cbb = cova2(0.0,0.0,0.0,0.0,nst,c0,PMX,cc,aa,it,ang,anis,rotmat,maxcov)\r\n \r\n # Allocate the needed memory:\r\n len_=len(x)\r\n corr=np.zeros((len_,len_))\r\n low=np.zeros((len_,len_))\r\n wvec=np.zeros((len_,nsim))\r\n yvec_n=np.zeros((len_,nsim))\r\n \r\n for i in range(len_):\r\n for j in range(len_):\r\n co=cova2(x[i], y[i], x[j], y[j], nst, c0, PMX, cc, aa, it, ang, anis, rotmat, maxcov)\r\n if(i==j): co=co+0.01 # Add small value to diagonal elements to make matrix positive definite\r\n corr[i,j] = co\r\n corr[j,i] = co\r\n \r\n # LU decomposition\r\n try: \r\n L=scipy.linalg.cholesky(corr, lower=True, overwrite_a=True)\r\n except ValueError:\r\n pass\r\n \r\n # Generate uncorrelated Gaussian simulation at data locations\r\n mu=0; sigma=1\r\n for i in range(len_):\r\n Dist = np.random.normal(mu, sigma, nsim)\r\n wvec[i,:]=Dist\r\n \r\n # Generate correlated Gaussian simulation at data locations\r\n yvec_r=[]\r\n for k in range(nsim):\r\n tmp=(np.matmul(L,wvec[:,k]))\r\n tmp_=[]\r\n if(outfl):\r\n outfl_=outfl+'_'+str(k+1)\r\n f1=open(outfl_,'w')\r\n txt='data \\n'+'1 \\n'+'Value \\n'\r\n f1.write(str(txt))\r\n for j in range(len(tmp)):\r\n yvec_n[j,k]=tmp[j]\r\n prob=norm.cdf(tmp[j])\r\n quantle=np.quantile(var, prob, axis=0, keepdims=True)[0]\r\n if(outfl): f1.write(\"{0:.4f}\".format(quantle)+\"\\n\")\r\n tmp_.append(quantle)\r\n f1.close() \r\n yvec_r.append(tmp_) \r\n \r\n # Calculate distribution of the mean\r\n mean=[]\r\n for k in range(nsim):\r\n mean.append(np.mean(yvec_r[k]))\r\n \r\n print(' Effective number of data= ', np.var(var)/np.var(mean)) \r\n print(' Mean of mean=',np.round(np.mean(mean),4),', Variance of the mean = ', np.round(np.var(mean),4)) \r\n \r\n return yvec_r, mean",
"def profile_asterix(data, center=None, nprofiles=5, clim=None):\n fig = plt.figure(figsize=(17,11))\n ax1 = fig.add_subplot(121)\n im = plt.imshow(data,cmap=plt.cm.jet)\n cb = plt.colorbar()\n if clim:\n plt.clim(clim)\n plt.xlabel('col #')\n plt.ylabel('row #')\n\n ax2 = fig.add_subplot(122)\n plt.axhline(0,c='k')\n plt.title('profiles')\n plt.ylabel('deformation')\n plt.xlabel('pixel')\n\n nrow, ncol = data.shape\n if center:\n r0, c0 = center\n else:\n r0 = nrow/2\n c0 = ncol/2\n\n #profiles = {}\n #colors = ['b', 'g', 'r', 'c', 'm','y','k']\n slopes = np.linspace(0, np.pi, nprofiles)\n for i,rad in enumerate(slopes[:-1]): #don't repeat 0 & pi\n # Add profile line to interferogram\n #print i, rad, colors[i]\n #special case division by zeros\n if rad == 0: #could also do m=np.inf\n start = (0, r0)\n end = (ncol, r0)\n elif rad == np.pi/2:\n start = (c0, 0)\n end = (c0, nrow)\n else:\n m = np.tan(rad)\n leftIntercept = r0 + m*-c0 #NOTE: imshow takes care of axes flipping automatically!\n rightIntercept = r0 + m*(ncol-c0)\n start = (0, leftIntercept)\n end = (ncol, rightIntercept)\n ax1.plot([start[0],end[0]], [start[1],end[1]], scalex=0, scaley=0)\n\n # Add profile to adjacent plot\n #NOTE: mean, probably more representative\n length = np.floor(np.hypot(start[0]-end[0], start[1]-end[1])) #sample each pixel line passes through\n cols = np.linspace(start[0], end[0]-1, length) #NOTE end-2 to make sure indexing works\n rows = np.linspace(start[1], end[1]-1, length)\n\n # Radial-plot\n radii = np.hypot(cols-c0, rows-r0)\n\n # East Positive (to check for E-W symmetry)\n if rad == np.pi/2: #special case for vertical profile\n radii[np.where(rows>r0)] *= -1\n else:\n radii[np.where(cols<c0)] *= -1\n\n # North Positive\n #if rad == 0:\n # radii[np.where(cols<c0)] *= -1\n #else:\n # radii[np.where(rows>r0)] *= -1\n\n # not sure why there are indexing errors:\n good = (rows <= data.shape[0]) & (cols <= data.shape[1])\n rows = rows[good]\n indrows = rows.astype(np.int)\n cols = cols[good]\n indcols = cols.astype(np.int)\n pPoints = data[indrows, indcols]\n ax2.plot(radii[good], pPoints, marker='.')\n\n #ax1.plot(c0,r0, marker='s', mec='k', mew=2, mfc='none', scalex=0, scaley=0)\n ax1.plot(c0,r0,'ko', ms=2, scalex=0, scaley=0)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the recent internet online datetime | def online_since(self):
return self._dt_online | [
"def offline_since(self):\n return self._dt_offline",
"def last_updated(self, url):\n return self.metadata(url).last_updated_in_seconds",
"def get_date_time(webpage_url):\n article = Article(webpage_url)\n article.download()\n article.parse()\n return article.publish_date.strftime('%d %B %Y')",
"def test_Bridge_getNetworkstatusLastPublished(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n\n published = self.bridge.getNetworkstatusLastPublished()\n self.assertIsNotNone(published)\n self.assertIsInstance(published, datetime.datetime)\n self.assertEqual(str(published), '2014-12-22 21:51:27')",
"def _last_updated():\n #TODO: implement\n return datetime.datetime.now()",
"def last_geo_failover_time(self) -> str:\n return pulumi.get(self, \"last_geo_failover_time\")",
"def datetime(self):\n return self.date_published.strftime('%Y-%m-%d %H:%M:%S')",
"def get_last_check() -> datetime.datetime:\n return getter(\"last_check_dt\")",
"def latest_info():",
"def detect_latest_dates(source, user, passwd):\n\n soup = retrieve_url(source, user, passwd)\n dates = [d.text[:-1] for d in soup.find_all(\"a\", href=re.compile(\"..-..-..\"))]\n print \"Latest date: {}\".format(dates[0])\n return dates",
"def duration(self):\n if self._dt_offline is None or self._dt_online >= self._dt_offline:\n return datetime.now() - self._dt_online\n else:\n return datetime.now() - self._dt_offline",
"def get_date_scraped(self):\n return datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\")",
"def chameleon_mirror_server_time(self):\n command = ('wget -S %s' % self.CHAMELEON_MIRROR_URL).split()\n for line in system_tools.SystemTools.Output(*command).splitlines():\n if 'Date' in line:\n _, _, day, month, year, hrs_mins_secs, _ = line.split()\n month = self.MONTH.get(month, '')\n if month:\n hrs, mins, secs = hrs_mins_secs.split(':')\n return datetime.datetime(int(year), int(month), int(day),\n int(hrs), int(mins), int(secs))\n raise ServerTimeException('Failed to get chameleon mirror server time.')",
"def date_latest(self):\n dt = self.sort_date_latest()\n return self._adjust_for_precision(dt, 1.0)",
"def get_last_online_version():\n import httplib\n conn = httplib.HTTPSConnection(\"raw.githubusercontent.com\")\n conn.request(\"GET\", \"/c0rpse/pyzmap/master/pyzmap_CURRENT_VERSION.txt\")\n online_version = bytes.decode(conn.getresponse().read()).strip()\n return online_version",
"def get_latest_imagery_date(overlay):\n meta = _sources[overlay]\n uri = _server_uri + _dir_info['path']\n \n # find a good date to start from, assuming tomorrow\n search_date = datetime.now() + timedelta(days=1)\n assert search_date > datetime(2015, 8, 1) # start of imagery (ignoring 2012)\n last_pub_date = None\n for i in range(7):\n r = requests.get(uri.format(subdir=meta['subdir'], date=search_date))\n if r.status_code != 404:\n n = len(get_overlay_image_list(overlay, date=search_date))\n if n == 48:\n last_pub_date = search_date\n break\n search_date += timedelta(days=-1) \n return last_pub_date",
"def perf_archive_get_oldest_timestamp(self):\n return self.request( \"perf-archive-get-oldest-timestamp\", {\n }, {\n 'timestamp': [ int, False ],\n } )",
"def get_live_date(self) -> Optional[dt.date]:\n return self.__live_date",
"def get_last_update(name: str) -> float:\n global _feeds\n return _feeds[name]['last_update']"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the recent internet offline datetime | def offline_since(self):
return self._dt_offline | [
"def online_since(self):\n return self._dt_online",
"def last_updated(self, url):\n return self.metadata(url).last_updated_in_seconds",
"def _last_updated():\n #TODO: implement\n return datetime.datetime.now()",
"def last_geo_failover_time(self) -> str:\n return pulumi.get(self, \"last_geo_failover_time\")",
"def get_last_check() -> datetime.datetime:\n return getter(\"last_check_dt\")",
"def test_Bridge_getNetworkstatusLastPublished(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n\n published = self.bridge.getNetworkstatusLastPublished()\n self.assertIsNotNone(published)\n self.assertIsInstance(published, datetime.datetime)\n self.assertEqual(str(published), '2014-12-22 21:51:27')",
"def duration(self):\n if self._dt_offline is None or self._dt_online >= self._dt_offline:\n return datetime.now() - self._dt_online\n else:\n return datetime.now() - self._dt_offline",
"def last_modified(self):\n return remote_to_local_datetime(self.last_modified_string)",
"def outdated_at(self) -> str:\n return self._outdated_at",
"def last_disarmed_at(self):\n return self._last_disarmed_at",
"def get_date_of_last_update(self):\n # We obtain this information by checking the last modification time of\n # the .git/FETCH_HEAD file. This is not bulletproof (see the comments\n # in http://stackoverflow.com/a/9229377), but I don't know of any\n # better way.\n return datetime.datetime.fromtimestamp(\n os.path.getmtime(os.path.join(self.path, '.git', 'FETCH_HEAD'))\n )",
"def iso_last(self):\n return arrow.get((self.mjd_last - 40_587) * 86400.0)",
"def latest_info():",
"def get_last_update(name: str) -> float:\n global _feeds\n return _feeds[name]['last_update']",
"def get_date_time(webpage_url):\n article = Article(webpage_url)\n article.download()\n article.parse()\n return article.publish_date.strftime('%d %B %Y')",
"def discord_oldest_unread():",
"def mock_get_last_fetch_time(last_run, params):\n last_fetch = last_run.get('latest_detection_found')\n if not last_fetch:\n # To handle the fact that we can't freeze the time and still parse relative time expressions such as 2 days\n last_fetch = \"2021-07-16T11:08:55.000Z\"\n\n return last_fetch",
"def last_access_time(self):\n return datetime.datetime.fromtimestamp(self.java_obj.lastAccessTime().getTime() / 1000)",
"def get_latest_imagery_date(overlay):\n meta = _sources[overlay]\n uri = _server_uri + _dir_info['path']\n \n # find a good date to start from, assuming tomorrow\n search_date = datetime.now() + timedelta(days=1)\n assert search_date > datetime(2015, 8, 1) # start of imagery (ignoring 2012)\n last_pub_date = None\n for i in range(7):\n r = requests.get(uri.format(subdir=meta['subdir'], date=search_date))\n if r.status_code != 404:\n n = len(get_overlay_image_list(overlay, date=search_date))\n if n == 48:\n last_pub_date = search_date\n break\n search_date += timedelta(days=-1) \n return last_pub_date"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the online or offline duration | def duration(self):
if self._dt_offline is None or self._dt_online >= self._dt_offline:
return datetime.now() - self._dt_online
else:
return datetime.now() - self._dt_offline | [
"def live_duration(self):\n if self.live_info:\n end = self.live_ended_at\n start = self.live_info.get(\"started_at\")\n if end and start:\n return int(end) - int(start)\n return 0",
"def time_diff():\n now = datetime.utcnow()\n diff = now - timedelta(minutes=flaskbb_config['ONLINE_LAST_MINUTES'])\n return diff",
"def get_duration(self):\n lessons = Lesson.objects.filter(section__module__mnemo=self.mnemo).all()\n time_total = lessons.aggregate(Sum('duration'))['duration__sum']\n if time_total is None or time_total == 0:\n return 0\n time_hours = round(time_total.seconds / 3600)\n return time_hours if time_hours else 1",
"def get_duration(self):\n lessons = Lesson.objects.filter(section__module__course__mnemo=self.mnemo).all()\n time_total = lessons.aggregate(Sum('duration'))['duration__sum']\n if time_total is None or time_total == 0:\n return 0\n time_hours = round(time_total.seconds / 3600)\n return time_hours if time_hours else 1",
"def offline_since(self):\n return self._dt_offline",
"def polling_duration(self):\n return self.sdk.SCC_PollingDuration(self._serial)",
"def duration(self):\n return _get_story_duration(self.xml)",
"def get_duration(self):\n return int(self.duration)",
"def connection_duration(connection):\n a = connection['to']['arrival']\n d = connection['from']['departure']\n a = dateutil.parser.parse(a)\n d = dateutil.parser.parse(d)\n return a-d",
"def network_time(self):\n if self.has_fetch:\n return self.shuffle_finish_time - self.start_time - self.local_read_time\n return 0",
"def getSingleSessionRemainingTime(self):\n return self.getRemainingTime(1) #single h323 implies multi_login = False",
"def observation_live_time_duration(self):\n return u.Quantity(self.table.meta[\"LIVETIME\"], \"second\")",
"def idle(self):\n return (datetime.datetime.now() - self._last_received).total_seconds()",
"def online_since(self):\n return self._dt_online",
"def duration(self):\n return self._t_stop - self._t_start",
"def fetch_time(self) -> float:\n return self.navigation_timing.response_end - self.navigation_timing.fetch_start",
"def get_duration(self):\n\t\tself.query_duration()\n\t\treturn self.sound_file.duration",
"def get_timeAvailable(self):\r\n\r\n return self._timeAvailable",
"def get_duration(self):\n time_file_dir = self.dir + '/time.dat'\n session_time = read_time_dat_file(time_file_dir, self.sample_rate)\n self.duration = session_time[-1]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Define the internet connection online state callback implementation. | def on_online(self, func):
self._on_online = func | [
"def _setOnline(self, online):\n if self.online != online:\n self.online = online\n twisted_logger.writeLog(self.logPrefix, self.logName, \"Changing online status to %s\" % online)\n # Call back (trigger) external deferreds\n if online:\n [self.__callbackDeferred(deferred, self.extOnlineDeferreds[deferred], online) for deferred in self.extOnlineDeferreds.keys()]\n else:\n [self.__callbackDeferred(deferred, self.extOfflineDeferreds[deferred], online) for deferred in self.extOfflineDeferreds.keys()]",
"def on_offline(self, func):\n self._on_offline = func",
"def got_state(self, state):\n if state == NM_STATE_CONNECTED:\n self.call_result_cb(ONLINE)\n elif state == NM_STATE_CONNECTING:\n logger.debug(\"Currently connecting, waiting for signal\")\n else:\n self.call_result_cb(OFFLINE)",
"def _get_onlineStatusChanged(self) -> \"adsk::core::Ptr< adsk::core::ApplicationEvent >\" :\n return _core.Application__get_onlineStatusChanged(self)",
"def rpc_online(self, sender, *args):\n \n if (len(args) != 1):\n raise rpc.RPCFault(604, 'online TRUE/FALSE')\n val = args[0]\n if (type(val) != bool):\n raise rpc.RPCFault(605, 'online TRUE/FALSE')\n if (self.factory.online and (not val)):\n self.factory.online = False\n return 'factory now offline for new bot requests'\n if ((not self.factory.online) and val):\n self.factory.online = True\n return 'factory now online for new bot requests'\n return 'no change to online status'",
"def on_offline(self):\n return self._on_offline",
"def handle_system_state_messages_that_were_requested_and_set_online_status(**data):\r\n device_id = data['topic'].split('/')[2]\r\n # check if device is already online, according to the state we keep\r\n if devices[device_id]['device']['online'] == 'online':\r\n log.debug(f\"device {device_id} is already online\")\r\n devices[device_id]['device']['when'] = arrow.now().isoformat()\r\n else:\r\n log.info(f\"device {device_id} is back online ♥\")\r\n # update device status in our state manager (devices)\r\n devices[device_id]['device']['online'] = 'online'\r\n devices[device_id]['device']['when'] = arrow.now().isoformat()\r\n # update online status of the entities (channels)\r\n for channel in devices[device_id]['device']['channels']:\r\n mqtt.publish(\r\n topic=f\"meross/{device_id}/{channel}/available\",\r\n payload=\"online\",\r\n retain=True\r\n )",
"def checkNetworkStatus(self):\r\n pass",
"def process_online(self, data, reset=True, **kwargs):\n raise NotImplementedError('Must be implemented by subclass.')",
"def online(self):\n ret = self._get_attr(\"online\")\n return ret",
"def get_state(self):\n if self.connected is True:\n return self.__request(\n WemoSwitch.body_status, WemoSwitch.headers_get)\n else:\n return WemoSwitch.ERROR_STATE",
"def set_connected(self):\n self.connected = True\n self.async_schedule_update_ha_state()",
"def addStateChangeCallback(self, *args) -> \"void\":\n return _coin.ScXMLStateMachine_addStateChangeCallback(self, *args)",
"def session_online(self, session_online):\n\n self._session_online = session_online",
"def __mpdconnect(fn):\n\t\tdef wrapper(self,*args,**kwargs):\t\t\t\n\t\t\t# check if idle set\n\t\t\tresult = True\n\t\t\ttry:\n\t\t\t\tself.mpdc.noidle()\n\t\t\texcept MPDConnectionError as e:\n\t\t\t\tself.mpdc.connect(\"localhost\", 6600)\n\t\t\t\tresult = False\n\t\t\texcept:\n\t\t\t\tself.__printer('WEIRD... no idle was set..')\n\t\t\t\t\n\t\t\tret = fn(self,*args,**kwargs)\n\t\t\tself.mpdc.send_idle()\n\t\t\treturn ret\n\t\t\t\n\t\treturn wrapper",
"def addStateChangeCallback(self, *args):\n return _coin.ScXMLStateMachine_addStateChangeCallback(self, *args)",
"def on_connected(self):\n log.debug('on_connected called.')",
"def kasaya_connection_started(self, addr):\n LOG.debug(\"Connected to %s\", addr)\n self.SYNC.notify_worker_live(self.status)",
"def internet_on():\n try:\n # connect to the google.com -- tells us if the host is actually reachable\n g_start()\n socket.create_connection((\"www.google.com\", 80), timeout=1)\n g_end('connection to internet is available')\n return True\n except OSError:\n pass\n return False"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
If implemented, called when internet connection state was first detected offline | def on_offline(self):
return self._on_offline | [
"def on_offline(self, func):\n self._on_offline = func",
"def on_online(self, func):\n self._on_online = func",
"def checkNetworkStatus(self):\r\n pass",
"def test_no_calls_are_made_when_hubstate_is_offline(self):\n self.hs.setkey(self.hs.STATE_KEY_IS_ONLINE, False)\n call_command(\"update_weather_cache\")\n self.mock_get.assert_not_called()",
"def check_if_a_device_is_offline(**data):\r\n for device_id in devices.keys():\r\n when = devices[device_id]['device'].get('when')\r\n if not when:\r\n # there is no timestamp, means that the state machine was just created. We are optimistic and pass for now but we set the timestamp\r\n devices[device_id]['device']['when'] = arrow.now().isoformat()\r\n log.debug(f\"optimistic online for {device_id}, will see on next round\")\r\n continue\r\n if arrow.get(when) < arrow.now().shift(minutes=-2): # should be synchronized with how often we poll for status (1 minute)\r\n if devices[device_id]['device']['online'] == 'online':\r\n log.warning(f\"device {device_id} is now offline ಠ‸ಠ, last online was on {when} \")\r\n for channel in devices[device_id]['device']['channels']:\r\n mqtt.publish(\r\n topic=f\"meross/{device_id}/{channel}/available\",\r\n payload=\"offline\",\r\n retain=True\r\n )\r\n devices[device_id]['device']['online'] = 'offline'\r\n else:\r\n log.debug(f\"device {device_id} is online (responding to status request)\")",
"def internet_on():\n try:\n # connect to the google.com -- tells us if the host is actually reachable\n g_start()\n socket.create_connection((\"www.google.com\", 80), timeout=1)\n g_end('connection to internet is available')\n return True\n except OSError:\n pass\n return False",
"def offline(self):\n return request.render('website_event_track.pwa_offline')",
"def _setOnline(self, online):\n if self.online != online:\n self.online = online\n twisted_logger.writeLog(self.logPrefix, self.logName, \"Changing online status to %s\" % online)\n # Call back (trigger) external deferreds\n if online:\n [self.__callbackDeferred(deferred, self.extOnlineDeferreds[deferred], online) for deferred in self.extOnlineDeferreds.keys()]\n else:\n [self.__callbackDeferred(deferred, self.extOfflineDeferreds[deferred], online) for deferred in self.extOfflineDeferreds.keys()]",
"def got_state(self, state):\n if state == NM_STATE_CONNECTED:\n self.call_result_cb(ONLINE)\n elif state == NM_STATE_CONNECTING:\n logger.debug(\"Currently connecting, waiting for signal\")\n else:\n self.call_result_cb(OFFLINE)",
"def on_error(self):\n self.log.info('Network error: disconnected from %s' % (self.address,))\n # Inform upstream Network of error\n self.hooks.error()\n self.socket = None\n #AsyncDelayed(self.connect, 10)()",
"def check_connection():\n while True:\n result = try_and_print(message='Ping test...', function=ping, cs='OK')\n if result['CS']:\n break\n if not ask('ERROR: System appears offline, try again?'):\n if ask('Continue anyway?'):\n break\n else:\n abort()",
"def _async_set_unavailable(self, now):\n if self._ping_loss > 2:\n _LOGGER.info(\"Gateway became unavailable by timeout!\")\n self._is_available = False\n for func in self.callbacks:\n func({\"availability\": False})",
"def notonline(self, nick):\n if self.users.has_key(nick):\n self.announce(\"%s is not online\" % nick)",
"def on_disconnected(self):\n log.debug('on_disconnected called.')",
"def network_is_ready(self):\n time_elapsed = 0\n for i in range(0, 60):\n if self.__network.state >= self.__network.STATE_AWAKED:\n return True\n else:\n time_elapsed += 1\n time.sleep(1.0)\n\n if self.__network.state < self.__network.STATE_AWAKED:\n return False",
"def process_offline(self, data, **kwargs):\n raise NotImplementedError('Must be implemented by subclass.')",
"def waitForNetwork(self):\n time.sleep(0.1)",
"def is_online(self):\n value = b'test'\n try:\n return self.probe_server(value=value) == value\n except ConnectionError:\n return False",
"def online(self):\n ret = self._get_attr(\"online\")\n return ret"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Define the internet connection offline state callback implementation. | def on_offline(self, func):
self._on_offline = func | [
"def on_offline(self):\n return self._on_offline",
"def on_online(self, func):\n self._on_online = func",
"def process_offline(self, data, **kwargs):\n raise NotImplementedError('Must be implemented by subclass.')",
"def got_state(self, state):\n if state == NM_STATE_CONNECTED:\n self.call_result_cb(ONLINE)\n elif state == NM_STATE_CONNECTING:\n logger.debug(\"Currently connecting, waiting for signal\")\n else:\n self.call_result_cb(OFFLINE)",
"def check_if_a_device_is_offline(**data):\r\n for device_id in devices.keys():\r\n when = devices[device_id]['device'].get('when')\r\n if not when:\r\n # there is no timestamp, means that the state machine was just created. We are optimistic and pass for now but we set the timestamp\r\n devices[device_id]['device']['when'] = arrow.now().isoformat()\r\n log.debug(f\"optimistic online for {device_id}, will see on next round\")\r\n continue\r\n if arrow.get(when) < arrow.now().shift(minutes=-2): # should be synchronized with how often we poll for status (1 minute)\r\n if devices[device_id]['device']['online'] == 'online':\r\n log.warning(f\"device {device_id} is now offline ಠ‸ಠ, last online was on {when} \")\r\n for channel in devices[device_id]['device']['channels']:\r\n mqtt.publish(\r\n topic=f\"meross/{device_id}/{channel}/available\",\r\n payload=\"offline\",\r\n retain=True\r\n )\r\n devices[device_id]['device']['online'] = 'offline'\r\n else:\r\n log.debug(f\"device {device_id} is online (responding to status request)\")",
"def test_no_calls_are_made_when_hubstate_is_offline(self):\n self.hs.setkey(self.hs.STATE_KEY_IS_ONLINE, False)\n call_command(\"update_weather_cache\")\n self.mock_get.assert_not_called()",
"def offline(self):\n return request.render('website_event_track.pwa_offline')",
"def _setOnline(self, online):\n if self.online != online:\n self.online = online\n twisted_logger.writeLog(self.logPrefix, self.logName, \"Changing online status to %s\" % online)\n # Call back (trigger) external deferreds\n if online:\n [self.__callbackDeferred(deferred, self.extOnlineDeferreds[deferred], online) for deferred in self.extOnlineDeferreds.keys()]\n else:\n [self.__callbackDeferred(deferred, self.extOfflineDeferreds[deferred], online) for deferred in self.extOfflineDeferreds.keys()]",
"def __mpdconnect(fn):\n\t\tdef wrapper(self,*args,**kwargs):\t\t\t\n\t\t\t# check if idle set\n\t\t\tresult = True\n\t\t\ttry:\n\t\t\t\tself.mpdc.noidle()\n\t\t\texcept MPDConnectionError as e:\n\t\t\t\tself.mpdc.connect(\"localhost\", 6600)\n\t\t\t\tresult = False\n\t\t\texcept:\n\t\t\t\tself.__printer('WEIRD... no idle was set..')\n\t\t\t\t\n\t\t\tret = fn(self,*args,**kwargs)\n\t\t\tself.mpdc.send_idle()\n\t\t\treturn ret\n\t\t\t\n\t\treturn wrapper",
"def addStateChangeCallback(self, *args) -> \"void\":\n return _coin.ScXMLStateMachine_addStateChangeCallback(self, *args)",
"def addStateChangeCallback(self, *args):\n return _coin.ScXMLStateMachine_addStateChangeCallback(self, *args)",
"def test_create_offline_backend(self, client):\n net = mfactory.NetworkFactory(state='ACTIVE')\n bn1 = mfactory.BackendNetworkFactory(network=net)\n mfactory.BackendNetworkFactory(network=net,\n backend__offline=True)\n msg = self.create_msg(operation='OP_NETWORK_CONNECT',\n network=net.backend_id,\n cluster=bn1.backend.clustername)\n update_network(client, msg)\n self.assertTrue(client.basic_ack.called)\n new_net = Network.objects.get(id=net.id)\n self.assertEqual(new_net.state, 'ACTIVE')",
"def offline_ivr(self, offline_ivr):\n\n self._offline_ivr = offline_ivr",
"def _get_onlineStatusChanged(self) -> \"adsk::core::Ptr< adsk::core::ApplicationEvent >\" :\n return _core.Application__get_onlineStatusChanged(self)",
"def checkNetworkStatus(self):\r\n pass",
"def _dispatchNetworkEventLifecycleCallback(self, net, event, detail, cbData):\n cb = cbData[\"cb\"]\n opaque = cbData[\"opaque\"]\n\n cb(self, virNetwork(self, _obj=net), event, detail, opaque)\n return 0",
"def rpc_online(self, sender, *args):\n \n if (len(args) != 1):\n raise rpc.RPCFault(604, 'online TRUE/FALSE')\n val = args[0]\n if (type(val) != bool):\n raise rpc.RPCFault(605, 'online TRUE/FALSE')\n if (self.factory.online and (not val)):\n self.factory.online = False\n return 'factory now offline for new bot requests'\n if ((not self.factory.online) and val):\n self.factory.online = True\n return 'factory now online for new bot requests'\n return 'no change to online status'",
"def addConnectionFailedCallback(*args, **kwargs):\n \n pass",
"def config_offline_site(self, site):\n logger.warn('config offline site keyword DEPRECATED')\n logger.info('Switching to {0} site'.format(site))\n self.manage_offline_site(site, 'config')"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Initialise the gripper and action server | def __init__(self, action_server_name, gripper_id="EZGripper", port_name="/dev/ttyUSB0", baudrate=57600,
servo_ids=[1]):
# Initialise the object allowing to control the EZGripper
self.gripper = EZGripper(gripper_id, port_name, baudrate, servo_ids)
# Initialise a JointStateGripperResult message
self.result_message = JointStateGripperResult()
# Initialise the action server
self.action_server = actionlib.SimpleActionServer(action_server_name, JointStateGripperAction, auto_start=False)
# Set the callback to be executed when a goal is received
self.action_server.register_goal_callback(self.goal_callback)
# Set the callback that should be executed when a preempt request is received
self.action_server.register_preempt_callback(self.preempt_callback)
# Start the server
self.action_server.start()
# Start another thread in which we publish continuously the joint state of the gripper
thread.start_new_thread(self.publish_joint_state, ())
rospy.loginfo("Joint state EZGripper controller ready to receive commands") | [
"def __init__(self, ):\n self.config = None\n self.goal = None\n\n self._config_server = Server(GraspPlannerConfig, self._config_cb)\n\n self._action_server = SimpleActionServer(\n '~plan_grasp', PlanGraspAction,\n execute_cb=self._execute_cb, auto_start=False)\n self._action_server.start()",
"def start(self):\n self.action_server.start()",
"def start(self):\n self.init_trajectory()\n self.server.start()\n print(\"The action server for this driver has been started\")",
"def __init__(self):\n self._actions = Actions()\n self._actors = Actors()\n self._clock = Clock()\n self._is_directing = True",
"def init(self):\n\n self.session.run(self.init_op)",
"def initialize( self, request, response ):\n self.isAjax = ((request.headers.environ.get('HTTP_X_REQUESTED_WITH')=='XMLHttpRequest') or (request.headers.get('X-Requested-With')=='XMLHttpRequest'))\n# logging.debug(\"Content Type is %s\", str(request.headers.get('Content-Type')))\n self.request = request\n self.response = response\n if self.request.headers.environ.get('CONTENT_TYPE') == 'application/json':\n data = simplejson.loads(self.request.body)\n #TODO handle the parameters\n self.params = HalRequestHandler.RequestParameters(request)\n elif self.request.headers.environ.get('CONTENT_TYPE') == 'application/xml':\n data = serializers.deserialize('xml', self.request.body)\n #TODO handle the parameters\n self.params = HalRequestHandler.RequestParameters(request)\n else:\n self.params = HalRequestHandler.RequestParameters(self.request)\n #self.request = super(MyRequestHandler, self).request\n if not self.isAjax: self.isAjax = self.g('isAjax')=='true'\n # set the status variable\n if self.session.has_key( 'status' ):\n self.status = self.session.pop('status')\n #set the default operations\n self.SetDefaultOperations()\n #make any customisations by overloading this method\n self.SetOperations()",
"def start(self, **kw):\n\t\tsuper(webapp_enhanced, self).__init__(self._controller_map, **kw)",
"def __init__(self, application):\r\n sys.path.append('.')\r\n\r\n # create a store of services\r\n self.services = service.IServiceCollection(application)\r\n self.amp_protocol = None # set by amp factory\r\n self.sessions = SESSIONS\r\n self.sessions.server = self\r\n\r\n # Database-specific startup optimizations.\r\n self.sqlite3_prep()\r\n\r\n # Run the initial setup if needed\r\n self.run_initial_setup()\r\n\r\n self.start_time = time.time()\r\n\r\n # initialize channelhandler\r\n channelhandler.CHANNELHANDLER.update()\r\n\r\n # set a callback if the server is killed abruptly,\r\n # by Ctrl-C, reboot etc.\r\n reactor.addSystemEventTrigger('before', 'shutdown',\r\n self.shutdown, _reactor_stopping=True)\r\n\r\n self.game_running = True\r\n\r\n self.run_init_hooks()",
"def collectd_init(self):\n self.server = KatcpServer(self.config['host'],\n int(self.config['port']))\n self.server.start()",
"def __init__(self):\n self.app_grid = Grid() # create a grid object\n self.interface()",
"def _set_up_action_client(self):\n self._action_running = False\n\n self._client = SimpleActionClient(\n self._get_trajectory_controller_name() + \"/follow_joint_trajectory\",\n FollowJointTrajectoryAction\n )\n\n if self._client.wait_for_server(timeout=rospy.Duration(4)) is False:\n rospy.logfatal(\"Failed to connect to action server in 4 sec\")\n raise Exception(\"Failed to connect to action server in 4 sec\")",
"def initialize(self, actions=None, *args, **kwargs):\n self.actions = actions or []",
"def __init__(self):\n self.mainloop = g_main_loop_new(g_main_context_default(), True)\n MainLoop.default = self",
"def __init__(self, server):\n print('[CB] Initializing CuBolt...')\n begin = time.time()\n \n ServerScript.__init__(self, server)\n \n server.particle_effects = []\n \n self.injector = Injector(server)\n self.injector.inject_update()\n self.injector.inject_factory()\n self.injector.inject_entity()\n \n if not has_world:\n print(('[CB] The world module could not be imported, ' + \n 'are you using an old cuwo version?'))\n if not block_types_available:\n print(('[CB] Can not inject terrain module modification' +\n ', this may break some scripts. To use the ' +\n 'terrain module modifications a cuwo build ' +\n 'newer than f8b2c4da58 is needed.'))\n self.injector.inject_world_modification()\n \n needed = time.time() - begin\n print('[CB] Done (%.2fs).' % needed)",
"def __init__(self):\n socketIO.emit(\"status\", \"Walabot initializing\")\n self.wlbt = Walabot()",
"def __init__(self):\n rospy.init_node('route_network')\n self.config = None\n\n # advertise visualization marker topic\n self.pub = rospy.Publisher('route_network', RouteNetwork,\n latch=True, queue_size=10)\n self.graph = None\n rospy.wait_for_service('get_geographic_map')\n self.get_map = rospy.ServiceProxy('get_geographic_map',\n GetGeographicMap)\n\n # register dynamic reconfigure callback, which runs immediately\n self.reconf_server = ReconfigureServer(Config, self.reconfigure)",
"def _init_client(self):\n pass",
"def __init__(self):\n super(TNL3ServicePlugin, self).__init__()\n self._tn_info = None\n # self._driver = None\n self.task_manager = tasks.TaskManager()\n self.task_manager.start()\n self.tn_init()",
"def initialize(self):\n self.addcmd('admin',self.admin_command)\n self.addcmd('deduct',self.deduct_command)\n self.addcmd('setcredit',self.setcredit_command)\n self.addcmd('user',self.user_command)\n self.addcmd('setpin',self.setpin_command)\n self.addcmd('setstatus',self.setstatus_command)\n self.addcmd('logs',self.logs_command)\n self.addcmd('forcereturn',self.forcereturn_command)\n self.addcmd('ban',self.ban_command)\n self.addcmd('unban',self.unban_command)\n # self.addcmd('py',self.py_command)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
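For context, the constructor above only wires the pieces together; the node still has to be instantiated somewhere. A minimal start-up sketch is shown below; the class name JointStateEZGripperController and its import path are placeholders (they do not appear in the rows above), while the constructor arguments mirror the attributes used in the body above (action server name, serial port, baudrate, servo ids).

import rospy
# Placeholder import: point this at wherever the controller class shown above is actually defined.
from my_ezgripper_controller import JointStateEZGripperController

if __name__ == "__main__":
    rospy.init_node("ezgripper_controller")
    # Arguments mirror the constructor body above; adjust to the actual hardware setup.
    controller = JointStateEZGripperController("ezgripper_controller",
                                                port_name="/dev/ttyUSB0",
                                                baudrate=57600,
                                                servo_ids=[1])
    rospy.spin()  # the action server and the joint-state publishing thread keep running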
Callback executed when a goal is received. Execute the joint state contained in the input | def goal_callback(self):
# Get the input field from the goal, containing the target joint state (JointState msg)
joint_state = self.action_server.accept_new_goal().input
# Make sure the input msg is correct
target_value = joint_state.position[0]
if joint_state.name[0] != self.gripper.JOINT_NAME:
rospy.logerr("Command should be given for the joint {}, not {}".format(joint_state.name[0],
self.gripper.JOINT_NAME))
# Send a result message containing a failure
self.result_message.outcome = 1
self.result_message.returned_object = False
self.action_server.set_aborted(self.result_message)
return
if target_value < 0 or target_value > self.gripper.JOINT_LIMIT:
rospy.logerr("The valid joint range of the EZGripper is [0, 1.94]. Request is {}".format(target_value))
# Send a result message containing a failure
self.result_message.outcome = 1
self.result_message.returned_object = False
self.action_server.set_aborted(self.result_message)
return
# Move the gripper to the joint state contained in the received goal at maximum speed.
# Once the gripper has finished moving, maximum torque is applied to hold the object
self.gripper.go_to_joint_value(target_value, 100)
# Initialise and fill a JointStateGripperFeedback message
action_feedback = JointStateGripperFeedback()
action_feedback.current_joint_state.header.stamp = rospy.Time.now()
action_feedback.current_joint_state.name = [self.gripper.JOINT_NAME]
action_feedback.current_joint_state.position = [self.gripper.get_joint_value()]
# Publish the message
self.action_server.publish_feedback(action_feedback)
# Send a result message containing a success
self.result_message.outcome = 0
self.result_message.returned_object = True
self.action_server.set_succeeded(self.result_message) | [
"def go_to_joint_state(joint_goal):\n print(\"Calling 'Go to joint state' with the following goal: {}\".format(joint_goal))\n # The 'go' command can be called with joint values, poses, or without any\n # parameters if you have already set the pose or joint target for the group.\n ##NOTE: wait=True means the call is blocking.\n move_group.go(joint_goal, wait=True)\n\n # Calling ``stop()`` ensures that there is no residual movement\n move_group.stop()\n\n # Double check if arrived at destination\n current_joints = move_group.get_current_joint_values()\n if all_close(joint_goal, current_joints, target_tolerance):\n print(\"Target joint state reached\")\n else:\n print(\"Warning, target joint state NOT reached!\")\n print(current_joints)",
"def _SendNewNavGoal(self):\n if self.execute_path.simple_state != actionlib.SimpleGoalState.DONE:\n self.execute_path.cancel_goal()\n # We need to wait until the goal was really canceled\n if not self.execute_path.wait_for_result(rospy.Duration(2)):\n rospy.logwarn('Cancellation of goal took more than 2 seconds. Continuing anyway.')\n self.execute_path.send_goal(\n navigation_waypoints_server.msg.ExecutePathGoal(\n waypoints=[wp.pose for wp in self._waypoints.GetWaypoints()],\n continue_on_error=True),\n done_cb=self._NavigationDoneCallback,\n feedback_cb=self._NavigationFeedbackCallback)",
"def sample_jointaction_outcome(self, state: State, jointaction: JointAction):\n pass",
"def step(self):\n \tif not self.is_done():\n actions = [agent.program(self.percept(agent))for agent in self.agents]\n for (agent, action) in zip(self.agents, actions):\n \t\t self.execute_action(agent, action)\n self.exogenous_change()",
"def next_state(self, state: State, jointaction: JointAction) -> State:\n pass",
"def callback_env_joint_trajetory(self, data):\n try:\n # Add to the Queue the next command to execute\n self.queue.put(data)\n except:\n pass",
"def move_to_joints(self, joint_state):\n goal = control_msgs.msg.FollowJointTrajectoryGoal()\n goal.trajectory.joint_names.extend(ArmJoints.names())\n point = trajectory_msgs.msg.JointTrajectoryPoint()\n point.positions.extend(joint_state.values())\n point.time_from_start = rospy.Duration(TIME_FROM_START)\n goal.trajectory.points.append(point)\n self._joint_client.send_goal(goal)\n self._joint_client.wait_for_result(rospy.Duration(10))",
"def observe(self, pre_observation, action, reward, post_observation, done):",
"def go_to_target_pose(self, target_joint_values):\n\n rospy.loginfo(\"Start going to the target pose at {}\".format(target_joint_values))\n\n # a goal to be sent to action server\n goal = FollowJointTrajectoryGoal()\n goal.trajectory.header.stamp = rospy.Time.now() + rospy.Duration(1.0)\n\n # a joint point in the trajectory\n trajPt = JointTrajectoryPoint()\n\n for idx in range(self._joint_num): # for each joint \n joint_name = \"joint_a\"+str(idx+1)\n goal.trajectory.joint_names.append(joint_name)\n\n\n trajPt.positions = target_joint_values\n trajPt.time_from_start = rospy.Duration(secs=3.0)\n\n # add the joint trajectory point to the goal\n goal.trajectory.points.append(trajPt)\n\n # send the goal to the action server\n self._action_client.send_goal(goal)\n\n # wait for the result\n rospy.loginfo(\"controlling iiwa to go to the given pose\")\n self._action_client.wait_for_result()\n rospy.loginfo(\"given position reached\")\n\n # show the error code\n #rospy.loginfo(self._action_client.get_result())",
"def goal_status(self, status, result):\n self.completion += 1\n\n # Goal reached\n if status == 3:\n rospy.loginfo(\"Goal succeeded\")\n\n # Goal aborted\n if status == 4:\n rospy.loginfo(\"Goal aborted\")\n\n # Goal rejected\n if status == 5:\n rospy.loginfo(\"Goal rejected\")",
"def _update_goal(self, observation, state, step_type):\n new_goal_mask = torch.unsqueeze((step_type == StepType.FIRST), dim=-1)\n generated_goal = self._generate_goal(observation, state)\n new_goal = torch.where(new_goal_mask, generated_goal, state.goal)\n return new_goal",
"def rviz_goal_callback(self, msg):\n self.x_g = msg.pose.position.x\n self.y_g = msg.pose.position.y\n rotation = [msg.pose.orientation.x, msg.pose.orientation.y, msg.pose.orientation.z, msg.pose.orientation.w]\n euler = tf.transformations.euler_from_quaternion(rotation)\n self.theta_g = euler[2]\n self.mode = Mode.NAV",
"def go_to_target_pose(self, target_joint_values):\n\n rospy.loginfo(\"Start going to the target pose at {}\".format(target_joint_values))\n\n # a goal to be sent to action server\n goal = FollowJointTrajectoryGoal()\n goal.trajectory.header.stamp = rospy.Time.now() + rospy.Duration(1.0)\n\n # a joint point in the trajectory\n trajPt = JointTrajectoryPoint()\n goal.trajectory.joint_names.append('base_x')\n goal.trajectory.joint_names.append('x_y')\n goal.trajectory.joint_names.append('y_car')\n for idx in range(self._joint_num): # for each joint \n joint_name = \"joint_a\"+str(idx+1)\n goal.trajectory.joint_names.append(joint_name)\n #trajPt.velocities.append(0.0)\n trajPt.positions = target_joint_values\n trajPt.time_from_start = rospy.Duration(secs=3.0)\n\n # add the joint trajectory point to the goal\n goal.trajectory.points.append(trajPt)\n\n # send the goal to the action server\n self._action_client.send_goal(goal)\n\n # wait for the result\n rospy.loginfo(\"Start waiting for going to the target pose\")\n self._action_client.wait_for_result()\n rospy.loginfo(\"Waiting ends\")\n\n # show the error code\n rospy.loginfo(self._action_client.get_result())",
"def controller_action(self, obs:Dict, take_action:bool=True, DEBUG:bool=False):\n grip_pos = obs['observation'][:3]\n object_pos = obs['observation'][3:6]\n object_rel_pos = obs['observation'][6:9]\n goal_pos = obs['desired_goal']\n # lift the hand little from the table vertically\n if not self.hand_higher:\n action = [0,0,1,0]\n if grip_pos[2]-object_pos[2] > 0.05:\n if take_action:\n self.hand_higher = True\n if DEBUG:\n print('Hand lifted from the table')\n # once above, move it above the puck\n if self.hand_higher and not self.hand_behind:\n goal_grip_pos = object_pos + (0.025 + self.r)*(object_pos - goal_pos)/np.linalg.norm(object_pos - goal_pos)\n # goal_object_vec = object_pos - goal_pos # vector pointing towards object from goal\n # action_pos = list(self.kp * goal_object_vec)\n action_pos = list(self.kp*(goal_grip_pos - grip_pos))\n action = action_pos[:2] + [0,0]\n if np.linalg.norm(grip_pos[:2]-goal_grip_pos[:2]) < 0.001:\n if take_action:\n self.hand_behind = True\n if DEBUG:\n print('Hand has moved behind')\n # now move the hand down\n if self.hand_behind and not self.hand_down:\n action = [0,0,-1,0]\n if grip_pos[2]-object_pos[2] <0.01:\n self.start_time = self.fetch_env.env.sim.data.time # start the time once we are ready to hit\n self.prev_time = self.start_time\n self.d1 = np.linalg.norm(goal_pos[:-1] - object_pos[:-1])/5 # Define d1 wrt the initial gripper pose rather than the object pose\n self.d2 = (np.linalg.norm(goal_pos[:-1] - object_pos[:-1]) - self.d1)\n self.f = self.d2 * self.mu * self.g / self.d1\n\n if take_action:\n self.hand_down = True\n\n v1 = np.sqrt(2*self.d2*self.mu*self.g)\n a = v1**2/(2*self.d1)\n\n if DEBUG:\n print('d2 = ' + str(self.d2))\n print('mu = ' + str(self.mu))\n print('v1 = ' +str(v1))\n print('d1 = ' + str(self.d1))\n print('a = '+str(a))\n print('Ready to HIT')\n # slide the puck\n if self.hand_down:\n v1 = np.sqrt(2*self.d2*self.mu*self.g)\n a = v1**2/(2*self.d1)\n if np.linalg.norm(goal_pos[:-1] - grip_pos[:-1]) > self.d2:\n if DEBUG:\n print('this is the distance ' + str(np.linalg.norm(goal_pos[:-1] - grip_pos[:-1])))\n cur_time = self.fetch_env.env.sim.data.time\n # delta s = sdot * dt, where sdot = f*t and s is measured along direction from puck to goal\n action_pos = list((goal_pos - grip_pos)/np.linalg.norm(goal_pos - grip_pos) * self.f * (cur_time - self.start_time)*(cur_time - self.prev_time))\n self.prev_time = cur_time\n #print('current speed = ' + str(a*(cur_time-self.start_time)))\n else:\n #print('no push')\n action_pos = [0,0]\n action = action_pos[:2] + [0,0]\n if DEBUG:\n print('commanded action = ' + str(np.linalg.norm(action[0:2])))\n # added clipping here\n #return action\n return np.clip(action, -1, 1)",
"def get_goal_from_trajectory(self, trajectory):\n pass",
"def _joint_states_callback(self, joint_state):\n with self._joint_states_lock:\n self._joints_position = {n: p for n, p in\n zip(joint_state.name,\n joint_state.position)}\n self._joints_velocity = {n: v for n, v in\n zip(joint_state.name,\n joint_state.velocity)}\n self._joints_effort = {n: v for n, v in\n zip(joint_state.name, joint_state.effort)}",
"def _step(self, *args, **kwargs):\n trajectory = self._env.step(*args, **kwargs)\n return self.get_trajectory_with_goal(trajectory, self._goal)",
"def update_movejoint(self, i):\n # Take value from spinBox\n goal = [self.jointpos[i - 1].value(), ]\n # Request movement\n from_arm_server(i, goal)",
"def get_action_to_position(self, action, last_position):\n\n distance = self.get_distance_gripper_to_object()\n self._joint_increment_value = 0.18 * distance[0] + 0.01\n\n joint_states_position = last_position\n action_position = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]\n\n rospy.logdebug(\"get_action_to_position>>>\" + str(joint_states_position))\n if action == 0: # Increment joint3_position_controller (elbow joint)\n action_position[0] = joint_states_position[0] + self._joint_increment_value / 2\n action_position[1] = joint_states_position[1]\n action_position[2] = joint_states_position[2]\n action_position[3] = joint_states_position[3]\n action_position[4] = joint_states_position[4]\n action_position[5] = joint_states_position[5]\n elif action == 1: # Decrement joint3_position_controller (elbow joint)\n action_position[0] = joint_states_position[0] - self._joint_increment_value / 2\n action_position[1] = joint_states_position[1]\n action_position[2] = joint_states_position[2]\n action_position[3] = joint_states_position[3]\n action_position[4] = joint_states_position[4]\n action_position[5] = joint_states_position[5]\n\n elif action == 2: # Increment joint2_position_controller (shoulder_lift_joint)\n action_position[0] = joint_states_position[0]\n action_position[1] = joint_states_position[1] + self._joint_increment_value / 2\n action_position[2] = joint_states_position[2]\n action_position[3] = joint_states_position[3]\n action_position[4] = joint_states_position[4]\n action_position[5] = joint_states_position[5]\n elif action == 3: # Decrement joint2_position_controller (shoulder_lift_joint)\n action_position[0] = joint_states_position[0]\n action_position[1] = joint_states_position[1] - self._joint_increment_value / 2\n action_position[2] = joint_states_position[2]\n action_position[3] = joint_states_position[3]\n action_position[4] = joint_states_position[4]\n action_position[5] = joint_states_position[5]\n\n elif action == 4: # Increment joint1_position_controller (shoulder_pan_joint)\n action_position[0] = joint_states_position[0]\n action_position[1] = joint_states_position[1]\n action_position[2] = joint_states_position[2] + self._joint_increment_value / 2\n action_position[3] = joint_states_position[3]\n action_position[4] = joint_states_position[4]\n action_position[5] = joint_states_position[5]\n elif action == 5: # Decrement joint1_position_controller (shoulder_pan_joint)\n action_position[0] = joint_states_position[0]\n action_position[1] = joint_states_position[1]\n action_position[2] = joint_states_position[2] - self._joint_increment_value / 2\n action_position[3] = joint_states_position[3]\n action_position[4] = joint_states_position[4]\n action_position[5] = joint_states_position[5]\n\n elif action == 6: # Increment joint4_position_controller (wrist_1_joint)\n action_position[0] = joint_states_position[0]\n action_position[1] = joint_states_position[1]\n action_position[2] = joint_states_position[2]\n action_position[3] = joint_states_position[3] + self._joint_increment_value\n action_position[4] = joint_states_position[4]\n action_position[5] = joint_states_position[5]\n elif action == 7: # Decrement joint4_position_controller (wrist_1_joint)\n action_position[0] = joint_states_position[0]\n action_position[1] = joint_states_position[1]\n action_position[2] = joint_states_position[2]\n action_position[3] = joint_states_position[3] - self._joint_increment_value\n action_position[4] = joint_states_position[4]\n action_position[5] = joint_states_position[5]\n\n elif action == 8: # 
Increment joint5_position_controller (wrist_2_joint)\n action_position[0] = joint_states_position[0]\n action_position[1] = joint_states_position[1]\n action_position[2] = joint_states_position[2]\n action_position[3] = joint_states_position[3]\n action_position[4] = joint_states_position[4] + self._joint_increment_value\n action_position[5] = joint_states_position[5]\n elif action == 9: # Decrement joint5_position_controller (wrist_2_joint)\n action_position[0] = joint_states_position[0]\n action_position[1] = joint_states_position[1]\n action_position[2] = joint_states_position[2]\n action_position[3] = joint_states_position[3]\n action_position[4] = joint_states_position[4] - self._joint_increment_value\n action_position[5] = joint_states_position[5]\n\n elif action == 10: # Increment joint6_position_controller (wrist_3_joint)\n action_position[0] = joint_states_position[0]\n action_position[1] = joint_states_position[1]\n action_position[2] = joint_states_position[2]\n action_position[3] = joint_states_position[3]\n action_position[4] = joint_states_position[4]\n action_position[5] = joint_states_position[5] + self._joint_increment_value\n elif action == 11: # Decrement joint6_position_controller (wrist_3_joint)\n action_position[0] = joint_states_position[0]\n action_position[1] = joint_states_position[1]\n action_position[2] = joint_states_position[2]\n action_position[3] = joint_states_position[3]\n action_position[4] = joint_states_position[4]\n action_position[5] = joint_states_position[5] - self._joint_increment_value\n elif action == 12: # turn on/off vacuum gripper\n if self.gripper_state.enabled:\n self.turn_off_gripper()\n else:\n self.turn_on_gripper()\n\n return action_position"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
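The goal_callback row above expects a goal whose input field is a sensor_msgs/JointState naming the gripper joint, with a single position inside [0, JOINT_LIMIT]. A hedged client-side sketch follows; the action message package, server name and joint name are assumptions, and only the goal's input field and the valid value range come from the code above.

import rospy
import actionlib
from sensor_msgs.msg import JointState
# Assumed import path for the action definitions used by the server above.
from ezgripper_msgs.msg import JointStateGripperAction, JointStateGripperGoal

rospy.init_node("ezgripper_goal_example")
client = actionlib.SimpleActionClient("ezgripper_controller", JointStateGripperAction)  # assumed server name
client.wait_for_server()

goal = JointStateGripperGoal()
# The joint name below is a placeholder for the controller's JOINT_NAME constant.
goal.input = JointState(name=["ezgripper_joint"], position=[1.0])
client.send_goal(goal)
client.wait_for_result()
result = client.get_result()  # outcome == 0 and returned_object == True on success (see the row above)
print(result)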
Callback executed when a preempt request has been received. | def preempt_callback(self):
rospy.loginfo("Action preempted")
self.action_server.set_preempted() | [
"def preempt(self, preempt: bool):\n\n self._preempt = preempt",
"def before_request_callback():\n schedule.run_pending()",
"def preempt_delay(self, preempt_delay: str):\n\n self._preempt_delay = preempt_delay",
"def process_request(self, request):\r\n if (PINNING_COOKIE in request.COOKIES or\r\n request.method not in READ_ONLY_METHODS):\r\n pin_this_thread()\r\n else:\r\n # In case the last request this thread served was pinned:\r\n unpin_this_thread()",
"def preempt_delay(self) -> str:\n return self._preempt_delay",
"def _throttle(self):\n\n if (time.time() - self.last_access_time) < \\\n DEFAULT_WEB_REQUEST_SLEEP_TIME:\n time.sleep(self.sleep_time)\n self.last_access_time = time.time()",
"def _handle_preapproval(self, features):\n log.debug(\"Server supports subscription pre-approvals.\")\n self.xmpp.features.add('preapproval')",
"async def pre_response(self, request, response, context=None):\n pass",
"def _request(self, *args: Any) -> None:\n if self._request_timer:\n self._reset_request_timer()\n else:\n self.logger.debug(f\"Firing request event.\")\n self._current_profile = None\n self._reset_turn_off_timer()\n self.fire_event(EVENT_TYPE_AUTOMATIC_LIGHTING, entity_id=self.entity_id, type=EVENT_DATA_TYPE_REQUEST)\n\n def _on_request_finished(*args: Any) -> None:\n \"\"\" Triggered when the request event has finished. \"\"\"\n self._reset_request_timer()\n\n if self.is_blocked:\n return\n\n if self._current_profile:\n self.logger.debug(f\"Turning on profile {self._current_profile.id} with the following values: { {CONF_ENTITY_ID: self._current_profile.lights, **self._current_profile.attributes} }\")\n self._current_status = self._current_profile.status\n self._turn_off_unused_entities(self._tracked_lights, self._current_profile.lights)\n self.call_service(LIGHT_DOMAIN, SERVICE_TURN_ON, entity_id=self._current_profile.lights, **self._current_profile.attributes)\n else:\n self.logger.debug(f\"No profile was provided.\")\n self._current_status = STATUS_IDLE\n self._turn_off_unused_entities(self._tracked_lights, [])\n\n self.async_schedule_update_ha_state(True)\n\n self._request_timer = async_call_later(self.hass, REQUEST_DEBOUNCE_TIME, _on_request_finished)",
"def parallel_call(self, request, claims):\n\n self.sleep(request['number'])\n request['number'] = request['number']**POWER\n return request",
"async def on_resumed(self):",
"def process_remote_request(self, request):\n worktime = random.uniform(.1,3)\n gevent.sleep(worktime)\n result = {\n 'command_id' : request.command_id,\n 'result' : 'fake_result'\n }\n log.debug('Finished processing request: %s', str(request))\n self._remote_client.enqueue(result)",
"def test_preemptive_priorities_at_class_change(self):\n # First without preemption:\n N = ciw.create_network(\n arrival_distributions={\n 'Class 0': [ciw.dists.NoArrivals()],\n 'Class 1': [ciw.dists.Sequential([2, 2, 2, 2, 2, float('inf')])]},\n service_distributions={\n 'Class 0': [ciw.dists.Deterministic(2.5)], \n 'Class 1': [ciw.dists.Deterministic(2.5)]},\n number_of_servers=[1],\n priority_classes=({'Class 0': 0, 'Class 1': 1}, [False]),\n class_change_time_distributions=[[None, None], [ciw.dists.Deterministic(1.2), None]]\n )\n Q = ciw.Simulation(N, exact=26)\n Q.simulate_until_max_time(20)\n recs = Q.get_all_records()\n recs.sort(key=lambda r: r.arrival_date)\n self.assertEqual(float(recs[0].arrival_date), 2)\n self.assertEqual(float(recs[1].arrival_date), 4)\n self.assertEqual(float(recs[2].arrival_date), 6)\n self.assertEqual(float(recs[3].arrival_date), 8)\n self.assertEqual(float(recs[4].arrival_date), 10)\n self.assertEqual(float(recs[0].waiting_time), 0)\n self.assertEqual(float(recs[1].waiting_time), 0.5)\n self.assertEqual(float(recs[2].waiting_time), 1)\n self.assertEqual(float(recs[3].waiting_time), 1.5)\n self.assertEqual(float(recs[4].waiting_time), 2)\n self.assertEqual(float(recs[0].service_start_date), 2)\n self.assertEqual(float(recs[1].service_start_date), 4.5)\n self.assertEqual(float(recs[2].service_start_date), 7)\n self.assertEqual(float(recs[3].service_start_date), 9.5)\n self.assertEqual(float(recs[4].service_start_date), 12)\n self.assertEqual(float(recs[0].service_end_date), 4.5)\n self.assertEqual(float(recs[1].service_end_date), 7)\n self.assertEqual(float(recs[2].service_end_date), 9.5)\n self.assertEqual(float(recs[3].service_end_date), 12)\n self.assertEqual(float(recs[4].service_end_date), 14.5)\n\n # Now with preemption:\n N = ciw.create_network(\n arrival_distributions={\n 'Class 0': [ciw.dists.NoArrivals()],\n 'Class 1': [ciw.dists.Sequential([2, 2, 2, 2, 2, float('inf')])]},\n service_distributions={\n 'Class 0': [ciw.dists.Deterministic(2.5)], \n 'Class 1': [ciw.dists.Deterministic(2.5)]},\n number_of_servers=[1],\n priority_classes=({'Class 0': 0, 'Class 1': 1}, [\"resample\"]),\n class_change_time_distributions=[[None, None], [ciw.dists.Deterministic(1.2), None]]\n )\n Q = ciw.Simulation(N, exact=26)\n Q.simulate_until_max_time(20)\n all_recs = Q.get_all_records()\n recs = [r for r in all_recs if r.record_type == 'service']\n recs.sort(key=lambda r: r.arrival_date)\n self.assertEqual(float(recs[0].arrival_date), 2)\n self.assertEqual(float(recs[1].arrival_date), 4)\n self.assertEqual(float(recs[2].arrival_date), 6)\n self.assertEqual(float(recs[3].arrival_date), 8)\n self.assertEqual(float(recs[4].arrival_date), 10)\n self.assertEqual(float(recs[0].waiting_time), 0)\n self.assertEqual(float(recs[1].waiting_time), 0.5)\n self.assertEqual(float(recs[2].waiting_time), 5.7)\n self.assertEqual(float(recs[3].waiting_time), 1.2)\n self.assertEqual(float(recs[4].waiting_time), 4.2)\n self.assertEqual(float(recs[0].service_start_date), 2)\n self.assertEqual(float(recs[1].service_start_date), 4.5)\n self.assertEqual(float(recs[2].service_start_date), 11.7)\n self.assertEqual(float(recs[3].service_start_date), 9.2)\n self.assertEqual(float(recs[4].service_start_date), 14.2)\n self.assertEqual(float(recs[0].service_end_date), 4.5)\n self.assertEqual(float(recs[1].service_end_date), 7)\n self.assertEqual(float(recs[2].service_end_date), 14.2)\n self.assertEqual(float(recs[3].service_end_date), 11.7)\n self.assertEqual(float(recs[4].service_end_date), 
16.7)\n\n # Test interrupted service data records\n interrupted_recs = [r for r in all_recs if r.record_type == 'interrupted service']\n self.assertEqual(len(interrupted_recs), 1)\n self.assertEqual(float(interrupted_recs[0].arrival_date), 6)\n self.assertEqual(float(interrupted_recs[0].service_start_date), 7)\n self.assertEqual(float(interrupted_recs[0].waiting_time), 1)\n self.assertEqual(float(interrupted_recs[0].exit_date), 9.2)\n self.assertEqual(float(interrupted_recs[0].service_time), 2.5)\n self.assertTrue(isnan(interrupted_recs[0].service_end_date))",
"def process_request(self,req):\r\n pass",
"def pre_refresh_callback(self, authorizer: prawcore.auth.BaseAuthorizer):",
"def invokePreCallbacks(self, node: 'SoNode') -> \"void\":\n return _coin.SoCallbackAction_invokePreCallbacks(self, node)",
"def initiate_ping_event():\n pass",
"def PendingSweeps(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def __preempt_lease(self, lease, preemption_time): \n \n self.logger.info(\"Preempting lease #%i...\" % (lease.id))\n self.logger.vdebug(\"Lease before preemption:\")\n lease.print_contents()\n vmrr = lease.get_last_vmrr()\n \n if vmrr.state == ResourceReservation.STATE_SCHEDULED and vmrr.start >= preemption_time:\n self.logger.debug(\"Lease was set to start in the middle of the preempting lease.\")\n must_cancel_and_requeue = True\n else:\n susptype = get_config().get(\"suspension\")\n if susptype == constants.SUSPENSION_NONE:\n must_cancel_and_requeue = True\n else:\n can_suspend = self.vm_scheduler.can_suspend_at(lease, preemption_time)\n if not can_suspend:\n self.logger.debug(\"Suspending the lease does not meet scheduling threshold.\")\n must_cancel_and_requeue = True\n else:\n if lease.numnodes > 1 and susptype == constants.SUSPENSION_SERIAL:\n self.logger.debug(\"Can't suspend lease because only suspension of single-node leases is allowed.\")\n must_cancel_and_requeue = True\n else:\n self.logger.debug(\"Lease can be suspended\")\n must_cancel_and_requeue = False\n \n if must_cancel_and_requeue:\n self.logger.info(\"... lease #%i has been cancelled and requeued.\" % lease.id)\n self.preparation_scheduler.cancel_preparation(lease)\n self.vm_scheduler.cancel_vm(vmrr)\n lease.remove_vmrr(vmrr)\n # TODO: Take into account other states\n if lease.get_state() == Lease.STATE_SUSPENDED_SCHEDULED:\n lease.set_state(Lease.STATE_SUSPENDED_QUEUED)\n else:\n lease.set_state(Lease.STATE_QUEUED)\n self.__enqueue_in_order(lease)\n else:\n self.logger.info(\"... lease #%i will be suspended at %s.\" % (lease.id, preemption_time))\n self.vm_scheduler.preempt_vm(vmrr, preemption_time) \n \n get_persistence().persist_lease(lease)\n\n self.logger.vdebug(\"Lease after preemption:\")\n lease.print_contents()",
"def handle_request(self,req):\r\n self.process_request(req)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
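Because the server registers a preempt callback, a client can request preemption of an active goal simply by cancelling it; the registered preempt_callback above then runs and calls set_preempted(). A short sketch under the same assumptions as the previous example (message package, server name and joint name are placeholders):

import rospy
import actionlib
from sensor_msgs.msg import JointState
from ezgripper_msgs.msg import JointStateGripperAction, JointStateGripperGoal  # assumed import path

rospy.init_node("ezgripper_preempt_example")
client = actionlib.SimpleActionClient("ezgripper_controller", JointStateGripperAction)  # assumed server name
client.wait_for_server()

goal = JointStateGripperGoal()
goal.input = JointState(name=["ezgripper_joint"], position=[0.5])  # placeholder joint name
client.send_goal(goal)
rospy.sleep(0.5)       # give the gripper a moment to start moving
client.cancel_goal()   # triggers the registered preempt_callback on the server side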
Publish continuously the gripper's joint state in order to be able to correctly plan and avoid collision | def publish_joint_state(self):
# Define the publisher (the topic name is standard)
pub = rospy.Publisher('/joint_states', JointState, queue_size=10)
# To avoid overloading the gripper with too many read/write commands, limit reads to 200 Hz
rate = rospy.Rate(200)
# Initialise a JointState message with the proper joint name
joint_state_message = JointState()
joint_state_message.header = Header()
joint_state_message.name = [self.gripper.JOINT_NAME]
joint_state_message.velocity = []
joint_state_message.effort = []
# While the node is running publish an updated message
while not rospy.is_shutdown():
joint_state_message.header.stamp = rospy.Time.now()
joint_state_message.position = [self.gripper.get_joint_value()]
pub.publish(joint_state_message)
rate.sleep() | [
"def _publish_joint_state(self):\n # only publish if we have a subscriber\n if self._joint_state_pub.get_num_connections() == 0:\n return\n\n js = JointState()\n js.header.stamp = rospy.Time.now()\n js.header.frame_id = 'vector'\n js.name = ['head', 'lift']\n js.position = [self._vector.head_angle_rad,\n self._vector.lift_height_mm * 0.001]\n js.velocity = [0.0, 0.0]\n js.effort = [0.0, 0.0]\n self._joint_state_pub.publish(js)",
"def joint_trajectory_publisher(self):\n while not rospy.is_shutdown():\n if self.queue.full():\n # If a command from the environment is waiting to be executed,\n # publish the command, otherwise preempt trajectory\n self.jt_pub.publish(self.queue.get())\n self.stop_flag = False\n else:\n # If the empty JointTrajectory message has not been published,\n # publish it and set the stop flag to True, else pass\n if not self.stop_flag:\n self.jt_pub.publish(JointTrajectory())\n self.stop_flag = True\n else: # TODO - is this condition ever needed?\n pass",
"def publish():\n car_pose = mux(g['curr_car_state'])\n if car_pose is not None:\n car_pose.header.stamp = rospy.Time.now()\n pub.publish(car_pose)",
"def publishCommand(self):\n self.joint_angles_msg.joint_angles = [self.angle_setpoints[key] for key in self.joint_angles_msg.joint_names]\n self.joint_angles_pub.publish(self.joint_angles_msg)\n\n # Update previous setpoint\n for key in self.joint_names:\n self.angle_setpoints_previous[key] = self.angle_setpoints[key]",
"def publish_feedback(self):\n # If there is no trajectory, there is nothing to do\n if self.received_trajectory is None:\n return\n\n # Get the current states of the joints\n success, current_joints_states = \\\n self.robot_interface.get_current_joints()\n\n if not success: # Couldn't get the current joints' state\n rospy.logwarn(\"Could not publish on feedback_states:\"\n \"current states are unknown. Assuming all to 0.\")\n current_joints_states = [0 for _ in range(6)]\n\n # Get the desired position. What should the robot joints be right now?\n time_from_start = rospy.Time.now() - self.start_time\n # Find which point represents the current desired position\n for point in self.received_trajectory.points:\n if time_from_start > point.time_from_start:\n continue\n break\n desired_point = point\n\n # Make sure the length of the current states and the target states\n # is exactly the length of the joints\n assert len(JOINTS_NAMES) == len(current_joints_states) and \\\n len(JOINTS_NAMES) == len(desired_point.positions), \\\n \"Target and current states have different length. \" \\\n \"Expected {} joints, got {} (target) and {} (current)\".format(\n len(JOINTS_NAMES), len(desired_point.positions),\n len(current_joints_states)\n )\n\n # Create the message to be published\n msg = FollowJointTrajectoryFeedback()\n msg.header.frame_id = \"\"\n msg.header.stamp = rospy.get_rostime()\n msg.joint_names = JOINTS_NAMES\n\n # Set the goal states4\n msg.desired.positions = desired_point.positions\n msg.desired.velocities = []\n msg.desired.accelerations = []\n msg.desired.effort = []\n msg.desired.time_from_start = desired_point.time_from_start\n\n # Set the actual states\n msg.actual.positions = current_joints_states\n msg.actual.velocities = []\n msg.actual.accelerations = []\n msg.actual.effort = []\n msg.actual.time_from_start = desired_point.time_from_start\n\n # Calculate the error\n position_error = [goal - current for goal, current in zip(\n msg.desired.positions, msg.actual.positions\n )]\n velocity_error = [goal - current for goal, current in zip(\n msg.desired.velocities, msg.actual.velocities\n )]\n acceleration_error = [goal - current for goal, current in zip(\n msg.desired.accelerations,\n msg.actual.accelerations\n )]\n effort_error = [goal - current for goal, current in zip(\n msg.desired.effort, msg.actual.effort\n )]\n\n # Set the errors\n msg.error.positions = position_error\n msg.error.velocities = velocity_error\n msg.error.accelerations = acceleration_error\n msg.error.effort = effort_error\n msg.error.time_from_start = desired_point.time_from_start\n\n # Publish the message on /feedback_states topic\n self.pub_feedback_states.publish(msg)",
"def init_pose(self):\n\t\t# Return all joints to 0\n\t\t# legs\n\t\trate = rospy.Rate(2)\n\t\ti = 0\n\t\twhile not rospy.is_shutdown():\n\t\t\tself.left_thigh.publish(self.init)\n\t\t\tself.left_knee.publish(self.init)\n\t\t\tself.left_ankle.publish(self.init)\n\t\t\t\n\t\t\tself.right_thigh.publish(self.init)\n\t\t\tself.right_knee.publish(self.init)\n\t\t\tself.right_ankle.publish(self.init)\n\t\t\t\n\t\t\tself.torso.publish(self.init)\n\t\t\tself.head.publish(self.init)\n\t\t\t\n\t\t\tself.left_shoulder_roll.publish(self.init)\n\t\t\tself.left_shoulder_pitch.publish(self.init)\n\t\t\tself.left_shoulder_yaw.publish(self.init)\n\t\t\tself.left_elbow_flexion.publish(self.init)\n\t\t\tself.left_elbow_pronation.publish(self.init)\n\t\t\tself.left_wrist_flexion.publish(self.init)\n\t\t\tself.left_wrist_deviation.publish(self.init)\n\t\t\t\n\t\t\tself.right_shoulder_roll.publish(self.init)\n\t\t\tself.right_shoulder_pitch.publish(self.init)\n\t\t\tself.right_shoulder_yaw.publish(self.init)\n\t\t\tself.right_elbow_flexion.publish(self.init)\n\t\t\tself.right_elbow_pronation.publish(self.init)\n\t\t\tself.right_wrist_flexion.publish(self.init)\n\t\t\tself.right_wrist_deviation.publish(self.init)\n\t\t\trate.sleep()",
"def get_joint_state(self):\n\n joint_state = np.zeros((p.num_rovers, p.num_inputs))\n\n for rover_id in range(self.num_agents):\n self_x = self.rover_pos[rover_id, 0]; self_y = self.rover_pos[rover_id, 1]\n self_orient = self.rover_pos[rover_id, 2]\n\n rover_state = [0.0 for _ in range(int(360 / p.angle_resolution))]\n poi_state = [0.0 for _ in range(int(360 / p.angle_resolution))]\n temp_poi_dist_list = [[] for _ in range(int(360 / p.angle_resolution))]\n temp_rover_dist_list = [[] for _ in range(int(360 / p.angle_resolution))]\n\n # Log POI distances into brackets\n for poi_id in range(p.num_pois):\n poi_x = self.poi_pos[poi_id, 0]\n poi_y = self.poi_pos[poi_id, 1]\n poi_value = self.poi_values[poi_id]\n\n angle, dist = self.get_angle_dist(self_x, self_y, poi_x, poi_y)\n\n if dist >= self.obs_radius:\n continue # Observability radius\n\n angle -= self_orient\n if angle < 0:\n angle += 360\n\n bracket = int(angle / p.angle_resolution)\n if bracket >= len(temp_poi_dist_list):\n print(\"ERROR: BRACKET EXCEED LIST\", bracket, len(temp_poi_dist_list))\n bracket = len(temp_poi_dist_list) - 1\n if dist < p.min_distance: # Clip distance to not overwhelm tanh in NN\n dist = p.min_distance\n\n temp_poi_dist_list[bracket].append(poi_value/dist)\n\n # Log rover distances into brackets\n for other_rover_id in range(p.num_rovers):\n if other_rover_id == rover_id: # Ignore self\n continue\n rov_x = self.rover_pos[other_rover_id, 0]\n rov_y = self.rover_pos[other_rover_id, 1]\n angle, dist = self.get_angle_dist(self_x, self_y, rov_x, rov_y)\n\n if dist >= self.obs_radius:\n continue # Observability radius\n\n angle -= self_orient\n if angle < 0:\n angle += 360\n\n if dist < p.min_distance: # Clip distance to not overwhelm sigmoid in NN\n dist = p.min_distance\n\n bracket = int(angle / p.angle_resolution)\n if bracket >= len(temp_rover_dist_list):\n print(\"ERROR: BRACKET EXCEED LIST\", bracket, len(temp_rover_dist_list))\n bracket = len(temp_rover_dist_list) - 1\n temp_rover_dist_list[bracket].append(1/dist)\n\n # Encode the information into the state vector\n for bracket in range(int(360 / p.angle_resolution)):\n # POIs\n num_poi = len(temp_poi_dist_list[bracket]) # Number of POIs in bracket\n if num_poi > 0:\n if p.sensor_model == 'density':\n poi_state[bracket] = sum(temp_poi_dist_list[bracket]) / num_poi # Density Sensor\n elif p.sensor_model == 'summed':\n poi_state[bracket] = sum(temp_poi_dist_list[bracket]) # Summed Distance Sensor\n elif p.sensor_model == 'closest':\n poi_state[bracket] = max(temp_poi_dist_list[bracket]) # Closest Sensor\n else:\n sys.exit('Incorrect sensor model')\n else:\n poi_state[bracket] = -1.0\n joint_state[rover_id, bracket] = poi_state[bracket]\n\n # Rovers\n num_agents = len(temp_rover_dist_list[bracket]) # Number of rovers in bracket\n if num_agents > 0:\n if p.sensor_model == 'density':\n rover_state[bracket] = sum(temp_rover_dist_list[bracket]) / num_agents # Density Sensor\n elif p.sensor_model == 'summed':\n rover_state[bracket] = sum(temp_rover_dist_list[bracket]) # Summed Distance Sensor\n elif p.sensor_model == 'closest':\n rover_state[bracket] = max(temp_rover_dist_list[bracket]) # Closest Sensor\n else:\n sys.exit('Incorrect sensor model')\n else:\n rover_state[bracket] = -1.0\n joint_state[rover_id, (bracket + 4)] = rover_state[bracket]\n\n return joint_state",
"def push(self):\n home_pose = tfx.pose([0.54, 0.2, 0.71], tfx.tb_angles(-90,0,0), frame='base_link')\n home_joints = [0.6857, 0.31154, 2.21, -1.062444, -0.33257,-1.212881, -0.81091]\n delta_pos = [0, -0.10, 0]\n speed = 0.02\n file = '../data/push_{0}_on_{1}.bag'.format(self.object_material, self.floor_material)\n \n self.execute_experiment(file, home_joints, home_pose, delta_pos, speed=speed)",
"def joint_states_cb(self, data):\n if self.start_recording:\n #self.last_joint_states_data = data\n #self.current_rosbag.write(DEFAULT_JOINT_STATES, data)\n self.joint_states_accumulator.append(data)\n #self.time_accumulator.append(rospy.Time.now())",
"def publish_once_from_queue(self):\n self.messages_lock.acquire()\n if len(self.preds) >= 3:\n world_frame = '/ar_marker_13'\n turtlebot_frame = '/ar_marker_14'\n try:\n trans, rot = self.listener.lookupTransform(world_frame,\n turtlebot_frame,\n rospy.Time(0))\n # convert quaternion into 3x3 matrix\n rot = tf.transformations.quaternion_matrix(rot)[:3, :3]\n except (tf.LookupException,\n tf.ConnectivityException, \n tf.ExtrapolationException) as e:\n print(e)\n return\n\n self.preds = np.array(self.preds)\n self.states = np.array(self.states)\n self.intersects = np.array(self.intersects)\n self.avgs = np.array(self.avgs)\n ax = plt.gca()\n plt.scatter(self.preds[:, 0], self.preds[:, 1], label='predicted_point')\n plt.scatter(self.states[:, 0], self.states[:, 1], label='state_estimate')\n plt.scatter(self.intersects[:, 0], self.intersects[:, 1], label='intersect')\n plt.scatter(self.avgs[:, 0], self.avgs[:, 1], label='average_state_est')\n plt.scatter([trans[0]], [trans[1]], label='turtlebot')\n ax.legend()\n plt.show()\n\n self.preds = []\n self.states = []\n self.intersects = []\n self.avgs = []\n self.messages_lock.release()",
"def _init_publisher(self):\n pub_queue_size = 10\n if self.real_robot:\n self.jt_pub = rospy.Publisher(\n '/pos_traj_controller/command', JointTrajectory, queue_size=pub_queue_size)\n else:\n self.jt_pub = rospy.Publisher(\n '/eff_joint_traj_controller/command', JointTrajectory, queue_size=pub_queue_size)",
"def execute(plan, freq=140): #freq in hz\n # print(plan.joint_trajectory.points)\n override = [0, 0, 0, -0.5, 0, 0.5, 0.75]\n target_pos = plan.joint_trajectory.points\n rate = rospy.Rate(freq)\n for point in target_pos:\n joint_pos = point.positions\n for i in range(7):\n #if i < 6:\n #publishers[i].publish(override[i])\n #else:\n publishers[i].publish(joint_pos[i])\n rate.sleep()",
"def publish(self):\n\t\t# asks the ReST each second for a new game update\n\t\twhile True and self.ACTIVE:\n\t\t\tresp = self.RestReceiver.get_current_message()\n\t\t\tif resp is not None:\n\t\t\t\tmsg = resp[0]\n\t\t\t\tcurr_topic = resp[1]\n\t\t\t\t# when a game winner is declared, stop the process\n\t\t\t\tif str(msg).__contains__(\"winner\"):\n\t\t\t\t\tself.GAME_STOP = True\n\t\t\t\t\tself.ui.show_frame(self.ui.frames[GameStartPage])\n\t\t\t\t\tself.main.reset()\n\t\t\t\tif str(msg).__contains__(\"game start\"):\n\t\t\t\t\tself.informationDisplay.update_information(dict(msg)['type'])\n\t\t\t\t\tself.client.publish(self.GEN_TOPIC, str(msg))\n\t\t\t\t# evaluate if message should be send or just be displayed\n\t\t\t\tif evaluate_relevance(msg) and not self.GAME_STOP:\n\t\t\t\t\tself.informationDisplay.update_information(display_message(dict(msg)))\n\t\t\t\t\tresult = self.client.publish(curr_topic, str(msg))\n\t\t\t\t\tresult: [0, 1]\n\t\t\t\t\tstatus = result[0]\n\t\t\t\t\tif status == 0:\n\t\t\t\t\t\tprint(f\"[{self.game}]: Send `{msg}` to `{curr_topic}`\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(f\"[{self.game}]: Failed to send message to {curr_topic}\")\n\n\t\t\ttime.sleep(1)",
"def publish_navigation(self, event=None):\n # current state\n pos = np.copy(self.navsim.pos)\n vel = np.copy(self.navsim.vel)\n depth = self.navsim.depth_bottom\n\n # apply offsets (simple strategy)\n pos[0:3] += self.offset_pos[0:3]\n\n # send ROS messages\n self.send_nav_sts(pos, vel, depth)\n\n # send TF messages\n self.send_tf_odom(pos, vel)\n #self.send_tf_ned(pos, vel)\n\n # publish water currents\n self.send_currents()",
"def goal_callback(self):\n # Get the input field from the goal, containing the target joint state (JointState msg)\n joint_state = self.action_server.accept_new_goal().input\n\n # Make sure the input msg is correct\n target_value = joint_state.position[0]\n if joint_state.name[0] != self.gripper.JOINT_NAME:\n rospy.logerr(\"Command should be given for the joint {}, not {}\".format(joint_state.name[0],\n self.gripper.JOINT_NAME))\n # Send a result message containing a failure\n self.result_message.outcome = 1\n self.result_message.returned_object = False\n self.action_server.set_aborted(self.result_message)\n return\n if target_value < 0 or target_value > self.gripper.JOINT_LIMIT:\n rospy.logerr(\"The valid joint range of the EZGripper is [0, 1.94]. Request is {}\".format(target_value))\n # Send a result message containing a failure\n self.result_message.outcome = 1\n self.result_message.returned_object = False\n self.action_server.set_aborted(self.result_message)\n return\n\n # Move the gripper to the joint state contained in the goal received with maximum speed.\n # Once the gripper has finished to move, maximum torque is applied to hold the object\n self.gripper.go_to_joint_value(target_value, 100)\n\n # Initialise and fill a JointStateGripperFeedback message\n action_feedback = JointStateGripperFeedback()\n action_feedback.current_joint_state.header.stamp = rospy.Time.now()\n action_feedback.current_joint_state.name = [self.gripper.JOINT_NAME]\n action_feedback.current_joint_state.position = [self.gripper.get_joint_value()]\n # Publish the message\n self.action_server.publish_feedback(action_feedback)\n\n # Send a result message containing a success\n self.result_message.outcome = 0\n self.result_message.returned_object = True\n self.action_server.set_succeeded(self.result_message)",
"def publish_relay_state():\n global relay\n if relay.value():\n client.publish(topic_name(b\"state\"), b\"on\")\n else:\n client.publish(topic_name(b\"state\"), b\"off\")\n print(\"Relay state: {}\".format(\"on\" if relay.value() else \"off\"))",
"def send_setpoints(self):\n pose = PoseStamped()\n pose.header.stamp = rospy.Time.now()\n pose.pose.position.x = 0\n pose.pose.position.y = 0\n pose.pose.position.z = 0\n\n rate = rospy.Rate(20.0)\n for i in range(100):\n self.local_pos_pub.publish(pose)\n rate.sleep()\n #rospy.loginfo(pose)",
"def move_to_joints(self, joint_state):\n goal = control_msgs.msg.FollowJointTrajectoryGoal()\n goal.trajectory.joint_names.extend(ArmJoints.names())\n point = trajectory_msgs.msg.JointTrajectoryPoint()\n point.positions.extend(joint_state.values())\n point.time_from_start = rospy.Duration(TIME_FROM_START)\n goal.trajectory.points.append(point)\n self._joint_client.send_goal(goal)\n self._joint_client.wait_for_result(rospy.Duration(10))",
"def publish_state(self) -> None:\n if self._setup:\n state = self.get()\n if state is not None:\n self._log.debug(\n \"Read state %s logic %s from '%s' on %s\",\n TEXT_STATE[state],\n Logic(int(state ^ self._invert)).name,\n self.name,\n self.pin_name,\n )\n common.publish_queue.put_nowait(\n PublishMessage(path=self.path, content=TEXT_STATE[state])\n )",
"def __init__(self, action_server_name, gripper_id=\"EZGripper\", port_name=\"/dev/ttyUSB0\", baudrate=57600,\n servo_ids=[1]):\n # Initialise the object allowing to control the EZGripper\n self.gripper = EZGripper(gripper_id, port_name, baudrate, servo_ids)\n # Initialise a JointStateGripperResult message\n self.result_message = JointStateGripperResult()\n # Initialise the action server\n self.action_server = actionlib.SimpleActionServer(action_server_name, JointStateGripperAction, auto_start=False)\n # Set the callback to be executed when a goal is received\n self.action_server.register_goal_callback(self.goal_callback)\n # Set the callback that should be executed when a preempt request is received\n self.action_server.register_preempt_callback(self.preempt_callback)\n # Start the server\n self.action_server.start()\n # Start another thread in which we publish continuously the joint state of the gripper\n thread.start_new_thread(self.publish_joint_state, ())\n rospy.loginfo(\"Joint state EZGripper controller ready to receive commands\")"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
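The publish_joint_state loop above makes the gripper's state available on the standard /joint_states topic, so motion-planning and collision-checking components can pick it up. A minimal listener sketch is given below; only the topic name and message type come from the code above, while the node and callback names are placeholders.

import rospy
from sensor_msgs.msg import JointState

def on_joint_state(msg):
    # Messages from the controller above carry a single name/position pair for the gripper joint.
    rospy.loginfo("%s = %.3f", msg.name[0], msg.position[0])

rospy.init_node("ezgripper_state_listener")
rospy.Subscriber("/joint_states", JointState, on_joint_state)
rospy.spin()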